From bfd10c0db409161abd91d35c769ddc0d7b28f43d Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Wed, 23 Jan 2013 17:46:25 +0000 Subject: Add a configuration extension for setting hostname --- set-hostname.configure | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100755 set-hostname.configure diff --git a/set-hostname.configure b/set-hostname.configure new file mode 100755 index 00000000..e44c5d56 --- /dev/null +++ b/set-hostname.configure @@ -0,0 +1,27 @@ +#!/bin/sh +# Copyright (C) 2013 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + +# Set hostname on system from HOSTNAME. + + +set -e + +if [ -n "$HOSTNAME" ] +then + echo "$HOSTNAME" > "$1/etc/hostname" +fi + -- cgit v1.2.1 From d64dc1e0b80d3bc09fcca05d15fe10cd60c332a6 Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Wed, 23 Jan 2013 17:11:43 +0000 Subject: Add a write extension for raw disk images --- rawdisk.write | 70 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100755 rawdisk.write diff --git a/rawdisk.write b/rawdisk.write new file mode 100755 index 00000000..c6f9c7f6 --- /dev/null +++ b/rawdisk.write @@ -0,0 +1,70 @@ +#!/usr/bin/python +# Copyright (C) 2012-2013 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + +'''A Morph deployment write extension for raw disk images.''' + + +import os +import time +import tempfile + +import morphlib.writeexts + + +class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): + + '''Create a raw disk image during Morph's deployment. + + The location command line argument is the pathname of the disk image + to be created. 
+ + ''' + + def process_args(self, args): + if len(args) != 2: + raise cliapp.AppException('Wrong number of command line args') + + temp_root, location = args + self.status( + msg='Temporary system root: %(temp_root)s', + temp_root=temp_root) + self.status( + msg='Disk image to create: %(location)s', + location=location) + + size = self.get_disk_size() + self.status(msg='Disk size is %(size)d bytes', size=size) + + self.create_raw_disk_image(location, size) + self.mkfs_btrfs(location) + mp = self.mount(location) + try: + self.create_factory(mp, temp_root) + self.create_fstab(mp) + self.install_extlinux(mp) + except BaseException, e: + self.status(msg='EEK') + self.unmount(mp) + raise + else: + self.unmount(mp) + + self.status(msg='Disk image has been created') + + +RawDiskWriteExtension().run() + -- cgit v1.2.1 From 2d40cac81a029f29c8e16593112a8c18e945f83b Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Thu, 24 Jan 2013 15:08:18 +0000 Subject: Add a write extension for VirtualBox --- virtualbox-ssh.write | 158 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 158 insertions(+) create mode 100755 virtualbox-ssh.write diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write new file mode 100755 index 00000000..e2f592e7 --- /dev/null +++ b/virtualbox-ssh.write @@ -0,0 +1,158 @@ +#!/usr/bin/python +# Copyright (C) 2012-2013 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + +'''A Morph deployment write extension for deploying to VirtualBox via ssh. + +VirtualBox is assumed to be running on a remote machine, which is +accessed over ssh. The machine gets created, but not started. + +''' + + +import os +import re +import time +import tempfile +import urlparse + +import morphlib.writeexts + + +class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): + + '''Create a VirtualBox virtual machine during Morph's deployment. + + The location command line argument is the pathname of the disk image + to be created. The user is expected to provide the location argument + using the following syntax: + + vbox+ssh://HOST/GUEST/PATH + + where: + + * HOST is the host on which VirtualBox is running + * GUEST is the name of the guest virtual machine on that host + * PATH is the path to the disk image that should be created, + on that host + + The extension will connect to HOST via ssh to run VirtualBox's + command line management tools. 
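+
+    For example (all values here are purely illustrative):
+
+        vbox+ssh://builder@example.com/testvm/vms/testvm.vdi
+
+    would create a guest called "testvm" on example.com, with its
+    disk image at /vms/testvm.vdi on that host.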
+
+    '''
+
+    def process_args(self, args):
+        if len(args) != 2:
+            raise cliapp.AppException('Wrong number of command line args')
+
+        temp_root, location = args
+        ssh_host, vm_name, vdi_path = self.parse_location(location)
+
+        self.status(
+            msg='Temporary system root: %(temp_root)s',
+            temp_root=temp_root)
+        self.status(
+            msg='VirtualBox server: %(ssh_host)s',
+            ssh_host=ssh_host)
+        self.status(
+            msg='VirtualBox guest: %(vm_name)s',
+            vm_name=vm_name)
+
+        size = self.get_disk_size()
+        self.status(msg='Disk size is %(size)d bytes', size=size)
+
+        raw_disk = tempfile.mkstemp()
+        self.create_raw_disk_image(raw_disk, size)
+        try:
+            self.mkfs_btrfs(raw_disk)
+            mp = self.mount(raw_disk)
+        except BaseException:
+            self.status('EEEK')
+            os.remove(raw_disk)
+            raise
+        try:
+            self.create_factory(mp, temp_root)
+            self.create_fstab(mp)
+            self.install_extlinux(mp)
+        except BaseException, e:
+            self.status(msg='EEK')
+            self.unmount(mp)
+            os.remove(raw_disk)
+            raise
+        else:
+            self.unmount(mp)
+
+        try:
+            self.transfer_and_convert_to_vdi(
+                raw_disk, size, ssh_host, vdi_path)
+            self.create_virtualbox_guest(ssh_host, vm_name, vdi_path)
+        except BaseException:
+            self.status('EEEK')
+            os.remove(raw_disk)
+            raise
+        else:
+            os.remove(raw_disk)
+
+        self.status(
+            msg='Virtual machine %(vm_name)s has been created',
+            vm_name=vm_name)
+
+    def parse_location(self, location):
+        '''Parse the location argument to get relevant data.'''
+
+        x = urlparse.urlparse(location)
+        if x.scheme != 'vbox+ssh':
+            raise cliapp.AppException(
+                'URL schema must be vbox+ssh in %s' % location)
+        m = re.match('^/(?P<guest>[^/]+)(?P<path>/.+)$', x.path)
+        if not m:
+            raise cliapp.AppException('Cannot parse location %s' % location)
+        return x.netloc, m.group('guest'), m.group('path')
+
+    def transfer_and_convert_to_vdi(self, raw_disk, size, ssh_host, vdi_path):
+        '''Transfer raw disk image to VirtualBox host, and convert to VDI.'''
+
+        self.status(msg='Transfer disk and convert to VDI')
+        with open(raw_disk, 'rb') as f:
+            cliapp.runcmd(
+                ['ssh', ssh_host,
+                 'VBoxManage', 'convertfromraw', 'stdin', vdi_path, str(size)],
+                stdin=f)
+
+    def create_virtualbox_guest(self, ssh_host, vm_name, vdi_path):
+        '''Create the VirtualBox virtual machine.'''
+
+        self.status(msg='Create VirtualBox virtual machine')
+
+        commands = [
+            ['createvm', '--name', vm_name, '--ostype', 'Linux26_64',
+             '--register'],
+            ['modifyvm', vm_name, '--ioapic', 'on', '--memory', '1024',
+             '--nic1', 'nat'],
+            ['storagectl', vm_name, '--name', '"SATA Controller"',
+             '--add', 'sata', '--bootable', 'on', '--sataportcount', '2'],
+            ['storageattach', vm_name, '--storagectl', '"SATA Controller"',
+             '--port', '0', '--device', '0', '--type', 'hdd', '--medium',
+             vdi_path],
+        ]
+
+        for command in commands:
+            argv = ['ssh', ssh_host, 'VBoxManage'] + command
+            cliapp.runcmd(argv)
+
+
+VirtualBoxPlusSshWriteExtension().run()
+
-- cgit v1.2.1

From 253b541bfbfb18f731f90348bae480f304297d36 Mon Sep 17 00:00:00 2001
From: Lars Wirzenius
Date: Thu, 24 Jan 2013 15:41:11 +0000
Subject: Add a write extension for kvm+libvirt

---
 kvm.write | 140 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 140 insertions(+)
 create mode 100755 kvm.write

diff --git a/kvm.write b/kvm.write
new file mode 100755
index 00000000..ed85b17e
--- /dev/null
+++ b/kvm.write
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# Copyright (C) 2012-2013 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+'''A Morph deployment write extension for deploying to KVM+libvirt.'''
+
+
+import os
+import re
+import urlparse
+
+import morphlib.writeexts
+
+
+class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension):
+
+    '''Create a KVM/LibVirt virtual machine during Morph's deployment.
+
+    The location command line argument is the pathname of the disk image
+    to be created. The user is expected to provide the location argument
+    using the following syntax:
+
+        kvm+ssh://HOST/GUEST/PATH
+
+    where:
+
+    * HOST is the host on which KVM/LibVirt is running
+    * GUEST is the name of the guest virtual machine on that host
+    * PATH is the path to the disk image that should be created,
+      on that host
+
+    The extension will connect to HOST via ssh to run libvirt's
+    command line management tools.
+
+    '''
+
+    def process_args(self, args):
+        if len(args) != 2:
+            raise cliapp.AppException('Wrong number of command line args')
+
+        temp_root, location = args
+        ssh_host, vm_name, vm_path = self.parse_location(location)
+
+        self.status(
+            msg='Temporary system root: %(temp_root)s',
+            temp_root=temp_root)
+        self.status(
+            msg='libvirt server: %(ssh_host)s',
+            ssh_host=ssh_host)
+        self.status(
+            msg='libvirt guest: %(vm_name)s',
+            vm_name=vm_name)
+        self.status(
+            msg='libvirt disk image: %(vm_path)s',
+            vm_path=vm_path)
+
+        size = self.get_disk_size()
+        self.status(msg='Disk size is %(size)d bytes', size=size)
+
+        raw_disk = tempfile.mkstemp()
+        self.create_raw_disk_image(raw_disk, size)
+        try:
+            self.mkfs_btrfs(raw_disk)
+            mp = self.mount(raw_disk)
+        except BaseException:
+            self.status(msg='EEEK')
+            os.remove(raw_disk)
+            raise
+        try:
+            self.create_factory(mp, temp_root)
+            self.create_fstab(mp)
+            self.install_extlinux(mp)
+        except BaseException, e:
+            self.status(msg='EEK')
+            self.unmount(mp)
+            os.remove(raw_disk)
+            raise
+        else:
+            self.unmount(mp)
+
+        try:
+            self.transfer(raw_disk, size, ssh_host, vm_path)
+            self.create_libvirt_guest(ssh_host, vm_name, vm_path)
+        except BaseException:
+            self.status(msg='EEEK')
+            os.remove(raw_disk)
+            raise
+        else:
+            os.remove(raw_disk)
+
+        self.status(
+            msg='Virtual machine %(vm_name)s has been created',
+            vm_name=vm_name)
+
+    def parse_location(self, location):
+        '''Parse the location argument to get relevant data.'''
+
+        x = urlparse.urlparse(location)
+        if x.scheme != 'kvm+ssh':
+            raise cliapp.AppException(
+                'URL schema must be kvm+ssh in %s' % location)
+        m = re.match('^/(?P<guest>[^/]+)(?P<path>/.+)$', x.path)
+        if not m:
+            raise cliapp.AppException('Cannot parse location %s' % location)
+        return x.netloc, m.group('guest'), m.group('path')
+
+    def transfer(self, raw_disk, size, ssh_host, vm_path):
+        '''Transfer raw disk image to libvirt host.'''
+
+        self.status(msg='Transfer disk image')
+        target = '%s:%s' % (ssh_host, vm_path)
+        with open(raw_disk, 'rb') as f:
+            cliapp.runcmd(['rsync', '-zS', raw_disk, target])
+
+    def create_libvirt_guest(self, ssh_host, vm_name, vm_path):
+        '''Create the libvirt virtual machine.'''
+
+        self.status(msg='Create libvirt/kvm virtual
machine') + cliapp.runcmd( + ['ssh', ssh_host, + 'virt-install', '--connect qemu:///system', '--import', + '--name', vm_name, '--ram', '1024', '--vnc', '--noreboot', + '--disk path=%s,bus=ide' % vm_path]) + + +KvmPlusSshWriteExtension().run() + -- cgit v1.2.1 From 769c0ae2808a3c3e383c470795510b72c45c5a76 Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Thu, 7 Feb 2013 11:51:06 +0000 Subject: Remove debugging output Suggested-By: Richard Maw --- kvm.write | 13 ------------- rawdisk.write | 6 ------ virtualbox-ssh.write | 10 ---------- 3 files changed, 29 deletions(-) diff --git a/kvm.write b/kvm.write index ed85b17e..e52e5553 100755 --- a/kvm.write +++ b/kvm.write @@ -54,19 +54,6 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): temp_root, location = args ssh_host, vm_name, vm_path = self.parse_location(location) - self.status( - msg='Temporary system root: %(temp_root)s', - temp_root=temp_root) - self.status( - msg='libvirt server: %(ssh_host)s', - ssh_host=ssh_host) - self.status( - msg='libvirt guest: %(vm_name)s', - vm_name=vm_name) - self.status( - msg='libvirt disk image: %(vm_path)s', - vm_path=vm_path) - size = self.get_disk_size() self.status(msg='Disk size is %(size)d bytes', size=size) diff --git a/rawdisk.write b/rawdisk.write index c6f9c7f6..89e9e82b 100755 --- a/rawdisk.write +++ b/rawdisk.write @@ -39,12 +39,6 @@ class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): raise cliapp.AppException('Wrong number of command line args') temp_root, location = args - self.status( - msg='Temporary system root: %(temp_root)s', - temp_root=temp_root) - self.status( - msg='Disk image to create: %(location)s', - location=location) size = self.get_disk_size() self.status(msg='Disk size is %(size)d bytes', size=size) diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index e2f592e7..dbfb90a7 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -61,16 +61,6 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): temp_root, location = args ssh_host, vm_name, vdi_path = self.parse_location(location) - self.status( - msg='Temporary system root: %(temp_root)s', - temp_root=temp_root) - self.status( - msg='VirtualBox server: %(ssh_host)s', - ssh_host=ssh_host) - self.status( - msg='VirtualBox guest: %(vm_name)s', - vm_name=vm_name) - size = self.get_disk_size() self.status(msg='Disk size is %(size)d bytes', size=size) -- cgit v1.2.1 From 099f2ef05cd52dd623b47901ecc361754aa729f7 Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Thu, 7 Feb 2013 11:41:04 +0000 Subject: Refactor: Add WriteExtension.create_local_system method This allows code sharing amongst all the places that create a system in a raw disk image. This also adds the creation of a factory-run subvolume, and fixes error messages for errors that happen during a disk image creation. 
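The create_local_system method itself is added to morphlib.writeexts,
which is not part of this series. As a rough sketch only, inferred
from the per-extension code this commit deletes, it plausibly does
something like:

    def create_local_system(self, temp_root, location):
        size = self.get_disk_size()
        self.create_raw_disk_image(location, size)
        self.mkfs_btrfs(location)
        mp = self.mount(location)
        try:
            self.create_factory(mp, temp_root)
            # the commit message mentions a new factory-run subvolume;
            # this helper name is assumed, not taken from the source
            self.create_factory_run(mp)
            self.create_fstab(mp)
            self.install_extlinux(mp)
        finally:
            self.unmount(mp)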
Suggested-By: Richard Maw Suggested-By: Sam Thursfield --- kvm.write | 35 +++++++++-------------------------- rawdisk.write | 21 +++------------------ virtualbox-ssh.write | 29 +++++------------------------ 3 files changed, 17 insertions(+), 68 deletions(-) diff --git a/kvm.write b/kvm.write index e52e5553..09a7d224 100755 --- a/kvm.write +++ b/kvm.write @@ -18,8 +18,11 @@ '''A Morph deployment write extension for deploying to KVM+libvirt.''' +import cliapp import os import re +import sys +import tempfile import urlparse import morphlib.writeexts @@ -54,35 +57,15 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): temp_root, location = args ssh_host, vm_name, vm_path = self.parse_location(location) - size = self.get_disk_size() - self.status(msg='Disk size is %(size)d bytes', size=size) + fd, raw_disk = tempfile.mkstemp() + os.close(fd) + self.create_local_system(temp_root, raw_disk) - raw_disk = tempfile.mkstemp() - self.create_raw_disk_image(raw_disk, size) try: - self.mkfs_btrfs(raw_disk) - mp = self.mount(raw_disk) - except BaseException: - self.status(msg='EEEK') - os.remove(raw_disk) - raise - try: - self.create_factory(mp, temp_root) - self.create_fstab(mp) - self.install_extlinux(mp) - except BaseException, e: - self.status(msg='EEK') - self.unmount(mp) - os.remove(raw_disk) - raise - else: - self.unmount(mp) - - try: - self.transfer(raw_disk, size, ssh_host, vm_path) + self.transfer(raw_disk, ssh_host, vm_path) self.create_libvirt_guest(ssh_host, vm_name, vm_path) except BaseException: - self.status(msg='EEEK') + sys.stderr.write('Error deploying to libvirt') os.remove(raw_disk) raise else: @@ -104,7 +87,7 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): raise cliapp.AppException('Cannot parse location %s' % location) return x.netloc, m.group('guest'), m.group('path') - def transfer(self, raw_disk, size, ssh_host, vm_path): + def transfer(self, raw_disk, ssh_host, vm_path): '''Transfer raw disk image to libvirt host.''' self.status(msg='Transfer disk image') diff --git a/rawdisk.write b/rawdisk.write index 89e9e82b..a55473f2 100755 --- a/rawdisk.write +++ b/rawdisk.write @@ -19,6 +19,7 @@ import os +import sys import time import tempfile @@ -40,24 +41,8 @@ class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): temp_root, location = args - size = self.get_disk_size() - self.status(msg='Disk size is %(size)d bytes', size=size) - - self.create_raw_disk_image(location, size) - self.mkfs_btrfs(location) - mp = self.mount(location) - try: - self.create_factory(mp, temp_root) - self.create_fstab(mp) - self.install_extlinux(mp) - except BaseException, e: - self.status(msg='EEK') - self.unmount(mp) - raise - else: - self.unmount(mp) - - self.status(msg='Disk image has been created') + self.create_local_system(temp_root, location) + self.status(msg='Disk image has been created at %s' % location) RawDiskWriteExtension().run() diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index dbfb90a7..5d118ec4 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -23,6 +23,7 @@ accessed over ssh. The machine gets created, but not started. 
''' +import cliapp import os import re import time @@ -61,36 +62,16 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): temp_root, location = args ssh_host, vm_name, vdi_path = self.parse_location(location) - size = self.get_disk_size() - self.status(msg='Disk size is %(size)d bytes', size=size) - - raw_disk = tempfile.mkstemp() - self.create_raw_disk_image(raw_disk, size) - try: - self.mkfs_btrfs(raw_disk) - mp = self.mount(raw_disk) - except BaseException: - self.status('EEEK') - os.remove(raw_disk) - raise - try: - self.create_factory(mp, temp_root) - self.create_fstab(mp) - self.install_extlinux(mp) - except BaseException, e: - self.status(msg='EEK') - self.unmount(mp) - os.remove(raw_disk) - raise - else: - self.unmount(mp) + fd, raw_disk = tempfile.mkstemp() + os.close(fd) + self.create_local_system(temp_root) try: self.transfer_and_convert_to_vdi( raw_disk, size, ssh_host, vdi_path) self.create_virtualbox_guest(ssh_host, vm_name, vdi_path) except BaseException: - self.status('EEEK') + sys.stderr.write('Error deploying to VirtualBox') os.remove(raw_disk) raise else: -- cgit v1.2.1 From 311a41778532dc5d102f5c8e2cbe0d3dd58cadf1 Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Thu, 7 Feb 2013 15:17:56 +0000 Subject: Add missing argument to create_local_system method Reported-By: Richard Maw --- virtualbox-ssh.write | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index 5d118ec4..c21dcc57 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -64,7 +64,7 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): fd, raw_disk = tempfile.mkstemp() os.close(fd) - self.create_local_system(temp_root) + self.create_local_system(temp_root, raw_disk) try: self.transfer_and_convert_to_vdi( -- cgit v1.2.1 From 66b2c4cdb96fc20042d0ebb34f903c4692ae5bb5 Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Tue, 12 Feb 2013 16:48:03 +0000 Subject: Improve message phrasing Suggested-By: Richard Maw --- kvm.write | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kvm.write b/kvm.write index 09a7d224..1579dc1f 100755 --- a/kvm.write +++ b/kvm.write @@ -90,7 +90,7 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): def transfer(self, raw_disk, ssh_host, vm_path): '''Transfer raw disk image to libvirt host.''' - self.status(msg='Transfer disk image') + self.status(msg='Transferring disk image') target = '%s:%s' % (ssh_host, vm_path) with open(raw_disk, 'rb') as f: cliapp.runcmd(['rsync', '-zS', raw_disk, target]) @@ -98,7 +98,7 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): def create_libvirt_guest(self, ssh_host, vm_name, vm_path): '''Create the libvirt virtual machine.''' - self.status(msg='Create libvirt/kvm virtual machine') + self.status(msg='Creating libvirt/kvm virtual machine') cliapp.runcmd( ['ssh', ssh_host, 'virt-install', '--connect qemu:///system', '--import', -- cgit v1.2.1 From c57547b87cdad636a28a3f378104c4279b03b45f Mon Sep 17 00:00:00 2001 From: Ric Holland Date: Tue, 5 Mar 2013 10:32:03 +0000 Subject: Fix virtualbox deployment It was missing an import for sys. It did not have the size of the disk either, this has also been fixed --- virtualbox-ssh.write | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index c21dcc57..9b99c7a1 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -26,6 +26,7 @@ accessed over ssh. 
The machine gets created, but not started. import cliapp import os import re +import sys import time import tempfile import urlparse @@ -68,7 +69,7 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): try: self.transfer_and_convert_to_vdi( - raw_disk, size, ssh_host, vdi_path) + raw_disk, ssh_host, vdi_path) self.create_virtualbox_guest(ssh_host, vm_name, vdi_path) except BaseException: sys.stderr.write('Error deploying to VirtualBox') @@ -93,14 +94,15 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): raise cliapp.AppException('Cannot parse location %s' % location) return x.netloc, m.group('guest'), m.group('path') - def transfer_and_convert_to_vdi(self, raw_disk, size, ssh_host, vdi_path): + def transfer_and_convert_to_vdi(self, raw_disk, ssh_host, vdi_path): '''Transfer raw disk image to VirtualBox host, and convert to VDI.''' self.status(msg='Transfer disk and convert to VDI') with open(raw_disk, 'rb') as f: cliapp.runcmd( ['ssh', ssh_host, - 'VBoxManage', 'convertfromraw', 'stdin', vdi_path, str(size)], + 'VBoxManage', 'convertfromraw', 'stdin', vdi_path, + str(os.path.getsize(raw_disk))], stdin=f) def create_virtualbox_guest(self, ssh_host, vm_name, vdi_path): -- cgit v1.2.1 From 5178028a2fe4f38a9f163a910926e1ae5e3eb517 Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Wed, 13 Mar 2013 13:52:52 +0000 Subject: Add ATTACH_DISKS support to kvm --- kvm.write | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/kvm.write b/kvm.write index 1579dc1f..c491f206 100755 --- a/kvm.write +++ b/kvm.write @@ -99,11 +99,18 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): '''Create the libvirt virtual machine.''' self.status(msg='Creating libvirt/kvm virtual machine') + + attach_disks = self.parse_attach_disks() + attach_opts = [] + for disk in attach_disks: + attach_opts.extend(['--disk', 'path=%s' % disk]) + cliapp.runcmd( ['ssh', ssh_host, 'virt-install', '--connect qemu:///system', '--import', '--name', vm_name, '--ram', '1024', '--vnc', '--noreboot', - '--disk path=%s,bus=ide' % vm_path]) + '--disk path=%s,bus=ide' % vm_path] + + attach_opts) KvmPlusSshWriteExtension().run() -- cgit v1.2.1 From 717e3e91b3a606cca14f1a4c1d44a28cbbbe3c9e Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Wed, 13 Mar 2013 15:11:36 +0000 Subject: Implement ATTACH_DISKS in virtualbox-ssh.write --- virtualbox-ssh.write | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index 9b99c7a1..862d4f02 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -121,7 +121,17 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): '--port', '0', '--device', '0', '--type', 'hdd', '--medium', vdi_path], ] - + + attach_disks = self.parse_attach_disks() + for device_no, disk in enumerate(attach_disks, 1): + cmd = ['storageattach', vm_name, + '--storagectl', '"SATA Controller"', + '--port', str(device_no), + '--device', '0', + '--type', 'hdd', + '--medium', disk] + commands.append(cmd) + for command in commands: argv = ['ssh', ssh_host, 'VBoxManage'] + command cliapp.runcmd(argv) -- cgit v1.2.1 From bc7b098eb300fee1cf7b5f09da49a5d23a82415c Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Thu, 28 Mar 2013 13:42:06 +0000 Subject: Set RAM size for kvm deployment --- kvm.write | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kvm.write b/kvm.write index c491f206..630f6ae7 100755 --- a/kvm.write +++ b/kvm.write @@ -105,10 
+105,13 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): for disk in attach_disks: attach_opts.extend(['--disk', 'path=%s' % disk]) + ram_mebibytes = str(self.get_ram_size() / (1024**2)) + cliapp.runcmd( ['ssh', ssh_host, 'virt-install', '--connect qemu:///system', '--import', - '--name', vm_name, '--ram', '1024', '--vnc', '--noreboot', + '--name', vm_name, '--vnc', '--noreboot', + '--ram=%s' % ram_mebibytes, '--disk path=%s,bus=ide' % vm_path] + attach_opts) -- cgit v1.2.1 From 02a5188ba2a89af403919f8635e146de003dd05f Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Thu, 28 Mar 2013 13:44:15 +0000 Subject: Set RAM size in VirtualBox deployments --- virtualbox-ssh.write | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index 862d4f02..37f56524 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -110,10 +110,12 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): self.status(msg='Create VirtualBox virtual machine') + ram_mebibytes = str(self.get_ram_size() / (1024**2)) + commands = [ ['createvm', '--name', vm_name, '--ostype', 'Linux26_64', '--register'], - ['modifyvm', vm_name, '--ioapic', 'on', '--memory', '1024', + ['modifyvm', vm_name, '--ioapic', 'on', '--memory', ram_mebibytes, '--nic1', 'nat'], ['storagectl', vm_name, '--name', '"SATA Controller"', '--add', 'sata', '--bootable', 'on', '--sataportcount', '2'], -- cgit v1.2.1 From 9c9eb1cdd185145b272e32ed7327c2c91011f4e5 Mon Sep 17 00:00:00 2001 From: Richard Holland Date: Wed, 3 Apr 2013 13:56:46 +0000 Subject: SSH Configuration Extension Added a configuration extension that copies SSH keys across to the deployed system. --- ssh.configure | 141 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 141 insertions(+) create mode 100755 ssh.configure diff --git a/ssh.configure b/ssh.configure new file mode 100755 index 00000000..8650b4f5 --- /dev/null +++ b/ssh.configure @@ -0,0 +1,141 @@ +#!/usr/bin/python +# Copyright (C) 2013 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +'''A Morph deployment configuration to copy SSH keys. + +Keys are copied from the host to the new system. +''' + +import cliapp +import os +import sys +import shutil +import glob +import logging + +import morphlib + +class SshConfigurationExtension(cliapp.Application): + + '''Copy over SSH keys to new system from host. + + The extension requires SSH_KEY_DIR to be set at the command line as it + will otherwise pass with only a status update. SSH_KEY_DIR should be + set to the location of the SSH keys to be passed to the new system. + + ''' + + def process_args(self, args): + if 'SSH_KEY_DIR' in os.environ: + # Copies ssh_host keys. 
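+            # (the glob patterns below match key pairs named like
+            # ssh_host_rsa_key and ssh_host_rsa_key.pub in SSH_KEY_DIR)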
+ key = 'ssh_host_*_key' + mode = 0755 + dest = os.path.join(args[0], 'etc/ssh/') + sshhost, sshhostpub = self.find_keys(key) + if sshhost or sshhostpub: + self.check_dir(dest, mode) + self.copy_keys(sshhost, sshhostpub, dest) + + # Copies root keys. + key = 'root_*_key' + mode = 0700 + dest = os.path.join(args[0], 'root/.ssh/') + roothost, roothostpub = self.find_keys(key) + key = 'root_authorized_key_*.pub' + authkey, bleh = self.find_keys(key) + if roothost or roothostpub: + self.check_dir(dest, mode) + self.copy_rename_keys(roothost, + roothostpub, dest, 'id_', [15, 4]) + if authkey: + self.check_dir(dest, mode) + self.comb_auth_key(authkey, dest) + else: + self.status(msg="No SSH key directory found.") + pass + + def find_keys(self, key_name): + '''Uses glob to find public and + private SSH keys and returns their path''' + + src = os.path.join(os.environ['SSH_KEY_DIR'], key_name) + keys = glob.glob(src) + pubkeys = glob.glob(src + '.pub') + if not (keys or pubkeys): + self.status(msg="No SSH keys of pattern %(src)s found.", src=src) + return keys, pubkeys + + def check_dir(self, dest, mode): + '''Checks if destination directory exists + and creates it if necessary''' + + if os.path.exists(dest) == False: + self.status(msg="Creating SSH key directory: %(dest)s", dest=dest) + os.mkdir(dest) + os.chmod(dest, mode) + else: + pass + + def copy_keys(self, keys, pubkeys, dest): + '''Copies SSH keys to new VM''' + + for key in keys: + shutil.copy(key, dest) + os.chmod(dest, 0600) + for key in pubkeys: + shutil.copy(key, dest) + os.chmod(dest, 0644) + + def copy_rename_keys(self, keys, pubkeys, dest, new, snip): + '''Copies SSH keys to new VM and renames them''' + + st, fi = snip + for key in keys: + s = len(key) + nw_dst = os.path.join(dest, new + key[st:s-fi]) + shutil.copy(key, nw_dst) + os.chmod(nw_dst, 0600) + for key in pubkeys: + s = len(key) + nw_dst = os.path.join(dest, new + key[st:s-fi-4]) + shutil.copy(key, nw_dst + '.pub') + os.chmod(nw_dst, 0644) + + def comb_auth_key(self, keys, dest): + '''Combines authorized_keys file in new VM''' + + dest = os.path.join(dest, 'authorized_keys') + fout = open(dest, 'a') + for key in keys: + fin = open(key, 'r') + data = fin.read() + fout.write(data) + fin.close() + fout.close() + os.chmod(dest, 0600) + + def status(self, **kwargs): + '''Provide status output. + + The ``msg`` keyword argument is the actual message, + the rest are values for fields in the message as interpolated + by %. + + ''' + + self.output.write('%s\n' % (kwargs['msg'] % kwargs)) + +SshConfigurationExtension().run() -- cgit v1.2.1 From a936783aeba65f7a0219523edc435bd988ae4062 Mon Sep 17 00:00:00 2001 From: Richard Holland Date: Thu, 4 Apr 2013 17:09:36 +0000 Subject: Fixed error SSH configuration extension Fixed error in function copy_rename_key that tried to place key in non existent directory. Required use of os.path.basename to leave name of key and remove rest of path. 
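For instance, given the hypothetical key path
/etc/morph/keys/root_rsa_key:

    >>> os.path.basename('/etc/morph/keys/root_rsa_key')
    'root_rsa_key'

With the corrected snip of [5, 4], the slice then strips the 'root_'
prefix and '_key' suffix from the filename alone, so the key is
installed as id_rsa.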
---
 ssh.configure | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/ssh.configure b/ssh.configure
index 8650b4f5..75b46b11 100755
--- a/ssh.configure
+++ b/ssh.configure
@@ -59,7 +59,7 @@ class SshConfigurationExtension(cliapp.Application):
         if roothost or roothostpub:
             self.check_dir(dest, mode)
             self.copy_rename_keys(roothost,
-                 roothostpub, dest, 'id_', [15, 4])
+                 roothostpub, dest, 'id_', [5, 4])
         if authkey:
             self.check_dir(dest, mode)
             self.comb_auth_key(authkey, dest)
@@ -101,16 +101,18 @@ class SshConfigurationExtension(cliapp.Application):
 
     def copy_rename_keys(self, keys, pubkeys, dest, new, snip):
         '''Copies SSH keys to new VM and renames them'''
-
+
         st, fi = snip
         for key in keys:
-            s = len(key)
-            nw_dst = os.path.join(dest, new + key[st:s-fi])
+            base = os.path.basename(key)
+            s = len(base)
+            nw_dst = os.path.join(dest, new + base[st:s-fi])
             shutil.copy(key, nw_dst)
             os.chmod(nw_dst, 0600)
         for key in pubkeys:
-            s = len(key)
-            nw_dst = os.path.join(dest, new + key[st:s-fi-4])
+            base = os.path.basename(key)
+            s = len(base)
+            nw_dst = os.path.join(dest, new + base[st:s-fi-4])
             shutil.copy(key, nw_dst + '.pub')
             os.chmod(nw_dst, 0644)
-- cgit v1.2.1

From 12f67e6a172fe611a014d65b753d3a51a27c7298 Mon Sep 17 00:00:00 2001
From: Jonathan Maw
Date: Tue, 26 Mar 2013 12:08:51 +0000
Subject: Add nfsboot configuration extension

This configuration-extension removes every network interface but the
loopback interface from /etc/network/interfaces and stops /etc/fstab
from mounting "/".

It will only do this if the environment variable NFSBOOT_CONFIGURE is
set on the `morph deploy` command-line.
---
 nfsboot.configure | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)
 create mode 100755 nfsboot.configure

diff --git a/nfsboot.configure b/nfsboot.configure
new file mode 100755
index 00000000..8dc6c67c
--- /dev/null
+++ b/nfsboot.configure
@@ -0,0 +1,32 @@
+#!/bin/sh
+# Copyright (C) 2013 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+# Remove all networking interfaces and stop fstab from mounting '/'
+
+
+set -e
+if [ "$NFSBOOT_CONFIGURE" ]; then
+    # Remove all networking interfaces but loopback
+    cat > "$1/etc/network/interfaces" <<EOF
+auto lo
+iface lo inet loopback
+EOF
+
+    # Stop fstab from mounting '/'
+    echo > "$1/etc/fstab"
+fi
+
-- cgit v1.2.1

From c995c45cf0ba039296153b4e6f463fa6e538ff14 Mon Sep 17 00:00:00 2001
From: Jonathan Maw
Date: Tue, 26 Mar 2013 14:11:34 +0000
Subject: Add nfsboot write extension

The 'location' command-line argument refers to the hostname of the
'nfsboot server', a baserock system that has an nfs server, the
directory '/srv/nfsboot/nfs', and a tftp server that hosts files from
the directory '/srv/nfsboot/tftp'.

The write extension will read the hostname of the target system and
copy its root filesystem to /srv/nfsboot/nfs/ and its kernel to
/srv/nfsboot/tftp/. It will then configure the nfs server to export
that nfs root.
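For illustration: deploying a system whose /etc/hostname contains
'devbox' (a made-up hostname) leaves the server with

    /srv/nfsboot/tftp/devbox   the system's kernel, served over TFTP
    /srv/nfsboot/nfs/devbox    the system's root filesystem, exported
                               via an entry appended to /etc/exports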
--- nfsboot.write | 161 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 161 insertions(+) create mode 100755 nfsboot.write diff --git a/nfsboot.write b/nfsboot.write new file mode 100755 index 00000000..60b4d00d --- /dev/null +++ b/nfsboot.write @@ -0,0 +1,161 @@ +#!/usr/bin/python +# Copyright (C) 2012-2013 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + +'''A Morph deployment write extension for deploying to an nfsboot server + +An nfsboot server is defined as a baserock system that has tftp and nfs +servers running, the tftp server is exporting the contents of +/srv/nfsboot/tftp/ and the user has sufficient permissions to create nfs roots +in /srv/nfsboot/nfs/ + +''' + + +import cliapp +import os +import glob + +import morphlib.writeexts + + +class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): + + '''Create an NFS root and kernel on TFTP during Morph's deployment. + + The location command line argument is the hostname of the nfsboot server. + The user is expected to provide the location argument + using the following syntax: + + HOST + + where: + + * HOST is the host of the nfsboot server + + The extension will connect to root@HOST via ssh to copy the kernel and + rootfs, and configure the nfs server. + + It requires root because it uses systemd, and reads/writes to /etc. 
+ + ''' + + def process_args(self, args): + if len(args) != 2: + raise cliapp.AppException('Wrong number of command line args') + + temp_root, location = args + hostname = self.get_hostname(temp_root) + if hostname == 'baserock': + raise cliapp.AppException('It is forbidden to nfsboot a system ' + 'with hostname "baserock"') + + self.test_good_server(location) + self.copy_kernel(temp_root, location, hostname) + self.copy_rootfs(temp_root, location, hostname) + self.configure_nfs(location, hostname) + + def get_hostname(self, temp_root): + hostnamepath = os.path.join(temp_root, 'etc', 'hostname') + with open(hostnamepath) as f: + return f.readline().strip() + + def copy_kernel(self, temp_root, location, hostname): + bootdir = os.path.join(temp_root, 'boot') + image_names = ['vmlinuz', 'zImage', 'uImage'] + for name in image_names: + try_path = os.path.join(bootdir, name) + if os.path.exists(try_path): + kernel_src = try_path + break + else: + raise cliapp.AppException( + 'Could not find a kernel in the system: none of ' + '%s found' % ', '.join(image_names)) + kernel_dest = os.path.join('/srv/nfsboot/tftp', hostname) + rsync_dest = 'root@%s:%s' % (location, kernel_dest) + cliapp.runcmd( + ['rsync', kernel_src, rsync_dest]) + + def copy_rootfs(self, temp_root, location, hostname): + rootfs_src = temp_root + '/' + rootfs_dest = os.path.join('/srv/nfsboot/nfs', hostname) + rsync_dest = 'root@%s:%s' % (location, rootfs_dest) + cliapp.runcmd( + ['rsync', '-a', rootfs_src, rsync_dest]) + + def configure_nfs(self, location, hostname): + rootfs_dest = os.path.join('/srv/nfsboot/nfs', hostname) + exports_path = '/etc/exports' + # If that path is not already exported: + try: + cliapp.ssh_runcmd( + 'root@%s' % location, ['grep', '-q', rootfs_dest, + exports_path]) + except cliapp.AppException: + ip_mask = '*' + options = 'rw,no_subtree_check,no_root_squash,async' + exports_string = '%s %s(%s)\n' % (rootfs_dest, ip_mask, options) + exports_append_sh = '''\ +set -eu +target = "$1" +temp = $(mktemp) +cat "$target" > "$temp" +cat >> "$temp" +mv "$temp" "$target" +''' + cliapp.ssh_runcmd( + 'root@%s' % location, + ['sh', '-c', exports_append_sh, '--', exports_path], + feed_stdin=exports_string) + cliapp.ssh_runcmd( + 'root@%s' % location, ['systemctl', 'reload', + 'nfs-server.service']) + + def test_good_server(self, server): + # Can be ssh'ed into + try: + cliapp.ssh_runcmd('root@%s' % server, ['true']) + except cliapp.AppException: + raise cliapp.AppException('You are unable to ssh into server %s' + % server) + + # Is an NFS server + try: + cliapp.ssh_runcmd( + 'root@%s' % server, ['test', '-e', '/etc/exports']) + except cliapp.AppException: + raise cliapp.AppException('server %s is not an nfs server' + % server) + try: + cliapp.ssh_runcmd( + 'root@%s' % server, ['systemctl', 'is-enabled', + 'nfs-server.service']) + + except cliapp.AppException: + raise cliapp.AppException('server %s does not control its ' + 'nfs server by systemd' % server) + + # TFTP server exports /srv/nfsboot/tftp + try: + cliapp.ssh_runcmd( + 'root@%s' % server, ['test' , '-d', '/srv/nfsboot/tftp']) + except cliapp.AppException: + raise cliapp.AppException('server %s does not export ' + '/srv/nfsboot/tftp' % server) + +NFSBootWriteExtension().run() + -- cgit v1.2.1 From 6255080a8227cccbbf892332b3f83b4c52763897 Mon Sep 17 00:00:00 2001 From: Jonathan Maw Date: Mon, 8 Apr 2013 16:49:57 +0000 Subject: Fix nfsboot write-extension Reviewed-by: Lars Wirzenius --- nfsboot.write | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) 
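The whitespace in the shell script is the whole bug: POSIX shell only
treats name=value as an assignment when there is no space around '=',
so

    target = "$1"   # runs a command named 'target' with args '=' and $1
    target="$1"     # assigns $1 to the variable 'target'

and under 'set -eu' the first form aborts the script immediately. The
reload-to-restart change below is independent of this.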
diff --git a/nfsboot.write b/nfsboot.write index 60b4d00d..293b1acf 100755 --- a/nfsboot.write +++ b/nfsboot.write @@ -111,8 +111,8 @@ class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): exports_string = '%s %s(%s)\n' % (rootfs_dest, ip_mask, options) exports_append_sh = '''\ set -eu -target = "$1" -temp = $(mktemp) +target="$1" +temp=$(mktemp) cat "$target" > "$temp" cat >> "$temp" mv "$temp" "$target" @@ -122,7 +122,7 @@ mv "$temp" "$target" ['sh', '-c', exports_append_sh, '--', exports_path], feed_stdin=exports_string) cliapp.ssh_runcmd( - 'root@%s' % location, ['systemctl', 'reload', + 'root@%s' % location, ['systemctl', 'restart', 'nfs-server.service']) def test_good_server(self, server): -- cgit v1.2.1 From e8c0f666f4407ed24ca20ba90adf809f45a02e45 Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Fri, 12 Apr 2013 13:04:54 +0000 Subject: Fix copyright year We only have written this this year, so it should be copyright this year only. --- nfsboot.write | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nfsboot.write b/nfsboot.write index 293b1acf..34200793 100755 --- a/nfsboot.write +++ b/nfsboot.write @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2012-2013 Codethink Limited +# Copyright (C) 2013 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by -- cgit v1.2.1 From e1ceaaf5547d4ea030fbe0dee5f46aab67c974de Mon Sep 17 00:00:00 2001 From: Richard Maw Date: Wed, 8 May 2013 13:45:31 +0100 Subject: Fix unfortunate typo in ssh configuration extension This tiny typo unfortunately makes root unable to use ssh, since ssh refuses to authenticate if your private key is globally readable, and the typo causes the private key to be given the public key's permissions. --- ssh.configure | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ssh.configure b/ssh.configure index 75b46b11..6012f298 100755 --- a/ssh.configure +++ b/ssh.configure @@ -114,7 +114,7 @@ class SshConfigurationExtension(cliapp.Application): s = len(base) nw_dst = os.path.join(dest, new + base[st:s-fi-4]) shutil.copy(key, nw_dst + '.pub') - os.chmod(nw_dst, 0644) + os.chmod(nw_dst + '.pub', 0644) def comb_auth_key(self, keys, dest): '''Combines authorized_keys file in new VM''' -- cgit v1.2.1 From 937544ac7a370f433eda4240148a33c64ff43c56 Mon Sep 17 00:00:00 2001 From: Richard Maw Date: Wed, 8 May 2013 17:04:45 +0100 Subject: Add AUTOSTART to kvm and libvirt write extensions If AUTOSTART is 'yes' then the VM will be started once it is created. If it is 'no' or undefined, then it will need to be manually started. If it is any other value, then an exception is raised. 
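The parse_autostart helper used below is provided by
morphlib.writeexts and is not part of this series; going by the
behaviour described above, a rough sketch of it could look like this
(an assumption, not the actual implementation):

    def parse_autostart(self):
        # 'no' or unset: do not start the VM; 'yes': start it;
        # anything else is an error
        autostart = os.environ.get('AUTOSTART', 'no')
        if autostart == 'no':
            return False
        elif autostart == 'yes':
            return True
        raise cliapp.AppException(
            'Unexpected value for AUTOSTART: %s' % autostart)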
---
 kvm.write            | 19 ++++++++++---------
 virtualbox-ssh.write |  9 +++++++--
 2 files changed, 17 insertions(+), 11 deletions(-)

diff --git a/kvm.write b/kvm.write
index 630f6ae7..e2f7435c 100755
--- a/kvm.write
+++ b/kvm.write
@@ -56,6 +56,7 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension):
 
         temp_root, location = args
         ssh_host, vm_name, vm_path = self.parse_location(location)
+        autostart = self.parse_autostart()
 
         fd, raw_disk = tempfile.mkstemp()
         os.close(fd)
@@ -63,7 +64,7 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension):
 
         try:
             self.transfer(raw_disk, ssh_host, vm_path)
-            self.create_libvirt_guest(ssh_host, vm_name, vm_path)
+            self.create_libvirt_guest(ssh_host, vm_name, vm_path, autostart)
         except BaseException:
             sys.stderr.write('Error deploying to libvirt')
             os.remove(raw_disk)
@@ -95,7 +96,7 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension):
         with open(raw_disk, 'rb') as f:
             cliapp.runcmd(['rsync', '-zS', raw_disk, target])
 
-    def create_libvirt_guest(self, ssh_host, vm_name, vm_path):
+    def create_libvirt_guest(self, ssh_host, vm_name, vm_path, autostart):
         '''Create the libvirt virtual machine.'''
 
         self.status(msg='Creating libvirt/kvm virtual machine')
@@ -107,13 +108,13 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension):
 
         ram_mebibytes = str(self.get_ram_size() / (1024**2))
 
-        cliapp.runcmd(
-            ['ssh', ssh_host,
-            'virt-install', '--connect qemu:///system', '--import',
-            '--name', vm_name, '--vnc', '--noreboot',
-            '--ram=%s' % ram_mebibytes,
-            '--disk path=%s,bus=ide' % vm_path] +
-            attach_opts)
+        cmdline = ['ssh', ssh_host,
+                   'virt-install', '--connect qemu:///system', '--import',
+                   '--name', vm_name, '--vnc', '--ram=%s' % ram_mebibytes,
+                   '--disk path=%s,bus=ide' % vm_path] + attach_opts
+        if not autostart:
+            cmdline.append('--noreboot')
+        cliapp.runcmd(cmdline)
 
 
 KvmPlusSshWriteExtension().run()
diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write
index 37f56524..cb17b69b 100755
--- a/virtualbox-ssh.write
+++ b/virtualbox-ssh.write
@@ -62,6 +62,7 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension):
 
         temp_root, location = args
         ssh_host, vm_name, vdi_path = self.parse_location(location)
+        autostart = self.parse_autostart()
 
         fd, raw_disk = tempfile.mkstemp()
         os.close(fd)
@@ -70,7 +71,8 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension):
         try:
             self.transfer_and_convert_to_vdi(
                 raw_disk, ssh_host, vdi_path)
-            self.create_virtualbox_guest(ssh_host, vm_name, vdi_path)
+            self.create_virtualbox_guest(ssh_host, vm_name, vdi_path,
+                                         autostart)
         except BaseException:
             sys.stderr.write('Error deploying to VirtualBox')
             os.remove(raw_disk)
@@ -105,7 +107,7 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension):
                  str(os.path.getsize(raw_disk))],
                 stdin=f)
 
-    def create_virtualbox_guest(self, ssh_host, vm_name, vdi_path):
+    def create_virtualbox_guest(self, ssh_host, vm_name, vdi_path, autostart):
         '''Create the VirtualBox virtual machine.'''
 
         self.status(msg='Create VirtualBox virtual machine')
@@ -134,6 +136,9 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension):
                    '--medium', disk]
             commands.append(cmd)
 
+        if autostart:
+            commands.append(['startvm', vm_name])
+
         for command in commands:
             argv = ['ssh', ssh_host, 'VBoxManage'] + command
             cliapp.runcmd(argv)
-- cgit v1.2.1

From 45258312f5e0f1a72d9440bb688ed2863200d980 Mon Sep 17 00:00:00 2001
From: Tiago Gomes
Date: Thu, 9 May 2013 14:55:35 +0000
Subject: Allow to add public ssh keys to known hosts file

---
ssh.configure | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/ssh.configure b/ssh.configure index 6012f298..7a7e1667 100755 --- a/ssh.configure +++ b/ssh.configure @@ -24,6 +24,7 @@ import os import sys import shutil import glob +import re import logging import morphlib @@ -63,6 +64,20 @@ class SshConfigurationExtension(cliapp.Application): if authkey: self.check_dir(dest, mode) self.comb_auth_key(authkey, dest) + + # Fills the known_hosts file + key = 'root_known_host_*_key.pub' + src = os.path.join(os.environ['SSH_KEY_DIR'], key) + known_hosts_keys = glob.glob(src) + known_hosts_path = os.path.join(args[0], 'root/.ssh/known_hosts') + known_hosts_file = open(known_hosts_path, "a") + for filename in known_hosts_keys: + hostname = re.search('root_known_host_(.+?)_key.pub', filename).group(1) + known_hosts_file.write(hostname + " ") + f = open(filename, "r") + known_hosts_file.write(f.read()) + f.close() + known_hosts_file.close() else: self.status(msg="No SSH key directory found.") pass -- cgit v1.2.1 From bd0e81a9b5e232124bf4c83e7125cb6fa4c00147 Mon Sep 17 00:00:00 2001 From: Richard Maw Date: Thu, 9 May 2013 15:25:24 +0000 Subject: Merge branch 'support-known-hosts' of git://git.baserock.org/baserock/baserock/morph A column width error was fixed up in the merge. --- ssh.configure | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ssh.configure b/ssh.configure index 7a7e1667..29a3589a 100755 --- a/ssh.configure +++ b/ssh.configure @@ -72,7 +72,8 @@ class SshConfigurationExtension(cliapp.Application): known_hosts_path = os.path.join(args[0], 'root/.ssh/known_hosts') known_hosts_file = open(known_hosts_path, "a") for filename in known_hosts_keys: - hostname = re.search('root_known_host_(.+?)_key.pub', filename).group(1) + hostname = re.search('root_known_host_(.+?)_key.pub', + filename).group(1) known_hosts_file.write(hostname + " ") f = open(filename, "r") known_hosts_file.write(f.read()) -- cgit v1.2.1 From 38a4ce37b1785a200f2bf141fb41b7bb0fde4512 Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Thu, 9 May 2013 18:02:06 +0000 Subject: Improve the ssh configuration file. 
- Don't fail if the root/.ssh directory does not exist when adding entries to the known_hosts file - Use shutil.copyfilobj to copy the file contents - Use the python with statatement when opening files --- ssh.configure | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/ssh.configure b/ssh.configure index 29a3589a..55fbbb30 100755 --- a/ssh.configure +++ b/ssh.configure @@ -69,16 +69,17 @@ class SshConfigurationExtension(cliapp.Application): key = 'root_known_host_*_key.pub' src = os.path.join(os.environ['SSH_KEY_DIR'], key) known_hosts_keys = glob.glob(src) - known_hosts_path = os.path.join(args[0], 'root/.ssh/known_hosts') - known_hosts_file = open(known_hosts_path, "a") - for filename in known_hosts_keys: - hostname = re.search('root_known_host_(.+?)_key.pub', - filename).group(1) - known_hosts_file.write(hostname + " ") - f = open(filename, "r") - known_hosts_file.write(f.read()) - f.close() - known_hosts_file.close() + if known_hosts_keys: + self.check_dir(dest, mode) + known_hosts_path = os.path.join(dest, 'known_hosts') + with open(known_hosts_path, "a") as known_hosts_file: + for filename in known_hosts_keys: + hostname = re.search('root_known_host_(.+?)_key.pub', + filename).group(1) + known_hosts_file.write(hostname + " ") + with open(filename, "r") as f: + shutil.copyfileobj(f, known_hosts_file) + else: self.status(msg="No SSH key directory found.") pass -- cgit v1.2.1 From fb9d20ec4aa279928a692bff5961d413334e3be7 Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Thu, 9 May 2013 23:04:48 +0000 Subject: Fix permissions on the ssh configure extension --- ssh.configure | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ssh.configure b/ssh.configure index 55fbbb30..2f3167e7 100755 --- a/ssh.configure +++ b/ssh.configure @@ -111,10 +111,12 @@ class SshConfigurationExtension(cliapp.Application): for key in keys: shutil.copy(key, dest) - os.chmod(dest, 0600) + path = os.path.join(dest, os.path.basename(key)) + os.chmod(path, 0600) for key in pubkeys: shutil.copy(key, dest) - os.chmod(dest, 0644) + path = os.path.join(dest, os.path.basename(key)) + os.chmod(path, 0644) def copy_rename_keys(self, keys, pubkeys, dest, new, snip): '''Copies SSH keys to new VM and renames them''' -- cgit v1.2.1 From c0d4db83c412f8d688b4495a0d2b4bc2342e527b Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Fri, 10 May 2013 04:50:21 +0000 Subject: Add a new configuration extension to drop configuration files to the image /etc --- drop-config-files.configure | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100755 drop-config-files.configure diff --git a/drop-config-files.configure b/drop-config-files.configure new file mode 100755 index 00000000..0094cf6b --- /dev/null +++ b/drop-config-files.configure @@ -0,0 +1,27 @@ +#!/bin/sh +# Copyright (C) 2013 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + +# Copy all files located in $SRC_CONFIG_DIR to the image /etc. + + +set -e + +if [ "x${SRC_CONFIG_DIR}" != x ] +then + cp -r "$SRC_CONFIG_DIR"/* "$1/etc/" +fi + -- cgit v1.2.1 From aacfad8da27cf275b9c11d7220f05d21a28d55ad Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Fri, 10 May 2013 09:08:59 +0000 Subject: Rename the drop-config-files extension to add-config-files --- add-config-files.configure | 27 +++++++++++++++++++++++++++ drop-config-files.configure | 27 --------------------------- 2 files changed, 27 insertions(+), 27 deletions(-) create mode 100755 add-config-files.configure delete mode 100755 drop-config-files.configure diff --git a/add-config-files.configure b/add-config-files.configure new file mode 100755 index 00000000..0094cf6b --- /dev/null +++ b/add-config-files.configure @@ -0,0 +1,27 @@ +#!/bin/sh +# Copyright (C) 2013 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + +# Copy all files located in $SRC_CONFIG_DIR to the image /etc. + + +set -e + +if [ "x${SRC_CONFIG_DIR}" != x ] +then + cp -r "$SRC_CONFIG_DIR"/* "$1/etc/" +fi + diff --git a/drop-config-files.configure b/drop-config-files.configure deleted file mode 100755 index 0094cf6b..00000000 --- a/drop-config-files.configure +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/sh -# Copyright (C) 2013 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - -# Copy all files located in $SRC_CONFIG_DIR to the image /etc. 
- - -set -e - -if [ "x${SRC_CONFIG_DIR}" != x ] -then - cp -r "$SRC_CONFIG_DIR"/* "$1/etc/" -fi - -- cgit v1.2.1 From bbeec556514f7ccf58d38688f39da2b747010e45 Mon Sep 17 00:00:00 2001 From: Jonathan Maw Date: Mon, 13 May 2013 10:32:45 +0000 Subject: Make rsync delete files that shouldn't exist --- nfsboot.write | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nfsboot.write b/nfsboot.write index 34200793..e18ff798 100755 --- a/nfsboot.write +++ b/nfsboot.write @@ -95,7 +95,7 @@ class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): rootfs_dest = os.path.join('/srv/nfsboot/nfs', hostname) rsync_dest = 'root@%s:%s' % (location, rootfs_dest) cliapp.runcmd( - ['rsync', '-a', rootfs_src, rsync_dest]) + ['rsync', '-aXSPH', '--delete', rootfs_src, rsync_dest]) def configure_nfs(self, location, hostname): rootfs_dest = os.path.join('/srv/nfsboot/nfs', hostname) -- cgit v1.2.1 From 2ad0b7db5010c89836be5fae17b138d82c4a033a Mon Sep 17 00:00:00 2001 From: Jonathan Maw Date: Mon, 13 May 2013 14:31:43 +0000 Subject: Make nfsboot use the new hierarchy --- nfsboot.write | 85 +++++++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 74 insertions(+), 11 deletions(-) diff --git a/nfsboot.write b/nfsboot.write index e18ff798..e2ce7db2 100755 --- a/nfsboot.write +++ b/nfsboot.write @@ -53,6 +53,8 @@ class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): ''' + _nfsboot_root = '/srv/nfsboot' + def process_args(self, args): if len(args) != 2: raise cliapp.AppException('Wrong number of command line args') @@ -64,8 +66,12 @@ class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): 'with hostname "baserock"') self.test_good_server(location) - self.copy_kernel(temp_root, location, hostname) - self.copy_rootfs(temp_root, location, hostname) + version = 'version1' + versioned_root = os.path.join(self._nfsboot_root, hostname, 'systems', + version) + self.copy_rootfs(temp_root, location, versioned_root, hostname) + self.copy_kernel(temp_root, location, versioned_root, version, + hostname) self.configure_nfs(location, hostname) def get_hostname(self, temp_root): @@ -73,7 +79,8 @@ class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): with open(hostnamepath) as f: return f.readline().strip() - def copy_kernel(self, temp_root, location, hostname): + def copy_kernel(self, temp_root, location, versioned_root, version, + hostname): bootdir = os.path.join(temp_root, 'boot') image_names = ['vmlinuz', 'zImage', 'uImage'] for name in image_names: @@ -85,30 +92,86 @@ class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): raise cliapp.AppException( 'Could not find a kernel in the system: none of ' '%s found' % ', '.join(image_names)) - kernel_dest = os.path.join('/srv/nfsboot/tftp', hostname) + + kernel_dest = os.path.join(versioned_root, 'orig', 'kernel') rsync_dest = 'root@%s:%s' % (location, kernel_dest) + self.status(msg='Copying kernel') cliapp.runcmd( ['rsync', kernel_src, rsync_dest]) - def copy_rootfs(self, temp_root, location, hostname): + # Link the kernel to the right place + self.status(msg='Creating links to kernel in tftp directory') + tftp_dir = os.path.join(self._nfsboot_root , 'tftp') + versioned_kernel_name = "%s-%s" % (hostname, version) + kernel_name = hostname + try: + cliapp.ssh_runcmd('root@%s' % location, + ['ln', '-f', kernel_dest, + os.path.join(tftp_dir, versioned_kernel_name)]) + + cliapp.ssh_runcmd('root@%s' % location, + ['ln', '-sf', versioned_kernel_name, + os.path.join(tftp_dir, kernel_name)]) + except cliapp.AppException: 
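+            # ssh_runcmd raises cliapp.AppException when the remote
+            # command fails; turn that into a clearer error message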
+ raise cliapp.AppException('Could not create symlinks to the ' + 'kernel at %s in %s on %s' + % (kernel_dest, tftp_dir, location)) + + def copy_rootfs(self, temp_root, location, versioned_root, hostname): rootfs_src = temp_root + '/' - rootfs_dest = os.path.join('/srv/nfsboot/nfs', hostname) - rsync_dest = 'root@%s:%s' % (location, rootfs_dest) + orig_path = os.path.join(versioned_root, 'orig') + run_path = os.path.join(versioned_root, 'run') + + self.status(msg='Creating destination directories') + try: + cliapp.ssh_runcmd('root@%s' % location, + ['mkdir', '-p', orig_path, run_path]) + except cliapp.AppException: + raise cliapp.AppException('Could not create dirs %s and %s on %s' + % (orig_path, run_path, location)) + + self.status(msg='Creating \'orig\' rootfs') cliapp.runcmd( - ['rsync', '-aXSPH', '--delete', rootfs_src, rsync_dest]) + ['rsync', '-aXSPH', '--delete', rootfs_src, + 'root@%s:%s' % (location, orig_path)]) + + self.status(msg='Creating \'run\' rootfs') + try: + cliapp.ssh_runcmd('root@%s' % location, + ['rm', '-rf', run_path]) + cliapp.ssh_runcmd('root@%s' % location, + ['cp', '-al', orig_path, run_path]) + cliapp.ssh_runcmd('root@%s' % location, + ['rm', '-rf', os.path.join(run_path, 'etc')]) + cliapp.ssh_runcmd('root@%s' % location, + ['cp', '-a', os.path.join(orig_path, 'etc'), + os.path.join(run_path, 'etc')]) + except cliapp.AppException: + raise cliapp.AppException('Could not create \'run\' rootfs' + ' from \'orig\'') + + self.status(msg='Linking \'default-run\' to latest system') + try: + cliapp.ssh_runcmd('root@%s' % location, + ['ln', '-sfn', run_path, + os.path.join(self._nfsboot_root, hostname, 'systems', + 'default-run')]) + except cliapp.AppException: + raise cliapp.AppException('Could not link \'default-run\' to %s' + % run_path) def configure_nfs(self, location, hostname): - rootfs_dest = os.path.join('/srv/nfsboot/nfs', hostname) + exported_path = os.path.join(self._nfsboot_root, hostname) exports_path = '/etc/exports' # If that path is not already exported: try: cliapp.ssh_runcmd( - 'root@%s' % location, ['grep', '-q', rootfs_dest, + 'root@%s' % location, ['grep', '-q', exported_path, exports_path]) except cliapp.AppException: ip_mask = '*' options = 'rw,no_subtree_check,no_root_squash,async' - exports_string = '%s %s(%s)\n' % (rootfs_dest, ip_mask, options) + exports_string = '%s %s(%s)\n' % (exported_path, ip_mask, options) exports_append_sh = '''\ set -eu target="$1" -- cgit v1.2.1 From bfcf3a24c78de00293c3ca59dabf8d3c31711fd8 Mon Sep 17 00:00:00 2001 From: "Daniel Silverstone (br7vm)" Date: Fri, 17 May 2013 12:55:04 +0000 Subject: EXTS: simple-network.configure Simple networking configuration extension, taking NETWORK_CONFIG from the environment and writing /etc/network/interfaces out during deployment configuration. --- simple-network.configure | 143 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) create mode 100755 simple-network.configure diff --git a/simple-network.configure b/simple-network.configure new file mode 100755 index 00000000..b98b202c --- /dev/null +++ b/simple-network.configure @@ -0,0 +1,143 @@ +#!/usr/bin/python +# Copyright (C) 2013 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +'''A Morph deployment configuration extension to handle /etc/network/interfaces + +This extension prepares /etc/network/interfaces with the interfaces specified +during deployment. + +If no network configuration is provided, eth0 will be configured for DHCP +with the hostname of the system. +''' + + +import os +import sys +import cliapp + +import morphlib + + +class SimpleNetworkError(morphlib.Error): + '''Errors associated with simple network setup''' + pass + + +class SimpleNetworkConfigurationExtension(cliapp.Application): + '''Configure /etc/network/interfaces + + Reading NETWORK_CONFIG, this extension sets up /etc/network/interfaces. + ''' + + def process_args(self, args): + network_config = os.environ.get( + "NETWORK_CONFIG", "lo:loopback;eth0:dhcp,hostname=$(hostname)") + + self.status(msg="Processing NETWORK_CONFIG=%(nc)s", nc=network_config) + + stanzas = self.parse_network_stanzas(network_config) + iface_file = self.generate_iface_file(stanzas) + + with open(os.path.join(args[0], "etc/network/interfaces"), "w") as f: + f.write(iface_file) + + def generate_iface_file(self, stanzas): + """Generate an interfaces file from the provided stanzas. + + The interfaces will be sorted by name, with loopback sorted first. + """ + + def cmp_iface_names(a, b): + a = a['name'] + b = b['name'] + if a == "lo": + return -1 + elif b == "lo": + return 1 + else: + return cmp(a,b) + + return "\n".join(self.generate_iface_stanza(stanza) + for stanza in sorted(stanzas, cmp=cmp_iface_names)) + + def generate_iface_stanza(self, stanza): + """Generate an interfaces stanza from the provided data.""" + + name = stanza['name'] + itype = stanza['type'] + lines = ["auto %s" % name, "iface %s inet %s" % (name, itype)] + lines += [" %s %s" % elem for elem in stanza['args'].items()] + lines += [""] + return "\n".join(lines) + + + def parse_network_stanzas(self, config): + """Parse a network config environment variable into stanzas. + + Network config stanzas are semi-colon separated. + """ + + return [self.parse_network_stanza(s) for s in config.split(";")] + + def parse_network_stanza(self, stanza): + """Parse a network config stanza into name, type and arguments. + + Each stanza is of the form name:type[,arg=value]... 
+ + For example: + lo:loopback + eth0:dhcp + eth1:static,address=10.0.0.1,netmask=255.255.0.0 + """ + elements = stanza.split(",") + lead = elements.pop(0).split(":") + if len(lead) != 2: + raise SimpleNetworkError("Stanza '%s' is missing its type" % + stanza) + iface = lead[0] + iface_type = lead[1] + + if iface_type not in ['loopback', 'static', 'dhcp']: + raise SimpleNetworkError("Stanza '%s' has unknown interface type" + " '%s'" % (stanza, iface_type)) + + argpairs = [element.split("=", 1) for element in elements] + output_stanza = { "name": iface, + "type": iface_type, + "args": {} } + for argpair in argpairs: + if len(argpair) != 2: + raise SimpleNetworkError("Stanza '%s' has bad argument '%r'" + % (stanza, argpair.pop(0))) + if argpair[0] in output_stanza["args"]: + raise SimpleNetworkError("Stanza '%s' has repeated argument" + " %s" % (stanza, argpair[0])) + output_stanza["args"][argpair[0]] = argpair[1] + + return output_stanza + + def status(self, **kwargs): + '''Provide status output. + + The ``msg`` keyword argument is the actual message, + the rest are values for fields in the message as interpolated + by %. + + ''' + + self.output.write('%s\n' % (kwargs['msg'] % kwargs)) + +SimpleNetworkConfigurationExtension().run() -- cgit v1.2.1 From 7e0decbb40fcb779f8e4c4b0fd3718022f3cdd88 Mon Sep 17 00:00:00 2001 From: Jonathan Maw Date: Thu, 16 May 2013 15:14:14 +0000 Subject: Can define version by the environment variable 'VERSION' If the version already exists on the device, deployment is aborted --- nfsboot.write | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/nfsboot.write b/nfsboot.write index e2ce7db2..cfc3fc32 100755 --- a/nfsboot.write +++ b/nfsboot.write @@ -66,14 +66,27 @@ class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): 'with hostname "baserock"') self.test_good_server(location) - version = 'version1' + version = os.environ['VERSION'] or 'version1' versioned_root = os.path.join(self._nfsboot_root, hostname, 'systems', version) + if self.version_exists(versioned_root, location): + raise cliapp.AppException('Version %s already exists on' + ' this device. 
Deployment aborted' + % version) self.copy_rootfs(temp_root, location, versioned_root, hostname) self.copy_kernel(temp_root, location, versioned_root, version, hostname) self.configure_nfs(location, hostname) + def version_exists(self, versioned_root, location): + try: + cliapp.ssh_runcmd('root@%s' % location, + ['test', '-d', versioned_root]) + except cliapp.AppException: + return False + + return True + def get_hostname(self, temp_root): hostnamepath = os.path.join(temp_root, 'etc', 'hostname') with open(hostnamepath) as f: -- cgit v1.2.1 From f52890784d6bbcd5f636badc9bc6b721a5417afc Mon Sep 17 00:00:00 2001 From: Jonathan Maw Date: Mon, 20 May 2013 16:26:51 +0000 Subject: nfsboot deployment creates local state dirs --- nfsboot.write | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/nfsboot.write b/nfsboot.write index cfc3fc32..61c5306a 100755 --- a/nfsboot.write +++ b/nfsboot.write @@ -92,6 +92,14 @@ class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): with open(hostnamepath) as f: return f.readline().strip() + def create_local_state(self, location, hostname): + statedir = os.path.join(self._nfsboot_root, hostname, 'state') + subdirs = [os.path.join(statedir, 'home'), + os.path.join(statedir, 'opt'), + os.path.join(statedir, 'srv')] + cliapp.ssh_runcmd('root@%s' % location, + ['mkdir', '-p'] + subdirs) + def copy_kernel(self, temp_root, location, versioned_root, version, hostname): bootdir = os.path.join(temp_root, 'boot') -- cgit v1.2.1 From 1706b1074fbf01d1cd2bbd13bbb58eb5c28f4718 Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Mon, 20 May 2013 11:20:28 +0000 Subject: Implement raw disk upgrades. We perform this by cloning an existing orig directory and then updating the contents using rsync --- rawdisk.write | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 50 insertions(+), 4 deletions(-) diff --git a/rawdisk.write b/rawdisk.write index a55473f2..76438a5e 100755 --- a/rawdisk.write +++ b/rawdisk.write @@ -18,6 +18,7 @@ '''A Morph deployment write extension for raw disk images.''' +import cliapp import os import sys import time @@ -30,8 +31,10 @@ class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): '''Create a raw disk image during Morph's deployment. + If the image already exists, it is upgraded. + The location command line argument is the pathname of the disk image - to be created. + to be created/upgraded. 
''' @@ -40,9 +43,52 @@ class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): raise cliapp.AppException('Wrong number of command line args') temp_root, location = args - - self.create_local_system(temp_root, location) - self.status(msg='Disk image has been created at %s' % location) + if os.path.isfile(location): + self.upgrade_local_system(location, temp_root) + else: + self.create_local_system(temp_root, location) + self.status(msg='Disk image has been created at %s' % location) + + def upgrade_local_system(self, raw_disk, temp_root): + mp = self.mount(raw_disk) + + version_label = self.get_version_label(mp) + self.status(msg='Updating image to a new version with label %s' % + version_label) + + version_root = os.path.join(mp, 'systems', version_label) + os.mkdir(version_root) + + old_orig = os.path.join(mp, 'systems', 'version1', 'orig') + new_orig = os.path.join(version_root, 'orig') + cliapp.runcmd( + ['btrfs', 'subvolume', 'snapshot', old_orig, new_orig]) + + cliapp.runcmd( + ['rsync', '-a', '--checksum', '--numeric-ids', '--delete', + temp_root + os.path.sep, new_orig]) + + self.create_run(version_root) + + if self.bootloader_is_wanted(): + self.install_kernel(version_root, temp_root) + self.install_extlinux(mp, version_label) + + self.unmount(mp) + + def get_version_label(self, mp): + version_label = os.environ.get('VERSION_LABEL') + + if version_label is None: + self.unmount(mp) + raise cliapp.AppException('VERSION_LABEL was not given') + + if os.path.exists(os.path.join(mp, 'systems', version_label)): + self.unmount(mp) + raise cliapp.AppException('VERSION_LABEL %s already exists' + % version_label) + + return version_label RawDiskWriteExtension().run() -- cgit v1.2.1 From 2672ffe119deff9faafc7382842574297d30e497 Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Thu, 23 May 2013 15:20:52 +0000 Subject: Use the name factory for the first system version. 
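For illustration, the upgrade path added above reduces to two steps: snapshot the previous version's 'orig' subvolume, then let rsync reconcile the snapshot with the freshly built system. A condensed sketch, with illustrative function and argument names that are not part of the extension itself:

    import os

    import cliapp

    def upgrade_version(mp, old_version, new_version, temp_root):
        # A writable btrfs snapshot shares unchanged extents with the
        # old version, so the rsync below only pays for the differences.
        old_orig = os.path.join(mp, 'systems', old_version, 'orig')
        version_root = os.path.join(mp, 'systems', new_version)
        new_orig = os.path.join(version_root, 'orig')
        os.mkdir(version_root)
        cliapp.runcmd(
            ['btrfs', 'subvolume', 'snapshot', old_orig, new_orig])
        # --checksum catches files whose size and mtime are unchanged,
        # --numeric-ids avoids remapping uids/gids through the host's
        # passwd database, and --delete drops files that no longer
        # exist in the new build. The trailing separator makes rsync
        # copy the contents of temp_root rather than the directory.
        cliapp.runcmd(
            ['rsync', '-a', '--checksum', '--numeric-ids', '--delete',
             temp_root + os.path.sep, new_orig])

The patch below then renames the base version from 'version1' to 'factory', which is the name the snapshot source keeps from here on.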
--- nfsboot.write | 2 +- rawdisk.write | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nfsboot.write b/nfsboot.write index e2ce7db2..cf71301b 100755 --- a/nfsboot.write +++ b/nfsboot.write @@ -66,7 +66,7 @@ class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): 'with hostname "baserock"') self.test_good_server(location) - version = 'version1' + version = 'factory' versioned_root = os.path.join(self._nfsboot_root, hostname, 'systems', version) self.copy_rootfs(temp_root, location, versioned_root, hostname) diff --git a/rawdisk.write b/rawdisk.write index 76438a5e..a43a9cce 100755 --- a/rawdisk.write +++ b/rawdisk.write @@ -59,7 +59,7 @@ class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): version_root = os.path.join(mp, 'systems', version_label) os.mkdir(version_root) - old_orig = os.path.join(mp, 'systems', 'version1', 'orig') + old_orig = os.path.join(mp, 'systems', 'factory', 'orig') new_orig = os.path.join(version_root, 'orig') cliapp.runcmd( ['btrfs', 'subvolume', 'snapshot', old_orig, new_orig]) -- cgit v1.2.1 From ee54044ddaed11bd83161e98854e4411ebb91cde Mon Sep 17 00:00:00 2001 From: Jonathan Maw Date: Fri, 24 May 2013 17:02:04 +0000 Subject: Fix kvm working without autostart --- kvm.write | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvm.write b/kvm.write index e2f7435c..ae287fe5 100755 --- a/kvm.write +++ b/kvm.write @@ -113,7 +113,7 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): '--name', vm_name, '--vnc', '--ram=%s' % ram_mebibytes, '--disk path=%s,bus=ide' % vm_path] + attach_opts if not autostart: - cmdline += '--noreboot' + cmdline += ['--noreboot'] cliapp.runcmd(cmdline) -- cgit v1.2.1 From 6bd0b52aa907a27c355b2ab00a151757b9bb24fc Mon Sep 17 00:00:00 2001 From: Jonathan Maw Date: Fri, 24 May 2013 15:44:57 +0000 Subject: Add ssh-rsync write extension This is used to perform upgrades on running baserock systems. It requires rsync on the target system --- ssh-rsync.write | 181 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 181 insertions(+) create mode 100755 ssh-rsync.write diff --git a/ssh-rsync.write b/ssh-rsync.write new file mode 100755 index 00000000..6fe1153d --- /dev/null +++ b/ssh-rsync.write @@ -0,0 +1,181 @@ +#!/usr/bin/python +# Copyright (C) 2013 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details.
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + +'''A Morph deployment write extension for upgrading systems over ssh.''' + + +import cliapp +import os +import sys +import time +import tempfile + +import morphlib.writeexts + +class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): + + '''Upgrade a running baserock system with ssh and rsync. + + It assumes the system is baserock-based and has a btrfs partition. + + The location command line argument is the 'user@hostname' string + that will be passed to ssh and rsync + + ''' + + def process_args(self, args): + if len(args) != 2: + raise cliapp.AppException('Wrong number of command line args') + + temp_root, location = args + + self.check_valid_target(location) + self.upgrade_remote_system(location, temp_root) + + def upgrade_remote_system(self, location, temp_root): + root_disk = self.find_root_disk(location) + version_label = os.environ.get('VERSION_LABEL') + + try: + self.status(msg='Creating remote mount point') + remote_mnt = cliapp.ssh_runcmd(location, ['mktemp', '-d']).strip() + + self.status(msg='Mounting root disk') + cliapp.ssh_runcmd(location, ['mount', root_disk, remote_mnt]) + + version_root = os.path.join(remote_mnt, 'systems', version_label) + run_dir = os.path.join(version_root, 'run') + orig_dir = os.path.join(version_root, 'orig') + try: + self.status(msg='Creating %s' % version_root) + cliapp.ssh_runcmd(location, ['mkdir', version_root]) + + self.create_remote_orig(location, version_root, remote_mnt, + temp_root) + + self.status(msg='Creating "run" subvolume') + cliapp.ssh_runcmd(location, ['btrfs', 'subvolume', + 'snapshot', orig_dir, run_dir]) + + self.install_remote_kernel(location, version_root, temp_root) + except Exception as e: + try: + cliapp.ssh_runcmd(location, + ['btrfs', 'subvolume', 'delete', run_dir]) + cliapp.ssh_runcmd(location, + ['btrfs', 'subvolume', 'delete', orig_dir]) + cliapp.ssh_runcmd(location, ['rm', '-rf', version_root]) + except: + pass + raise e + + if self.bootloader_is_wanted(): + self.update_remote_extlinux(location, remote_mnt, + version_label) + except: + raise + else: + self.status(msg='Removing temporary mounts') + cliapp.ssh_runcmd(location, ['umount', root_disk]) + cliapp.ssh_runcmd(location, ['rmdir', remote_mnt]) + + def update_remote_extlinux(self, location, remote_mnt, version_label): + '''Install/reconfigure extlinux on location''' + + self.status(msg='Creating extlinux.conf') + config = os.path.join(remote_mnt, 'extlinux.conf') + temp_file = tempfile.mkstemp()[1] + with open(temp_file, 'w') as f: + f.write('default linux\n') + f.write('timeout 1\n') + f.write('label linux\n') + f.write('kernel /systems/' + version_label + '/kernel\n') + f.write('append root=/dev/sda ' + 'rootflags=subvol=systems/' + version_label + '/run ' + 'init=/sbin/init rw\n') + + cliapp.ssh_runcmd(location, ['mv', config, config+'~']) + + try: + cliapp.runcmd(['rsync', '-a', temp_file, + '%s:%s' % (location, config)]) + except Exception as e: + try: + cliapp.ssh_runcmd(location, ['mv', config+'~', config]) + except: + pass + raise e + + def create_remote_orig(self, location, version_root, remote_mnt, + temp_root): + '''Create the subvolume version_root/orig on location''' + + self.status(msg='Creating "orig" subvolume') + old_orig = self.get_old_orig(location, remote_mnt) + new_orig = os.path.join(version_root, 'orig') + 
cliapp.ssh_runcmd(location, ['btrfs', 'subvolume', 'snapshot', + old_orig, new_orig]) + + cliapp.runcmd(['rsync', '-a', '--checksum', '--numeric-ids', + '--delete', temp_root, '%s:%s' % (location, new_orig)]) + + def get_old_orig(self, location, remote_mnt): + '''Identify which subvolume to snapshot from''' + + # rawdisk upgrades use 'factory' + return os.path.join(remote_mnt, 'systems', 'factory', 'orig') + + def find_root_disk(self, location): + '''Read /proc/mounts on location to find which device contains "/"''' + + self.status(msg='Finding device that contains "/"') + contents = cliapp.ssh_runcmd(location, ['cat', '/proc/mounts']) + for line in contents.splitlines(): + line_words = line.split() + if (line_words[1] == '/' and line_words[0] != 'rootfs'): + return line_words[0] + + def install_remote_kernel(self, location, version_root, temp_root): + '''Install the kernel in temp_root inside version_root on location''' + + self.status(msg='Installing kernel') + image_names = ['vmlinuz', 'zImage', 'uImage'] + kernel_dest = os.path.join(version_root, 'kernel') + for name in image_names: + try_path = os.path.join(temp_root, 'boot', name) + if os.path.exists(try_path): + cliapp.runcmd(['rsync', '-a', try_path, + '%s:%s' % (location, kernel_dest)]) + + def check_valid_target(self, location): + try: + cliapp.ssh_runcmd(location, ['true']) + except Exception as e: + raise cliapp.AppException('%s does not respond to ssh:\n%s' + % (location, e)) + + try: + cliapp.ssh_runcmd(location, ['test', '-d', '/baserock']) + except: + raise cliapp.AppException('%s is not a baserock system' % location) + + try: + cliapp.ssh_runcmd(location, ['which', 'rsync']) + except: + raise cliapp.AppException('%s does not have rsync') + +SshRsyncWriteExtension().run() -- cgit v1.2.1 From 308b1b2a6545566c813e66da5b354175c3cef168 Mon Sep 17 00:00:00 2001 From: Jonathan Maw Date: Thu, 30 May 2013 11:51:21 +0100 Subject: Add tar write extension --- tar.write | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100755 tar.write diff --git a/tar.write b/tar.write new file mode 100755 index 00000000..5b775e52 --- /dev/null +++ b/tar.write @@ -0,0 +1,19 @@ +#!/bin/sh +# Copyright (C) 2013 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +# A Morph write extension to deploy to a .tar file + +tar -C "$1" -cf "$2" -- cgit v1.2.1 From 053333fdac45e58639726eef4726b47d6a2d6385 Mon Sep 17 00:00:00 2001 From: Jonathan Maw Date: Thu, 30 May 2013 11:54:48 +0100 Subject: Tar write extension fails if arguments not set --- tar.write | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tar.write b/tar.write index 5b775e52..7a2f01e1 100755 --- a/tar.write +++ b/tar.write @@ -16,4 +16,6 @@ # A Morph write extension to deploy to a .tar file +set -eu + tar -C "$1" -cf "$2" -- cgit v1.2.1 From b8f4f02829303a5bbd06c0343f34c5354c2d8a0b Mon Sep 17 00:00:00 2001 From: Jonathan Maw Date: Fri, 31 May 2013 15:57:53 +0100 Subject: Fix tar write extension --- tar.write | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tar.write b/tar.write index 7a2f01e1..333626b5 100755 --- a/tar.write +++ b/tar.write @@ -18,4 +18,4 @@ set -eu -tar -C "$1" -cf "$2" +tar -C "$1" -cf "$2" . -- cgit v1.2.1 From 7412ca5200695a99983c0bfb98a708808be6d639 Mon Sep 17 00:00:00 2001 From: Jonathan Maw Date: Mon, 3 Jun 2013 15:47:49 +0000 Subject: kvm.write: use ssh_runcmd --- kvm.write | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/kvm.write b/kvm.write index ae287fe5..4bfff8c4 100755 --- a/kvm.write +++ b/kvm.write @@ -108,13 +108,12 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): ram_mebibytes = str(self.get_ram_size() / (1024**2)) - cmdline = ['ssh', ssh_host, - 'virt-install', '--connect qemu:///system', '--import', + cmdline = ['virt-install', '--connect', 'qemu:///system', '--import', '--name', vm_name, '--vnc', '--ram=%s' % ram_mebibytes, - '--disk path=%s,bus=ide' % vm_path] + attach_opts + '--disk', 'path=%s,bus=ide' % vm_path] + attach_opts if not autostart: cmdline += ['--noreboot'] - cliapp.runcmd(cmdline) + cliapp.ssh_runcmd(ssh_host, cmdline) KvmPlusSshWriteExtension().run() -- cgit v1.2.1 From 480aea39aa2b9298e0828bff1c2b1a2ec61a2124 Mon Sep 17 00:00:00 2001 From: Jonathan Maw Date: Mon, 3 Jun 2013 15:49:53 +0000 Subject: kvm.write: Set VM to autostart if specified --- kvm.write | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kvm.write b/kvm.write index 4bfff8c4..f2683d8e 100755 --- a/kvm.write +++ b/kvm.write @@ -115,6 +115,9 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): cmdline += ['--noreboot'] cliapp.ssh_runcmd(ssh_host, cmdline) + if autostart: + cliapp.ssh_runcmd(ssh_host, + ['virsh', '--connect', 'qemu:///system', 'autostart', vm_name]) KvmPlusSshWriteExtension().run() -- cgit v1.2.1 From c3c678ccd9fc2cc898b3d6d14cc15711fd992b9e Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Wed, 29 May 2013 16:35:36 +0000 Subject: Improve network setup of the virtualbox write extension. The VirtualBox deployment write extension will configure networking in the following manner: - One host-only network interface will be used to group the virtual machines together (and to give access between the host and the virtual machines). It will be bound to eth0 of the virtual machine. - One NAT (or Bridge) network interface will be used to allow the virtual machines access to the wider network. This will be bound to eth1 of the virtual machine. Now deployment to virtual box will require that both HOST_IPADDR and NETMASK environment variables be set, and also configuration for eth0 and eth1 is expected in the NETWORK_CONFIG environment variable. This commit also replaces some run_cmd with ssh_runcmd. 
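To pick the right host-only interface, the extension scans the output of 'VBoxManage list hostonlyifs' for the block whose IPAddress field matches HOST_IPADDR. The patch below does this by indexing three lines back from the matching address to reach the Name: field; here is a rough, order-independent sketch of the same lookup (standalone and illustrative, not the extension's exact code):

    def find_hostonly_iface(listing, host_ipaddr):
        '''Return the interface name (e.g. vboxnet0) for an IP address.

        `listing` is text in the format printed by the command
        `VBoxManage list hostonlyifs`, where each interface block
        contains lines such as:

            Name:            vboxnet0
            ...
            IPAddress:       192.168.100.1
        '''
        name = None
        for line in listing.splitlines():
            if line.startswith('Name:'):
                name = line.split()[1]
            elif line.startswith('IPAddress:'):
                if name is not None and line.split()[1] == host_ipaddr:
                    return name
        return None

If no interface matches, the extension falls back to 'VBoxManage hostonlyif create' and assigns HOST_IPADDR and NETMASK to the newly created interface, as the diff below shows.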
--- virtualbox-ssh.write | 72 +++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 65 insertions(+), 7 deletions(-) diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index cb17b69b..3ee2eae0 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -101,24 +101,26 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): self.status(msg='Transfer disk and convert to VDI') with open(raw_disk, 'rb') as f: - cliapp.runcmd( - ['ssh', ssh_host, - 'VBoxManage', 'convertfromraw', 'stdin', vdi_path, + cliapp.ssh_runcmd(ssh_host, + ['VBoxManage', 'convertfromraw', 'stdin', vdi_path, str(os.path.getsize(raw_disk))], stdin=f) def create_virtualbox_guest(self, ssh_host, vm_name, vdi_path, autostart): '''Create the VirtualBox virtual machine.''' - + self.status(msg='Create VirtualBox virtual machine') ram_mebibytes = str(self.get_ram_size() / (1024**2)) + hostonly_iface = self.get_host_interface(ssh_host) + commands = [ ['createvm', '--name', vm_name, '--ostype', 'Linux26_64', '--register'], ['modifyvm', vm_name, '--ioapic', 'on', '--memory', ram_mebibytes, - '--nic1', 'nat'], + '--nic1', 'hostonly', '--hostonlyadapter1', hostonly_iface, + '--nic2', 'nat', '--natnet2', 'default'], ['storagectl', vm_name, '--name', '"SATA Controller"', '--add', 'sata', '--bootable', 'on', '--sataportcount', '2'], ['storageattach', vm_name, '--storagectl', '"SATA Controller"', @@ -140,9 +142,65 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): commands.append(['startvm', vm_name]) for command in commands: - argv = ['ssh', ssh_host, 'VBoxManage'] + command - cliapp.runcmd(argv) + argv = ['VBoxManage'] + command + cliapp.ssh_runcmd(ssh_host, argv) + + def get_host_interface(self, ssh_host): + host_ipaddr = os.environ.get('HOST_IPADDR') + netmask = os.environ.get('NETMASK') + network_config = os.environ.get("NETWORK_CONFIG") + if network_config is None: + raise cliapp.AppException('NETWORK_CONFIG was not given') + + if "eth0:" not in network_config: + raise cliapp.AppException( + 'NETWORK_CONFIG does not contain ' + 'the eth0 configuration') + + if "eth1:" not in network_config: + raise cliapp.AppException( + 'NETWORK_CONFIG does not contain ' + 'the eth1 configuration') + + if host_ipaddr is None: + raise cliapp.AppException('HOST_IPADDR was not given') + + if netmask is None: + raise cliapp.AppException('NETMASK was not given') + + # 'VBoxManage list hostonlyifs' retrieves a list with the hostonly + # interfaces on the host. For each interface, the following lines + # are shown on top: + # + # Name: vboxnet0 + # GUID: 786f6276-656e-4074-8000-0a0027000000 + # Dhcp: Disabled + # IPAddress: 192.168.100.1 + # + # The following command tries to retrieve the hostonly interface + # name (e.g. vboxnet0) associated with the given ip address. 
+ iface = None + lines = cliapp.ssh_runcmd(ssh_host, + ['VBoxManage', 'list', 'hostonlyifs']).splitlines() + for i, v in enumerate(lines): + if host_ipaddr in v: + iface = lines[i-3].split()[1] + break + + if iface is None: + iface = cliapp.ssh_runcmd(ssh_host, + ['VBoxManage', 'hostonlyif', 'create']) + # 'VBoxManage hostonlyif create' shows the name of the + # created hostonly interface inside single quotes + iface = iface[iface.find("'") + 1 : iface.rfind("'")] + cliapp.ssh_runcmd(ssh_host, + ['VBoxManage', 'hostonlyif', + 'ipconfig', iface, + '--ip', host_ipaddr, + '--netmask', netmask]) + + return iface VirtualBoxPlusSshWriteExtension().run() -- cgit v1.2.1 From 72e2d820516ef534c5b557703a973e0499ac398a Mon Sep 17 00:00:00 2001 From: Jonathan Maw Date: Thu, 6 Jun 2013 11:14:21 +0100 Subject: Add the install-files extension to morph --- install-files.configure | 112 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100755 install-files.configure diff --git a/install-files.configure b/install-files.configure new file mode 100755 index 00000000..669fc518 --- /dev/null +++ b/install-files.configure @@ -0,0 +1,112 @@ +#!/usr/bin/python +# Copyright (C) 2013 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + +''' A Morph configuration extension for adding arbitrary files to a system + +It will read the manifest files specified in the environment variable +INSTALL_FILES, then use the contens of those files to determine which files +to install into the target system. + +''' + +import cliapp +import os +import re +import sys +import shlex +import shutil +import stat + +class InstallFilesConfigureExtension(cliapp.Application): + + '''Install the files specified in the manifests listed in INSTALL_FILES + + The manifest is formatted as: + + + + Where the filename is how the file is found inside whatever directory + the manifest is stored in, and also the path within the system to + install to. + + Directories on the target must be created if they do not exist. + + This extension supports files, symlinks and directories. 
+ + ''' + + def process_args(self, args): + if not 'INSTALL_FILES' in os.environ: + return + target_root = args[0] + manifests = shlex.split(os.environ['INSTALL_FILES']) + for manifest in manifests: + self.install_manifest(manifest, target_root) + + def install_manifest(self, manifest, target_root): + manifest_dir = os.path.dirname(manifest) + with open(manifest) as f: + entries = f.readlines() + for entry in entries: + self.install_entry(entry, manifest_dir, target_root) + + def install_entry(self, entry, manifest_root, target_root): + entry_data = re.split('\W+', entry.strip(), maxsplit=3) + mode = int(entry_data[0], 8) + uid = int(entry_data[1]) + gid = int(entry_data[2]) + path = entry_data[3] + dest_path = os.path.join(target_root, './' + path) + if stat.S_ISDIR(mode): + if os.path.exists(dest_path): + dest_stat = os.stat(dest_path) + if (mode != dest_stat.st_mode + or uid != dest_stat.st_uid + or gid != dest_stat.st_gid): + raise cliapp.AppException('"%s" exists and is not ' + 'identical to directory ' + '"%s"' % (dest_path, entry)) + else: + os.mkdir(dest_path, mode) + os.chown(dest_path, uid, gid) + os.chmod(dest_path, mode) + + elif stat.S_ISLNK(mode): + if os.path.lexists(dest_path): + raise cliapp.AppException('Symlink already exists at %s' + % dest_path) + else: + linkdest = os.readlink(os.path.join(manifest_root, + './' + path)) + os.symlink(linkdest, dest_path) + os.lchown(dest_path, uid, gid) + + elif stat.S_ISREG(mode): + if os.path.lexists(dest_path): + raise cliapp.AppException('File already exists at %s' + % dest_path) + else: + shutil.copyfile(os.path.join(manifest_root, './' + path), + dest_path) + os.chown(dest_path, uid, gid) + os.chmod(dest_path, mode) + + else: + raise cliapp.AppException('Mode given in "%s" is not a file,' + ' symlink or directory' % entry) + +InstallFilesConfigureExtension().run() -- cgit v1.2.1 From 221ca74c31a7be787bfa354f53c292f532fa2e2d Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Thu, 13 Jun 2013 10:40:09 +0000 Subject: Set up a symlink to the default system version in rawdisk/kvm/vbox deployments Also Change them to use the "default" symlink in the extlinux.conf they create, instead of hardcoding the current system version name --- rawdisk.write | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/rawdisk.write b/rawdisk.write index a43a9cce..62d39b31 100755 --- a/rawdisk.write +++ b/rawdisk.write @@ -70,9 +70,17 @@ class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): self.create_run(version_root) + default_path = os.path.join(mp, 'systems', 'default') + if os.path.exists(default_path): + os.remove(default_path) + else: + # we are upgrading and old system that does + # not have an updated extlinux config file + self.install_extlinux(mp) + os.symlink(version_label, default_path) + if self.bootloader_is_wanted(): self.install_kernel(version_root, temp_root) - self.install_extlinux(mp, version_label) self.unmount(mp) -- cgit v1.2.1 From f041f27a744ad7fa9d95f59d29026a192cd4aa09 Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Thu, 13 Jun 2013 15:01:40 +0000 Subject: Change the symbolic link target and directory the default system on NFS server. With this we'll have "default -> factory" instead of "default-run" -> "factory/run". Also change to use VERSION_LABEL instead of VERSION and "factory" instead of "version1", to be more consistent with other parts of the code. 
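As well as the renames, switching from VERSION to VERSION_LABEL fixes a latent lookup bug, visible in the hunk below:

    import os

    # The old form only looked like it had a fallback:
    #
    #     version = os.environ['VERSION'] or 'version1'
    #
    # os.environ[...] raises KeyError when VERSION is unset, so the
    # 'version1' default was unreachable. os.getenv gives a real one:
    version_label = os.getenv('VERSION_LABEL', 'factory')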
--- nfsboot.write | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/nfsboot.write b/nfsboot.write index 61c5306a..f43d3c98 100755 --- a/nfsboot.write +++ b/nfsboot.write @@ -66,15 +66,15 @@ class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): 'with hostname "baserock"') self.test_good_server(location) - version = os.environ['VERSION'] or 'version1' + version_label = os.getenv('VERSION_LABEL', 'factory') versioned_root = os.path.join(self._nfsboot_root, hostname, 'systems', - version) + version_label) if self.version_exists(versioned_root, location): raise cliapp.AppException('Version %s already exists on' ' this device. Deployment aborted' - % version) + % version_label) self.copy_rootfs(temp_root, location, versioned_root, hostname) - self.copy_kernel(temp_root, location, versioned_root, version, + self.copy_kernel(temp_root, location, versioned_root, version_label, hostname) self.configure_nfs(location, hostname) @@ -171,15 +171,15 @@ class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): raise cliapp.AppException('Could not create \'run\' rootfs' ' from \'orig\'') - self.status(msg='Linking \'default-run\' to latest system') + self.status(msg='Linking \'default\' to latest system') try: cliapp.ssh_runcmd('root@%s' % location, - ['ln', '-sfn', run_path, + ['ln', '-sfn', versioned_root, os.path.join(self._nfsboot_root, hostname, 'systems', - 'default-run')]) + 'default')]) except cliapp.AppException: - raise cliapp.AppException('Could not link \'default-run\' to %s' - % run_path) + raise cliapp.AppException('Could not link \'default\' to %s' + % versioned_root) def configure_nfs(self, location, hostname): exported_path = os.path.join(self._nfsboot_root, hostname) -- cgit v1.2.1 From bdca86375dd2d7fe53c5404db413656d53989bc9 Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Fri, 14 Jun 2013 19:15:09 +0000 Subject: Only upgrade an older extlinux configuration if we want one. --- rawdisk.write | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rawdisk.write b/rawdisk.write index 62d39b31..a74d6905 100755 --- a/rawdisk.write +++ b/rawdisk.write @@ -76,7 +76,8 @@ class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): else: # we are upgrading and old system that does # not have an updated extlinux config file - self.install_extlinux(mp) + if self.bootloader_is_wanted(): + self.install_extlinux(mp) os.symlink(version_label, default_path) if self.bootloader_is_wanted(): -- cgit v1.2.1 From 64b72cdc4ae7a0d376239f31d1e607bae9d8d602 Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Tue, 18 Jun 2013 16:09:51 +0100 Subject: Create a symbolic link to the default system version when upgrading running systems. 
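Since the extlinux.conf written by this patch refers to /systems/default/... rather than to a specific version, switching the booted system reduces to repointing a single symlink inside the mounted root disk. A minimal sketch, with a hypothetical target host and mount point:

    import os

    import cliapp

    location = 'root@192.168.122.10'  # hypothetical target
    remote_mnt = '/tmp/tmp.abc123'    # as returned by mktemp -d
    version_label = 'factory'

    default_path = os.path.join(remote_mnt, 'systems', 'default')
    # -n matters when the link points at a directory: without it, ln
    # dereferences an existing 'default' link and creates
    # systems/default/factory instead of replacing the link itself.
    # This patch still uses plain 'ln -s -f'; a follow-up below
    # switches to '-sfn' for exactly that reason.
    cliapp.ssh_runcmd(location,
                      ['ln', '-sfn', version_label, default_path])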
--- ssh-rsync.write | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ssh-rsync.write b/ssh-rsync.write index 6fe1153d..4348714c 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -72,6 +72,10 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): 'snapshot', orig_dir, run_dir]) self.install_remote_kernel(location, version_root, temp_root) + default_path = os.path.join(remote_mnt, 'systems', 'default') + cliapp.ssh_runcmd(location, ['ln', '-s', '-f', + version_label, + default_path]) except Exception as e: try: cliapp.ssh_runcmd(location, @@ -103,9 +107,9 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): f.write('default linux\n') f.write('timeout 1\n') f.write('label linux\n') - f.write('kernel /systems/' + version_label + '/kernel\n') + f.write('kernel /systems/default/kernel\n') f.write('append root=/dev/sda ' - 'rootflags=subvol=systems/' + version_label + '/run ' + 'rootflags=subvol=systems/default/run ' 'init=/sbin/init rw\n') cliapp.ssh_runcmd(location, ['mv', config, config+'~']) -- cgit v1.2.1 From 251d6a684eda959057810e736184eac316e80c75 Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Tue, 18 Jun 2013 16:10:42 +0100 Subject: Support upgrades in older running versions. Verify if are using and older extlinux configuration and upgrade it if the case, by checking if the "default" symbolic link exists on the target. Note that with the symbolic link we don't need to update extlinux configuration after an upgrade --- ssh-rsync.write | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/ssh-rsync.write b/ssh-rsync.write index 4348714c..1a921996 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -73,6 +73,17 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): self.install_remote_kernel(location, version_root, temp_root) default_path = os.path.join(remote_mnt, 'systems', 'default') + if self.bootloader_is_wanted(): + output = ssh_runcmd(location, ['sh', '-c', + 'test -e "$1" && stat -c %F "$1"' + ' || ' + 'echo missing file', + '-', default_path]) + if output != "symbolic link": + # we are upgrading and old system that does + # not have an updated extlinux config file + self.update_remote_extlinux(location, remote_mnt, + version_label) cliapp.ssh_runcmd(location, ['ln', '-s', '-f', version_label, default_path]) @@ -87,9 +98,6 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): pass raise e - if self.bootloader_is_wanted(): - self.update_remote_extlinux(location, remote_mnt, - version_label) except: raise else: -- cgit v1.2.1 From 5e664629324a2cab7b4b79c01d458cc00c38e9c4 Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Wed, 19 Jun 2013 12:59:34 +0100 Subject: Fix a typo --- ssh-rsync.write | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ssh-rsync.write b/ssh-rsync.write index 1a921996..6bef51db 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -74,7 +74,7 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): self.install_remote_kernel(location, version_root, temp_root) default_path = os.path.join(remote_mnt, 'systems', 'default') if self.bootloader_is_wanted(): - output = ssh_runcmd(location, ['sh', '-c', + output = cliapp.ssh_runcmd(location, ['sh', '-c', 'test -e "$1" && stat -c %F "$1"' ' || ' 'echo missing file', -- cgit v1.2.1 From 0b1db252d61c3aa02ae7c865314ee07e96244be3 Mon Sep 17 00:00:00 2001 From: Jonathan Maw Date: Wed, 19 Jun 2013 12:23:11 +0000 Subject: kvm.write uses NIC_CONFIG --- kvm.write | 5 +++++ 1 file changed, 5 
insertions(+) diff --git a/kvm.write b/kvm.write index f2683d8e..52768042 100755 --- a/kvm.write +++ b/kvm.write @@ -106,6 +106,11 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): for disk in attach_disks: attach_opts.extend(['--disk', 'path=%s' % disk]) + if 'NIC_CONFIG' in os.environ: + nics = os.environ['NIC_CONFIG'].split() + for nic in nics: + attach_opts.extend(['--network', nic]) + ram_mebibytes = str(self.get_ram_size() / (1024**2)) cmdline = ['virt-install', '--connect', 'qemu:///system', '--import', -- cgit v1.2.1 From 3e8721c40abdc474ad3431d62d102e10aee7488f Mon Sep 17 00:00:00 2001 From: Richard Maw Date: Sun, 9 Jun 2013 22:56:33 +0000 Subject: Write extensions: pass -s to rsync -s, or --protect-args prevents the file path components of destination or source paths being interpreted by the remote shell. This is for wildcards or other shell features, but it breaks when paths have whitespace. We tend to always use absolute paths, so all uses of rsync now pass -s. kvm.write needs it, since the disk can be written to a path with spaces. Nfsboot and ssh-rsync need it because version labels are used, which may have spaces, and temporary directories are used, which could have spaces in weird TMPDIR configurations. --- kvm.write | 2 +- nfsboot.write | 4 ++-- ssh-rsync.write | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/kvm.write b/kvm.write index 52768042..67ac40e7 100755 --- a/kvm.write +++ b/kvm.write @@ -94,7 +94,7 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): self.status(msg='Transferring disk image') target = '%s:%s' % (ssh_host, vm_path) with open(raw_disk, 'rb') as f: - cliapp.runcmd(['rsync', '-zS', raw_disk, target]) + cliapp.runcmd(['rsync', '-szS', raw_disk, target]) def create_libvirt_guest(self, ssh_host, vm_name, vm_path, autostart): '''Create the libvirt virtual machine.''' diff --git a/nfsboot.write b/nfsboot.write index f43d3c98..34a72972 100755 --- a/nfsboot.write +++ b/nfsboot.write @@ -118,7 +118,7 @@ class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): rsync_dest = 'root@%s:%s' % (location, kernel_dest) self.status(msg='Copying kernel') cliapp.runcmd( - ['rsync', kernel_src, rsync_dest]) + ['rsync', '-s', kernel_src, rsync_dest]) # Link the kernel to the right place self.status(msg='Creating links to kernel in tftp directory') @@ -153,7 +153,7 @@ class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): self.status(msg='Creating \'orig\' rootfs') cliapp.runcmd( - ['rsync', '-aXSPH', '--delete', rootfs_src, + ['rsync', '-asXSPH', '--delete', rootfs_src, 'root@%s:%s' % (location, orig_path)]) self.status(msg='Creating \'run\' rootfs') diff --git a/ssh-rsync.write b/ssh-rsync.write index 6bef51db..fba550cd 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -123,7 +123,7 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): cliapp.ssh_runcmd(location, ['mv', config, config+'~']) try: - cliapp.runcmd(['rsync', '-a', temp_file, + cliapp.runcmd(['rsync', '-as', temp_file, '%s:%s' % (location, config)]) except Exception as e: try: @@ -142,7 +142,7 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): cliapp.ssh_runcmd(location, ['btrfs', 'subvolume', 'snapshot', old_orig, new_orig]) - cliapp.runcmd(['rsync', '-a', '--checksum', '--numeric-ids', + cliapp.runcmd(['rsync', '-as', '--checksum', '--numeric-ids', '--delete', temp_root, '%s:%s' % (location, new_orig)]) def get_old_orig(self, location, remote_mnt): @@ -170,7 +170,7 @@ class 
SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): for name in image_names: try_path = os.path.join(temp_root, 'boot', name) if os.path.exists(try_path): - cliapp.runcmd(['rsync', '-a', try_path, + cliapp.runcmd(['rsync', '-as', try_path, '%s:%s' % (location, kernel_dest)]) def check_valid_target(self, location): -- cgit v1.2.1 From 2a799319bd19ce9d303aa63d30ab7c556b17b6bb Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Sat, 15 Jun 2013 09:33:10 +0000 Subject: Don't dereference the default symbolic link when updating it Otherwise, ln -s -f update1 /mp/systems/default will give us '/mp/systems/default/update1' -> 'update1', when we want '/mp/systems/default' -> 'update1'. --- ssh-rsync.write | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ssh-rsync.write b/ssh-rsync.write index fba550cd..83091c4b 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -84,8 +84,7 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): # not have an updated extlinux config file self.update_remote_extlinux(location, remote_mnt, version_label) - cliapp.ssh_runcmd(location, ['ln', '-s', '-f', - version_label, + cliapp.ssh_runcmd(location, ['ln', '-sfn', version_label, default_path]) except Exception as e: try: -- cgit v1.2.1 From 89ad5f816fff7bd7897b2d4cb02ae5cc6b6799d2 Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Sat, 15 Jun 2013 10:20:31 +0000 Subject: Unmount the remote mount point instead of the root disk Unmounting the root disk has the side effect of turning it read-only. --- ssh-rsync.write | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ssh-rsync.write b/ssh-rsync.write index 83091c4b..77266d33 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -101,7 +101,7 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): raise else: self.status(msg='Removing temporary mounts') - cliapp.ssh_runcmd(location, ['umount', root_disk]) + cliapp.ssh_runcmd(location, ['umount', remote_mnt]) cliapp.ssh_runcmd(location, ['rmdir', remote_mnt]) def update_remote_extlinux(self, location, remote_mnt, version_label): -- cgit v1.2.1 From d8a87880248ec754affc302fa8966bf5ebd83046 Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Sun, 16 Jun 2013 11:41:32 +0000 Subject: Add a missing trailing slash to the source directory of rsync According to the rsync manual: "A trailing slash on the source changes this behavior to avoid creating an additional directory level at the destination. You can think of a trailing / on a source as meaning 'copy the contents of this directory' as opposed to 'copy the directory by name'." --- ssh-rsync.write | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ssh-rsync.write b/ssh-rsync.write index 77266d33..b8d30e22 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -142,7 +142,8 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): old_orig, new_orig]) cliapp.runcmd(['rsync', '-as', '--checksum', '--numeric-ids', - '--delete', temp_root, '%s:%s' % (location, new_orig)]) + '--delete', temp_root + os.path.sep, + '%s:%s' % (location, new_orig)]) def get_old_orig(self, location, remote_mnt): '''Identify which subvolume to snapshot from''' -- cgit v1.2.1 From 4f630811332b7ebb21fc47551bccb8e14a456410 Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Sat, 15 Jun 2013 15:48:01 +0000 Subject: Run the merge mode of baserock-system-config-sync when upgrading running systems.
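The trailing-slash fix above is easy to get wrong, so here it is in miniature, with hypothetical paths:

    import os

    import cliapp

    temp_root = '/tmp/deploy-root'  # hypothetical unpacked system
    dest = 'root@host:/srv/systems/v2/orig'

    # Without a trailing separator, rsync copies the directory by name
    # and produces orig/deploy-root/..., one level deeper than wanted:
    cliapp.runcmd(['rsync', '-as', '--delete', temp_root, dest])

    # With it, rsync copies the directory's contents into orig/:
    cliapp.runcmd(['rsync', '-as', '--delete',
                   temp_root + os.path.sep, dest])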
--- ssh-rsync.write | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ssh-rsync.write b/ssh-rsync.write index b8d30e22..9697e21b 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -71,6 +71,15 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): cliapp.ssh_runcmd(location, ['btrfs', 'subvolume', 'snapshot', orig_dir, run_dir]) + self.status(msg='Updating system configuration') + bscs_loc = os.path.join(run_dir, 'usr', 'bin', + 'baserock-system-config-sync') + try: + cliapp.ssh_runcmd(location, ['sh', bscs_loc, 'merge', + version_label]) + except: + self.status(msg='Updating system configuration failed') + self.install_remote_kernel(location, version_root, temp_root) default_path = os.path.join(remote_mnt, 'systems', 'default') if self.bootloader_is_wanted(): -- cgit v1.2.1 From ce80fe3e235ff747afbea9b20f992f5af41fe946 Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Fri, 28 Jun 2013 15:00:36 +0000 Subject: Improvements to ssh-rsync extension --- ssh-rsync.write | 150 ++++++++++++++++++++++++++++++-------------------------- 1 file changed, 80 insertions(+), 70 deletions(-) diff --git a/ssh-rsync.write b/ssh-rsync.write index 9697e21b..211dbe5e 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -50,65 +50,74 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): root_disk = self.find_root_disk(location) version_label = os.environ.get('VERSION_LABEL') + self.status(msg='Creating remote mount point') + remote_mnt = cliapp.ssh_runcmd(location, ['mktemp', '-d']).strip() try: - self.status(msg='Creating remote mount point') - remote_mnt = cliapp.ssh_runcmd(location, ['mktemp', '-d']).strip() - self.status(msg='Mounting root disk') cliapp.ssh_runcmd(location, ['mount', root_disk, remote_mnt]) + except Exception as e: + try: + cliapp.ssh_runcmd(location, ['rmdir', remote_mnt]) + except: + pass + raise e + try: version_root = os.path.join(remote_mnt, 'systems', version_label) run_dir = os.path.join(version_root, 'run') orig_dir = os.path.join(version_root, 'orig') + + self.status(msg='Creating %s' % version_root) + cliapp.ssh_runcmd(location, ['mkdir', version_root]) + + self.create_remote_orig(location, version_root, remote_mnt, + temp_root) + + self.status(msg='Creating "run" subvolume') + cliapp.ssh_runcmd(location, ['btrfs', 'subvolume', + 'snapshot', orig_dir, run_dir]) + + self.status(msg='Updating system configuration') + bscs_loc = os.path.join(run_dir, 'usr', 'bin', + 'baserock-system-config-sync') + + output = cliapp.ssh_runcmd(location, ['sh', '-c', + '"$1" merge "$2" &> /dev/null || echo -n cmdfailed', + '-', bscs_loc, version_label]) + if output == "cmdfailed": + self.status(msg='Updating system configuration failed') + + self.install_remote_kernel(location, version_root, temp_root) + default_path = os.path.join(remote_mnt, 'systems', 'default') + if self.bootloader_is_wanted(): + output = cliapp.ssh_runcmd(location, ['sh', '-c', + 'test -e "$1" && stat -c %F "$1" ' + '|| echo missing file', + '-', default_path]) + if output != "symbolic link": + # we are upgrading and old system that does + # not have an updated extlinux config file + self.update_remote_extlinux(location, remote_mnt, + version_label) + cliapp.ssh_runcmd(location, ['ln', '-sfn', version_label, + default_path]) + except Exception as e: + try: + cliapp.ssh_runcmd(location, + ['btrfs', 'subvolume', 'delete', run_dir]) + except: + pass + try: + cliapp.ssh_runcmd(location, + ['btrfs', 'subvolume', 'delete', orig_dir]) + except: + pass try: - self.status(msg='Creating %s' % 
version_root) - cliapp.ssh_runcmd(location, ['mkdir', version_root]) - - self.create_remote_orig(location, version_root, remote_mnt, - temp_root) - - self.status(msg='Creating "run" subvolume') - cliapp.ssh_runcmd(location, ['btrfs', 'subvolume', - 'snapshot', orig_dir, run_dir]) - - self.status(msg='Updating system configuration') - bscs_loc = os.path.join(run_dir, 'usr', 'bin', - 'baserock-system-config-sync') - try: - cliapp.ssh_runcmd(location, ['sh', bscs_loc, 'merge', - version_label]) - except: - self.status(msg='Updating system configuration failed') - - self.install_remote_kernel(location, version_root, temp_root) - default_path = os.path.join(remote_mnt, 'systems', 'default') - if self.bootloader_is_wanted(): - output = cliapp.ssh_runcmd(location, ['sh', '-c', - 'test -e "$1" && stat -c %F "$1"' - ' || ' - 'echo missing file', - '-', default_path]) - if output != "symbolic link": - # we are upgrading and old system that does - # not have an updated extlinux config file - self.update_remote_extlinux(location, remote_mnt, - version_label) - cliapp.ssh_runcmd(location, ['ln', '-sfn', version_label, - default_path]) - except Exception as e: - try: - cliapp.ssh_runcmd(location, - ['btrfs', 'subvolume', 'delete', run_dir]) - cliapp.ssh_runcmd(location, - ['btrfs', 'subvolume', 'delete', orig_dir]) - cliapp.ssh_runcmd(location, ['rm', '-rf', version_root]) - except: - pass - raise e - - except: - raise - else: + cliapp.ssh_runcmd(location, ['rm', '-rf', version_root]) + except: + pass + raise e + finally: self.status(msg='Removing temporary mounts') cliapp.ssh_runcmd(location, ['umount', remote_mnt]) cliapp.ssh_runcmd(location, ['rmdir', remote_mnt]) @@ -118,8 +127,8 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): self.status(msg='Creating extlinux.conf') config = os.path.join(remote_mnt, 'extlinux.conf') - temp_file = tempfile.mkstemp()[1] - with open(temp_file, 'w') as f: + temp_fd, temp_path = tempfile.mkstemp() + with os.fdopen(temp_fd, 'w') as f: f.write('default linux\n') f.write('timeout 1\n') f.write('label linux\n') @@ -128,14 +137,13 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): 'rootflags=subvol=systems/default/run ' 'init=/sbin/init rw\n') - cliapp.ssh_runcmd(location, ['mv', config, config+'~']) - try: - cliapp.runcmd(['rsync', '-as', temp_file, - '%s:%s' % (location, config)]) + cliapp.runcmd(['rsync', '-as', temp_path, + '%s:%s~' % (location, config)]) + cliapp.ssh_runcmd(location, ['mv', config+'~', config]) except Exception as e: try: - cliapp.ssh_runcmd(location, ['mv', config+'~', config]) + cliapp.ssh_runcmd(location, ['rm', '-f', config+'~']) except: pass raise e @@ -168,19 +176,19 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): for line in contents.splitlines(): line_words = line.split() if (line_words[1] == '/' and line_words[0] != 'rootfs'): - return line_words[0] + return line_words[0] def install_remote_kernel(self, location, version_root, temp_root): '''Install the kernel in temp_root inside version_root on location''' self.status(msg='Installing kernel') - image_names = ['vmlinuz', 'zImage', 'uImage'] + image_names = ('vmlinuz', 'zImage', 'uImage') kernel_dest = os.path.join(version_root, 'kernel') for name in image_names: try_path = os.path.join(temp_root, 'boot', name) if os.path.exists(try_path): cliapp.runcmd(['rsync', '-as', try_path, - '%s:%s' % (location, kernel_dest)]) + '%s:%s' % (location, kernel_dest)]) def check_valid_target(self, location): try: @@ -189,14 +197,16 @@ class 
SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): raise cliapp.AppException('%s does not respond to ssh:\n%s' % (location, e)) - try: - cliapp.ssh_runcmd(location, ['test', '-d', '/baserock']) - except: - raise cliapp.AppException('%s is not a baserock system' % location) - - try: - cliapp.ssh_runcmd(location, ['which', 'rsync']) - except: - raise cliapp.AppException('%s does not have rsync') + output = cliapp.ssh_runcmd(location, ['sh', '-c', + 'test -d /baserock || echo -n dirnotfound']) + if output == 'dirnotfound': + raise cliapp.AppException('%s is not a baserock system' + % location) + + output = cliapp.ssh_runcmd(location, ['sh', '-c', + 'type rsync &> /dev/null || echo -n cmdnotfound']) + if output == 'cmdnotfound': + raise cliapp.AppException('%s does not have rsync' + % location) SshRsyncWriteExtension().run() -- cgit v1.2.1 From 7f10cd9a320664609f83dc24f6d25e79b49cb7d2 Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Tue, 2 Jul 2013 07:24:30 +0000 Subject: Allow to set the number of cpus for virtualbox and kvm deployments. --- kvm.write | 7 +++++-- virtualbox-ssh.write | 5 ++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/kvm.write b/kvm.write index 67ac40e7..9a6be135 100755 --- a/kvm.write +++ b/kvm.write @@ -113,8 +113,11 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): ram_mebibytes = str(self.get_ram_size() / (1024**2)) - cmdline = ['virt-install', '--connect', 'qemu:///system', '--import', - '--name', vm_name, '--vnc', '--ram=%s' % ram_mebibytes, + vcpu_count = str(self.get_vcpu_count()) + + cmdline = ['virt-install', '--connect', 'qemu:///system', + '--import', '--name', vm_name, '--vnc', + '--ram', ram_mebibytes, '--vcpus', vcpu_count, '--disk', 'path=%s,bus=ide' % vm_path] + attach_opts if not autostart: cmdline += ['--noreboot'] diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index 3ee2eae0..1abe233e 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -113,12 +113,15 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): ram_mebibytes = str(self.get_ram_size() / (1024**2)) + vcpu_count = str(self.get_vcpu_count()) + hostonly_iface = self.get_host_interface(ssh_host) commands = [ ['createvm', '--name', vm_name, '--ostype', 'Linux26_64', '--register'], - ['modifyvm', vm_name, '--ioapic', 'on', '--memory', ram_mebibytes, + ['modifyvm', vm_name, '--ioapic', 'on', + '--memory', ram_mebibytes, '--cpus', vcpu_count, '--nic1', 'hostonly', '--hostonlyadapter1', hostonly_iface, '--nic2', 'nat', '--natnet2', 'default'], ['storagectl', vm_name, '--name', '"SATA Controller"', -- cgit v1.2.1 From f0800ade5348fd632d7c49307e55e32746d073a7 Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Mon, 19 Aug 2013 10:23:31 +0000 Subject: Fix naming of SATA Controller in VirtualBox deployments Patch from Paul Sherwood. 
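The remote probes introduced in the refactoring above share one pattern: run a small script via 'sh -c' that prints a sentinel on stdout instead of exiting nonzero, so that cliapp.ssh_runcmd only raises when ssh itself fails. A generic sketch of the idea; note that it uses the portable '>/dev/null 2>&1', since the '&>' redirection seen in the patch is a bash extension that plain sh parses differently:

    import cliapp

    def remote_has_command(location, command):
        '''Return True if `command` exists on the ssh target.'''
        output = cliapp.ssh_runcmd(
            location,
            ['sh', '-c', 'type "$1" >/dev/null 2>&1 || echo -n missing',
             '-', command])
        return output != 'missing'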
--- virtualbox-ssh.write | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index 1abe233e..2374db31 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -124,9 +124,9 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): '--memory', ram_mebibytes, '--cpus', vcpu_count,
'--nic1', 'hostonly', '--hostonlyadapter1', hostonly_iface,
'--nic2', 'nat', '--natnet2', 'default'],
- ['storagectl', vm_name, '--name', '"SATA Controller"',
+ ['storagectl', vm_name, '--name', 'SATA Controller',
'--add', 'sata', '--bootable', 'on', '--sataportcount', '2'],
- ['storageattach', vm_name, '--storagectl', '"SATA Controller"',
+ ['storageattach', vm_name, '--storagectl', 'SATA Controller',
'--port', '0', '--device', '0', '--type', 'hdd', '--medium',
vdi_path],
]
@@ -134,7 +134,7 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): attach_disks = self.parse_attach_disks()
for device_no, disk in enumerate(attach_disks, 1):
cmd = ['storageattach', vm_name,
- '--storagectl', '"SATA Controller"',
+ '--storagectl', 'SATA Controller',
'--port', str(device_no),
'--device', '0',
'--type', 'hdd',
-- cgit v1.2.1

From f6e74b24509e35938fbbb0e7239eb8e4644afb31 Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Thu, 29 Aug 2013 16:35:26 +0000 Subject: exts: Add openstack configure/write exts

openstack.write: Write extension which deploys a raw Baserock image directly to an OpenStack machine using python-glanceclient. The deployed raw image has its bootloader modified to use virtio disks.

vdaboot.configure: Configuration extension to change the mount point of "/" to use virtio disks (/dev/vda). --- openstack.write | 140 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ vdaboot.configure | 34 +++++++++++++ 2 files changed, 174 insertions(+) create mode 100755 openstack.write create mode 100755 vdaboot.configure

diff --git a/openstack.write b/openstack.write new file mode 100755 index 00000000..8ee8767e --- /dev/null +++ b/openstack.write @@ -0,0 +1,140 @@ +#!/usr/bin/python
+# Copyright (C) 2013 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+'''A Morph deployment write extension for deploying to OpenStack.'''
+
+
+import cliapp
+import os
+import tempfile
+import urlparse
+
+import morphlib.writeexts
+
+
+class OpenStackWriteExtension(morphlib.writeexts.WriteExtension):
+
+ '''Configure a raw disk image into an OpenStack host.
+
+ The raw disk image is created during Morph's deployment and the
+ image is deployed in OpenStack using python-glanceclient.
+
+ The location command line argument is the authentication URL
+ of the OpenStack server, using the following syntax:
+
+ http://HOST:PORT/VERSION
+
+ where
+
+ * HOST is the host running OpenStack
+ * PORT is the port OpenStack uses for authentication
+ * VERSION is the authentication version of OpenStack (only v2.0
+ is supported)
+
+ This extension requires the following variables in the environment:
+
+ * OPENSTACK_USER is the username to use in the deployment.
+ * OPENSTACK_TENANT is the project name to use in the deployment.
+ * OPENSTACK_IMAGENAME is the name of the image to create.
+ * OPENSTACK_PASSWORD is the password of the user.
+
+
+ The extension will connect to OpenStack using python-glanceclient
+ to configure a raw image.
+
+ '''
+
+ def process_args(self, args):
+ if len(args) != 2:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ temp_root, location = args
+ self.check_location(location)
+
+ os_params = self.get_openstack_parameters()
+
+ fd, raw_disk = tempfile.mkstemp()
+ os.close(fd)
+ self.create_local_system(temp_root, raw_disk)
+ self.status(msg='Temporary disk image has been created at %s'
+ % raw_disk)
+
+ self.set_extlinux_root_to_virtio(raw_disk)
+
+ self.configure_openstack_image(raw_disk, location, os_params)
+
+ def set_extlinux_root_to_virtio(self, raw_disk):
+ '''Re-configures extlinux to use virtio disks'''
+ self.status(msg='Updating extlinux.conf')
+ mp = self.mount(raw_disk)
+ try:
+ path = os.path.join(mp, 'extlinux.conf')
+
+ with open(path) as f:
+ extlinux_conf = f.read()
+
+ extlinux_conf = extlinux_conf.replace('root=/dev/sda',
+ 'root=/dev/vda')
+ with open(path, "w") as f:
+ f.write(extlinux_conf)
+
+ finally:
+ self.unmount(mp)
+
+ def get_openstack_parameters(self):
+ '''Check that the needed environment variables are set, and
+ return them all.
+
+ The environment variables are described in the class documentation.
+ '''
+
+ keys = ('OPENSTACK_USER', 'OPENSTACK_TENANT',
+ 'OPENSTACK_IMAGENAME', 'OPENSTACK_PASSWORD')
+ for key in keys:
+ if key not in os.environ:
+ raise cliapp.AppException(key + ' was not given')
+ return (os.environ[key] for key in keys)
+
+ def check_location(self, location):
+ x = urlparse.urlparse(location)
+ if x.scheme != 'http':
+ raise cliapp.AppException('URL scheme must be http in %s' \
+ % location)
+ if (x.path != '/v2.0' and x.path != '/v2.0/'):
+ raise cliapp.AppException('API version must be v2.0 in %s'\
+ % location)
+
+ def configure_openstack_image(self, raw_disk, auth_url, os_params):
+ '''Configure the image in OpenStack using glance-client'''
+ self.status(msg='Configuring OpenStack image...')
+
+ username, tenant_name, image_name, password = os_params
+ cmdline = ['glance',
+ '--os-username', username,
+ '--os-tenant-name', tenant_name,
+ '--os-password', password,
+ '--os-auth-url', auth_url,
+ 'image-create',
+ '--name=%s' % image_name,
+ '--disk-format=raw',
+ '--container-format', 'bare',
+ '--file', raw_disk]
+ cliapp.runcmd(cmdline)
+
+ self.status(msg='Image configured.')
+
+OpenStackWriteExtension().run()
+
diff --git a/vdaboot.configure b/vdaboot.configure new file mode 100755 index 00000000..cb54bf0d --- /dev/null +++ b/vdaboot.configure @@ -0,0 +1,34 @@ +#!/bin/sh
+# Copyright (C) 2013 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + +# Change the "/" mount point to /dev/vda to use virtio disks. + +set -e + +if [ "$OPENSTACK_USER" ] +then + # Modifying fstab + if [ -f "$1/etc/fstab" ] + then + mv "$1/etc/fstab" "$1/etc/fstab.old" + awk 'BEGIN {print "/dev/vda / btrfs defaults,rw,noatime 0 1"}; + $2 != "/" {print $0 };' "$1/etc/fstab.old" > "$1/etc/fstab" + rm "$1/etc/fstab.old" + else + echo "/dev/vda / btrfs defaults,rw,noatime 0 1"> "$1/etc/fstab" + fi +fi -- cgit v1.2.1 From dc3d9cd1de7fcc4d0fed2ff4d958c73817415d9c Mon Sep 17 00:00:00 2001 From: Richard Maw Date: Mon, 16 Sep 2013 14:51:24 +0000 Subject: exts: Remove tab from vdaboot.configure This snuck in since the test suite could not be run when TMPDIR was on a tmpfs. --- vdaboot.configure | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vdaboot.configure b/vdaboot.configure index cb54bf0d..b88eb3a8 100755 --- a/vdaboot.configure +++ b/vdaboot.configure @@ -20,7 +20,7 @@ set -e if [ "$OPENSTACK_USER" ] -then +then # Modifying fstab if [ -f "$1/etc/fstab" ] then -- cgit v1.2.1 From 2492bd568b82c1f071a4c67800baaf6fae3e690c Mon Sep 17 00:00:00 2001 From: Dan Firth Date: Tue, 8 Oct 2013 16:00:31 +0000 Subject: Removed ssh.configure extension --- ssh.configure | 162 ---------------------------------------------------------- 1 file changed, 162 deletions(-) delete mode 100755 ssh.configure diff --git a/ssh.configure b/ssh.configure deleted file mode 100755 index 2f3167e7..00000000 --- a/ssh.configure +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2013 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -'''A Morph deployment configuration to copy SSH keys. - -Keys are copied from the host to the new system. -''' - -import cliapp -import os -import sys -import shutil -import glob -import re -import logging - -import morphlib - -class SshConfigurationExtension(cliapp.Application): - - '''Copy over SSH keys to new system from host. - - The extension requires SSH_KEY_DIR to be set at the command line as it - will otherwise pass with only a status update. SSH_KEY_DIR should be - set to the location of the SSH keys to be passed to the new system. - - ''' - - def process_args(self, args): - if 'SSH_KEY_DIR' in os.environ: - # Copies ssh_host keys. - key = 'ssh_host_*_key' - mode = 0755 - dest = os.path.join(args[0], 'etc/ssh/') - sshhost, sshhostpub = self.find_keys(key) - if sshhost or sshhostpub: - self.check_dir(dest, mode) - self.copy_keys(sshhost, sshhostpub, dest) - - # Copies root keys. 
- key = 'root_*_key' - mode = 0700 - dest = os.path.join(args[0], 'root/.ssh/') - roothost, roothostpub = self.find_keys(key) - key = 'root_authorized_key_*.pub' - authkey, bleh = self.find_keys(key) - if roothost or roothostpub: - self.check_dir(dest, mode) - self.copy_rename_keys(roothost, - roothostpub, dest, 'id_', [5, 4]) - if authkey: - self.check_dir(dest, mode) - self.comb_auth_key(authkey, dest) - - # Fills the known_hosts file - key = 'root_known_host_*_key.pub' - src = os.path.join(os.environ['SSH_KEY_DIR'], key) - known_hosts_keys = glob.glob(src) - if known_hosts_keys: - self.check_dir(dest, mode) - known_hosts_path = os.path.join(dest, 'known_hosts') - with open(known_hosts_path, "a") as known_hosts_file: - for filename in known_hosts_keys: - hostname = re.search('root_known_host_(.+?)_key.pub', - filename).group(1) - known_hosts_file.write(hostname + " ") - with open(filename, "r") as f: - shutil.copyfileobj(f, known_hosts_file) - - else: - self.status(msg="No SSH key directory found.") - pass - - def find_keys(self, key_name): - '''Uses glob to find public and - private SSH keys and returns their path''' - - src = os.path.join(os.environ['SSH_KEY_DIR'], key_name) - keys = glob.glob(src) - pubkeys = glob.glob(src + '.pub') - if not (keys or pubkeys): - self.status(msg="No SSH keys of pattern %(src)s found.", src=src) - return keys, pubkeys - - def check_dir(self, dest, mode): - '''Checks if destination directory exists - and creates it if necessary''' - - if os.path.exists(dest) == False: - self.status(msg="Creating SSH key directory: %(dest)s", dest=dest) - os.mkdir(dest) - os.chmod(dest, mode) - else: - pass - - def copy_keys(self, keys, pubkeys, dest): - '''Copies SSH keys to new VM''' - - for key in keys: - shutil.copy(key, dest) - path = os.path.join(dest, os.path.basename(key)) - os.chmod(path, 0600) - for key in pubkeys: - shutil.copy(key, dest) - path = os.path.join(dest, os.path.basename(key)) - os.chmod(path, 0644) - - def copy_rename_keys(self, keys, pubkeys, dest, new, snip): - '''Copies SSH keys to new VM and renames them''' - - st, fi = snip - for key in keys: - base = os.path.basename(key) - s = len(base) - nw_dst = os.path.join(dest, new + base[st:s-fi]) - shutil.copy(key, nw_dst) - os.chmod(nw_dst, 0600) - for key in pubkeys: - base = os.path.basename(key) - s = len(base) - nw_dst = os.path.join(dest, new + base[st:s-fi-4]) - shutil.copy(key, nw_dst + '.pub') - os.chmod(nw_dst + '.pub', 0644) - - def comb_auth_key(self, keys, dest): - '''Combines authorized_keys file in new VM''' - - dest = os.path.join(dest, 'authorized_keys') - fout = open(dest, 'a') - for key in keys: - fin = open(key, 'r') - data = fin.read() - fout.write(data) - fin.close() - fout.close() - os.chmod(dest, 0600) - - def status(self, **kwargs): - '''Provide status output. - - The ``msg`` keyword argument is the actual message, - the rest are values for fields in the message as interpolated - by %. 
- - ''' - - self.output.write('%s\n' % (kwargs['msg'] % kwargs)) - -SshConfigurationExtension().run() -- cgit v1.2.1 From 1e57089d8aa73ffc49ef80622206d43e1bd15bf0 Mon Sep 17 00:00:00 2001 From: Dan Firth Date: Thu, 10 Oct 2013 16:07:37 +0000 Subject: Deployment failures will now remove the disk image --- kvm.write | 1 + rawdisk.write | 10 ++++++++-- virtualbox-ssh.write | 2 +- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/kvm.write b/kvm.write index 9a6be135..4f877c22 100755 --- a/kvm.write +++ b/kvm.write @@ -68,6 +68,7 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): except BaseException: sys.stderr.write('Error deploying to libvirt') os.remove(raw_disk) + cliapp.ssh_runcmd(ssh_host, ['rm', '-f', vm_path]) raise else: os.remove(raw_disk) diff --git a/rawdisk.write b/rawdisk.write index a74d6905..8723ac0c 100755 --- a/rawdisk.write +++ b/rawdisk.write @@ -46,8 +46,14 @@ class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): if os.path.isfile(location): self.upgrade_local_system(location, temp_root) else: - self.create_local_system(temp_root, location) - self.status(msg='Disk image has been created at %s' % location) + try: + self.create_local_system(temp_root, location) + self.status(msg='Disk image has been created at %s' % location) + except Exception: + os.remove(location) + self.status(msg='Failure to create disk image at %s' % + location) + raise def upgrade_local_system(self, raw_disk, temp_root): mp = self.mount(raw_disk) diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index 2374db31..f18ef804 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -76,10 +76,10 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): except BaseException: sys.stderr.write('Error deploying to VirtualBox') os.remove(raw_disk) + cliapp.ssh_runcmd(ssh_host, ['rm', '-f', vdi_path]) raise else: os.remove(raw_disk) - self.status( msg='Virtual machine %(vm_name)s has been created', vm_name=vm_name) -- cgit v1.2.1 From 14eb503d24a46dc9163d2445f40c14f92143a719 Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Wed, 23 Oct 2013 17:17:41 +0000 Subject: Add fstab.configure This will allow the user to append text to /etc/fstab during a deployment, without having to write custom configuration extensions. --- fstab.configure | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100755 fstab.configure diff --git a/fstab.configure b/fstab.configure new file mode 100755 index 00000000..0100dacb --- /dev/null +++ b/fstab.configure @@ -0,0 +1,40 @@ +#!/usr/bin/python +# Copyright (C) 2013 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# =*= License: GPL-2 =*= + + +import os +import sys + + +def asciibetical(strings): + + def key(s): + return [ord(c) for c in s] + + return sorted(strings, key=key) + + +fstab_filename = os.path.join(sys.argv[1], 'etc', 'fstab') + +fstab_vars = sorted(x for x in os.environ if x.startswith('FSTAB_')) +with open(fstab_filename, 'a') as f: + for var in fstab_vars: + f.write('%s\n' % os.environ[var]) + +os.chown(fstab_filename, 0, 0) +os.chmod(fstab_filename, 0644) -- cgit v1.2.1 From 3f35c5ab40849f8f8d36a084cb3ab2ec8f17f7c6 Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Thu, 24 Oct 2013 17:35:25 +0000 Subject: Merge remote-tracking branch 'origin/liw/fstab.configure' Reviewed-by: Richard Maw At his suggestion, fixed the call to sorted() to be a call to asciibetical(). --- fstab.configure | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fstab.configure b/fstab.configure index 0100dacb..a1287ea4 100755 --- a/fstab.configure +++ b/fstab.configure @@ -31,7 +31,7 @@ def asciibetical(strings): fstab_filename = os.path.join(sys.argv[1], 'etc', 'fstab') -fstab_vars = sorted(x for x in os.environ if x.startswith('FSTAB_')) +fstab_vars = asciibetical(x for x in os.environ if x.startswith('FSTAB_')) with open(fstab_filename, 'a') as f: for var in fstab_vars: f.write('%s\n' % os.environ[var]) -- cgit v1.2.1 From 45759fcbd49784401110ea399e04cd7e8fe85d44 Mon Sep 17 00:00:00 2001 From: Richard Ipsum Date: Mon, 23 Dec 2013 14:18:59 +0000 Subject: Add optional overwrite option This option lets the install-files config extension overwrite existing files. A file will only be overwritten if the overwrite flag is specified for that file. Since the overwrite arg is optionally prepended to the manifest line, this patch should not break existing manifests With this patch default config files can be replaced with project specific config files --- install-files.configure | 42 +++++++++++++++++++++++++++++++----------- 1 file changed, 31 insertions(+), 11 deletions(-) diff --git a/install-files.configure b/install-files.configure index 669fc518..8ba61b4e 100755 --- a/install-files.configure +++ b/install-files.configure @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2013 Codethink Limited +# Copyright (C) 2013-2014 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -35,9 +35,9 @@ class InstallFilesConfigureExtension(cliapp.Application): '''Install the files specified in the manifests listed in INSTALL_FILES - The manifest is formatted as: + Entries in the manifest are formatted as: - + [overwrite] Where the filename is how the file is found inside whatever directory the manifest is stored in, and also the path within the system to @@ -47,6 +47,18 @@ class InstallFilesConfigureExtension(cliapp.Application): This extension supports files, symlinks and directories. + For example, + + 0100644 0 0 /etc/issue + + creates a regular file at /etc/issue with 644 permissions, + uid 0 and gid 0, if the file doesn't already exist. + + overwrite 0100644 0 0 /etc/issue + + creates a regular file at /etc/issue with 644 permissions, + uid 0 and gid 0, if the file already exists it is overwritten. 
+ ''' def process_args(self, args): @@ -65,14 +77,22 @@ class InstallFilesConfigureExtension(cliapp.Application): self.install_entry(entry, manifest_dir, target_root) def install_entry(self, entry, manifest_root, target_root): - entry_data = re.split('\W+', entry.strip(), maxsplit=3) - mode = int(entry_data[0], 8) - uid = int(entry_data[1]) - gid = int(entry_data[2]) - path = entry_data[3] + m = re.match('(overwrite )?([0-7]+) ([0-9]+) ([0-9]+) (\S+)', entry) + + if m: + overwrite = m.group(1) + mode = int(m.group(2), 8) # mode is octal + uid = int(m.group(3)) + gid = int(m.group(4)) + path = m.group(5) + else: + raise cliapp.AppException('Invalid manifest entry, ' + 'format: [overwrite] ' + '') + dest_path = os.path.join(target_root, './' + path) if stat.S_ISDIR(mode): - if os.path.exists(dest_path): + if os.path.exists(dest_path) and not overwrite: dest_stat = os.stat(dest_path) if (mode != dest_stat.st_mode or uid != dest_stat.st_uid @@ -86,7 +106,7 @@ class InstallFilesConfigureExtension(cliapp.Application): os.chmod(dest_path, mode) elif stat.S_ISLNK(mode): - if os.path.lexists(dest_path): + if os.path.lexists(dest_path) and not overwrite: raise cliapp.AppException('Symlink already exists at %s' % dest_path) else: @@ -96,7 +116,7 @@ class InstallFilesConfigureExtension(cliapp.Application): os.lchown(dest_path, uid, gid) elif stat.S_ISREG(mode): - if os.path.lexists(dest_path): + if os.path.lexists(dest_path) and not overwrite: raise cliapp.AppException('File already exists at %s' % dest_path) else: -- cgit v1.2.1 From 8fd0a807b70b19ac5a59da4e05a3abe5652ef34a Mon Sep 17 00:00:00 2001 From: Richard Maw Date: Wed, 22 Jan 2014 17:42:18 +0000 Subject: virtualbox-ssh: Work around change in VBox options VirtualBox changed a command line option in 4.3 incompatibly, so we now have to check the version number and change an option from --sataportcount to --portcount if the version of VirtualBox running on the target is at least 4.3 This turns the version into a tuple and compares it against another, since it's more reliable than comparing strings, which will count '1.10' as earlier than '1.2', and more convenient than comparing the digits individually. --- virtualbox-ssh.write | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index f18ef804..369c0d61 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -106,6 +106,19 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): str(os.path.getsize(raw_disk))], stdin=f) + def virtualbox_version(self, ssh_host): + 'Get the version number of the VirtualBox running on the remote host.' 
+ + # --version gives a build id, which looks something like + # 1.2.3r456789, so we need to strip the suffix off and get a tuple + # of the (major, minor, patch) version, since comparing with a + # tuple is more reliable than a string and more convenient than + # comparing against the major, minor and patch numbers directly + self.status(msg='Checking version of remote VirtualBox') + build_id = cliapp.ssh_runcmd(ssh_host, ['VBoxManage', '--version']) + version_string = re.match(r"^([0-9\.])+.*$", build_id.strip()).group(1) + return tuple(int(s or '0') for s in version_string.split('.')) + def create_virtualbox_guest(self, ssh_host, vm_name, vdi_path, autostart): '''Create the VirtualBox virtual machine.''' @@ -117,6 +130,11 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): hostonly_iface = self.get_host_interface(ssh_host) + if self.virtualbox_version(ssh_host) < (4, 3): + sataportcount_option = '--sataportcount' + else: + sataportcount_option = '--portcount' + commands = [ ['createvm', '--name', vm_name, '--ostype', 'Linux26_64', '--register'], @@ -125,7 +143,7 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): '--nic1', 'hostonly', '--hostonlyadapter1', hostonly_iface, '--nic2', 'nat', '--natnet2', 'default'], ['storagectl', vm_name, '--name', 'SATA Controller', - '--add', 'sata', '--bootable', 'on', '--sataportcount', '2'], + '--add', 'sata', '--bootable', 'on', sataportcount_option, '2'], ['storageattach', vm_name, '--storagectl', 'SATA Controller', '--port', '0', '--device', '0', '--type', 'hdd', '--medium', vdi_path], -- cgit v1.2.1 From fc7b833170c66acb4672184146d560c6fda20183 Mon Sep 17 00:00:00 2001 From: Richard Maw Date: Tue, 28 Jan 2014 18:17:09 +0000 Subject: Fix copyright year in previous commit --- virtualbox-ssh.write | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index 369c0d61..204b2447 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2012-2013 Codethink Limited +# Copyright (C) 2012-2014 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by -- cgit v1.2.1 From e419d582cfd0ea873b393b2773c1b0670d16afe0 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Wed, 12 Feb 2014 19:03:05 +0000 Subject: deploy: Finish off the Btrfs system layout implementation The shared state directories defined in writeexts.py (/var, /home etc.) are now separate Btrfs subvolumes that are mounted in place using fstab. There are some warnings on mounting /var and /srv about the mountpoint not being empty. Not yet investigated. If a configure extension has already added / to the fstab, use the device it chose rather than assuming /dev/sda. This is required for the vdaboot.configure extension that we use for OpenStack deployments. Similarly, if a configure extension has added an entry for a state directory in /etc/fstab already, we don't replace it with a /state/xxx directory. That's only done as a default behaviour. 
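Two pieces of supporting logic in this series are easy to get wrong, so rough standalone sketches follow. First, the VBoxManage version check added above: comparing versions as integer tuples avoids the string-comparison pitfall of '1.10' sorting before '1.2', but note that the repetition quantifier must sit inside the capture group, since r'([0-9\.])+' captures only the last single character of the run:

    import re

    def parse_vbox_version(build_id):
        # '4.3.12r93733' -> (4, 3, 12)
        version_string = re.match(r'([0-9.]+)', build_id.strip()).group(1)
        return tuple(int(s) for s in version_string.split('.') if s)

    assert parse_vbox_version('4.3.12r93733') == (4, 3, 12)
    assert parse_vbox_version('4.2.16r86992') < (4, 3)

Second, the complete_fstab_for_btrfs_layout() helper called in the diff below lives in morphlib's writeexts.py and is not shown in this series. A sketch of the behaviour the commit message describes, with the exact mount options and subvolume paths treated as assumptions:

    def complete_fstab(fstab_lines, state_dirs=('home', 'srv', 'var')):
        '''Add default entries for / and the shared state directories.'''
        mounted = {}
        for line in fstab_lines:
            words = line.split()
            if words and not words[0].startswith('#'):
                mounted[words[1]] = words[0]
        # Honour a root device already chosen by a configure extension,
        # e.g. /dev/vda from vdaboot.configure; default to /dev/sda.
        root_device = mounted.get('/', '/dev/sda')
        if '/' not in mounted:
            fstab_lines.append('%s / btrfs defaults,rw,noatime 0 1'
                               % root_device)
        for state_dir in state_dirs:
            mount_point = '/' + state_dir
            if mount_point not in mounted:
                fstab_lines.append(
                    '%s %s btrfs subvol=state/%s,defaults,rw,noatime 0 2'
                    % (root_device, mount_point, state_dir))
        return fstab_lines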
--- rawdisk.write | 2 ++ ssh-rsync.write | 2 ++ 2 files changed, 4 insertions(+) diff --git a/rawdisk.write b/rawdisk.write index 8723ac0c..1b4d58c0 100755 --- a/rawdisk.write +++ b/rawdisk.write @@ -56,6 +56,8 @@ class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): raise def upgrade_local_system(self, raw_disk, temp_root): + self.complete_fstab_for_btrfs_layout(temp_root) + mp = self.mount(raw_disk) version_label = self.get_version_label(mp) diff --git a/ssh-rsync.write b/ssh-rsync.write index 211dbe5e..fe72bc9a 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -47,6 +47,8 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): self.upgrade_remote_system(location, temp_root) def upgrade_remote_system(self, location, temp_root): + self.complete_fstab_for_btrfs_layout(temp_root) + root_disk = self.find_root_disk(location) version_label = os.environ.get('VERSION_LABEL') -- cgit v1.2.1 From 9886dd3e919f7dc66b9099e3c8ab1be79404ae31 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Fri, 14 Feb 2014 12:08:33 +0000 Subject: deploy: Depend on client OS version manager to deploy upgrades We now have a OS version manager tool in Baserock (in tbdiff.git). The code to deploy a new base OS version should live there, to minimise duplication between write extensions. --- ssh-rsync.write | 132 ++++++++++++++++++-------------------------------------- 1 file changed, 41 insertions(+), 91 deletions(-) diff --git a/ssh-rsync.write b/ssh-rsync.write index fe72bc9a..4961ee4d 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2013 Codethink Limited +# Copyright (C) 2013-2014 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -26,6 +26,14 @@ import tempfile import morphlib.writeexts + +def ssh_runcmd_ignore_failure(location, command, **kwargs): + try: + return cliapp.ssh_runcmd(location, command, **kwargs) + except cliapp.AppException: + pass + + class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): '''Upgrade a running baserock system with ssh and rsync. 
@@ -58,15 +66,11 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): self.status(msg='Mounting root disk') cliapp.ssh_runcmd(location, ['mount', root_disk, remote_mnt]) except Exception as e: - try: - cliapp.ssh_runcmd(location, ['rmdir', remote_mnt]) - except: - pass + ssh_runcmd_ignore_failure(location, ['rmdir', remote_mnt]) raise e try: version_root = os.path.join(remote_mnt, 'systems', version_label) - run_dir = os.path.join(version_root, 'run') orig_dir = os.path.join(version_root, 'orig') self.status(msg='Creating %s' % version_root) @@ -75,81 +79,32 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): self.create_remote_orig(location, version_root, remote_mnt, temp_root) - self.status(msg='Creating "run" subvolume') - cliapp.ssh_runcmd(location, ['btrfs', 'subvolume', - 'snapshot', orig_dir, run_dir]) - - self.status(msg='Updating system configuration') - bscs_loc = os.path.join(run_dir, 'usr', 'bin', - 'baserock-system-config-sync') - - output = cliapp.ssh_runcmd(location, ['sh', '-c', - '"$1" merge "$2" &> /dev/null || echo -n cmdfailed', - '-', bscs_loc, version_label]) - if output == "cmdfailed": - self.status(msg='Updating system configuration failed') - - self.install_remote_kernel(location, version_root, temp_root) - default_path = os.path.join(remote_mnt, 'systems', 'default') - if self.bootloader_is_wanted(): - output = cliapp.ssh_runcmd(location, ['sh', '-c', - 'test -e "$1" && stat -c %F "$1" ' - '|| echo missing file', - '-', default_path]) - if output != "symbolic link": - # we are upgrading and old system that does - # not have an updated extlinux config file - self.update_remote_extlinux(location, remote_mnt, - version_label) - cliapp.ssh_runcmd(location, ['ln', '-sfn', version_label, - default_path]) + # Use the system-version-manager from the new system we just + # installed, so that we can upgrade from systems that don't have + # it installed. 
+ self.status(msg='Calling system-version-manager to deploy upgrade') + deployment = os.path.join('/systems', version_label, 'orig') + system_config_sync = os.path.join( + remote_mnt, 'systems', version_label, 'orig', 'usr', 'bin', + 'baserock-system-config-sync') + system_version_manager = os.path.join( + remote_mnt, 'systems', version_label, 'orig', 'usr', 'bin', + 'system-version-manager') + cliapp.ssh_runcmd(location, + ['env', 'BASEROCK_SYSTEM_CONFIG_SYNC='+system_config_sync, + system_version_manager, 'deploy', deployment]) except Exception as e: - try: - cliapp.ssh_runcmd(location, - ['btrfs', 'subvolume', 'delete', run_dir]) - except: - pass - try: - cliapp.ssh_runcmd(location, - ['btrfs', 'subvolume', 'delete', orig_dir]) - except: - pass - try: - cliapp.ssh_runcmd(location, ['rm', '-rf', version_root]) - except: - pass + self.status(msg='Deployment failed') + ssh_runcmd_ignore_failure( + location, ['btrfs', 'subvolume', 'delete', orig_dir]) + ssh_runcmd_ignore_failure( + location, ['rm', '-rf', version_root]) raise e finally: self.status(msg='Removing temporary mounts') cliapp.ssh_runcmd(location, ['umount', remote_mnt]) cliapp.ssh_runcmd(location, ['rmdir', remote_mnt]) - def update_remote_extlinux(self, location, remote_mnt, version_label): - '''Install/reconfigure extlinux on location''' - - self.status(msg='Creating extlinux.conf') - config = os.path.join(remote_mnt, 'extlinux.conf') - temp_fd, temp_path = tempfile.mkstemp() - with os.fdopen(temp_fd, 'w') as f: - f.write('default linux\n') - f.write('timeout 1\n') - f.write('label linux\n') - f.write('kernel /systems/default/kernel\n') - f.write('append root=/dev/sda ' - 'rootflags=subvol=systems/default/run ' - 'init=/sbin/init rw\n') - - try: - cliapp.runcmd(['rsync', '-as', temp_path, - '%s:%s~' % (location, config)]) - cliapp.ssh_runcmd(location, ['mv', config+'~', config]) - except Exception as e: - try: - cliapp.ssh_runcmd(location, ['rm', '-f', config+'~']) - except: - pass - raise e - def create_remote_orig(self, location, version_root, remote_mnt, temp_root): '''Create the subvolume version_root/orig on location''' @@ -180,18 +135,6 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): if (line_words[1] == '/' and line_words[0] != 'rootfs'): return line_words[0] - def install_remote_kernel(self, location, version_root, temp_root): - '''Install the kernel in temp_root inside version_root on location''' - - self.status(msg='Installing kernel') - image_names = ('vmlinuz', 'zImage', 'uImage') - kernel_dest = os.path.join(version_root, 'kernel') - for name in image_names: - try_path = os.path.join(temp_root, 'boot', name) - if os.path.exists(try_path): - cliapp.runcmd(['rsync', '-as', try_path, - '%s:%s' % (location, kernel_dest)]) - def check_valid_target(self, location): try: cliapp.ssh_runcmd(location, ['true']) @@ -205,10 +148,17 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): raise cliapp.AppException('%s is not a baserock system' % location) - output = cliapp.ssh_runcmd(location, ['sh', '-c', - 'type rsync &> /dev/null || echo -n cmdnotfound']) - if output == 'cmdnotfound': - raise cliapp.AppException('%s does not have rsync' - % location) + def check_command_exists(command): + test = 'type %s > /dev/null 2>&1 || echo -n cmdnotfound' % command + output = cliapp.ssh_runcmd(location, ['sh', '-c', test]) + if output == 'cmdnotfound': + raise cliapp.AppException( + "%s does not have %s" % (location, command)) + + # The deploy requires baserock-system-config-sync and + # 
system-version-manager in the new system only. The old system doesn't + # need to have them at all. + check_command_exists('rsync') + SshRsyncWriteExtension().run() -- cgit v1.2.1 From 539eed7e4045ef39377f4c7fe6cc9314799b9f4d Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Tue, 4 Mar 2014 11:49:02 +0000 Subject: deploy: Always set new system as default --- ssh-rsync.write | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ssh-rsync.write b/ssh-rsync.write index 4961ee4d..8dc0fe35 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -93,6 +93,11 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): cliapp.ssh_runcmd(location, ['env', 'BASEROCK_SYSTEM_CONFIG_SYNC='+system_config_sync, system_version_manager, 'deploy', deployment]) + + self.status(msg='Setting %s as the new default system' % + version_label) + cliapp.ssh_runcmd(location, + [system_version_manager, 'set-default', version_label]) except Exception as e: self.status(msg='Deployment failed') ssh_runcmd_ignore_failure( -- cgit v1.2.1 From e8b7abe65fcab62ecb84b7f0c2c6f9dde4e63ea7 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Mon, 17 Feb 2014 15:36:56 +0000 Subject: Make parse_autostart() into more general get_environment_boolean() Also, be more flexible when parsing environment booleans -- convert to lower case and match 0/1 and true/false as well as yes/no. --- kvm.write | 4 ++-- rawdisk.write | 2 +- virtualbox-ssh.write | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/kvm.write b/kvm.write index 4f877c22..94560972 100755 --- a/kvm.write +++ b/kvm.write @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2012-2013 Codethink Limited +# Copyright (C) 2012-2014 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): temp_root, location = args ssh_host, vm_name, vm_path = self.parse_location(location) - autostart = self.parse_autostart() + autostart = self.get_environment_boolean('AUTOSTART') fd, raw_disk = tempfile.mkstemp() os.close(fd) diff --git a/rawdisk.write b/rawdisk.write index 1b4d58c0..bde9d67d 100755 --- a/rawdisk.write +++ b/rawdisk.write @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2012-2013 Codethink Limited +# Copyright (C) 2012-2014 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index 204b2447..2a2f3f7b 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -62,7 +62,7 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): temp_root, location = args ssh_host, vm_name, vdi_path = self.parse_location(location) - autostart = self.parse_autostart() + autostart = self.get_environment_boolean('AUTOSTART') fd, raw_disk = tempfile.mkstemp() os.close(fd) -- cgit v1.2.1 From 9293be701e6b8ae2a1017bc5df9b80c85c735173 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Mon, 17 Feb 2014 13:31:47 +0000 Subject: deploy: Honour AUTOSTART in ssh-rsync extension Now you can deploy an upgrade, set it to be the default version and reboot into it all with one call to `morph deploy`. 
--- ssh-rsync.write | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ssh-rsync.write b/ssh-rsync.write index 8dc0fe35..509520ae 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -59,6 +59,7 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): root_disk = self.find_root_disk(location) version_label = os.environ.get('VERSION_LABEL') + autostart = self.get_environment_boolean('AUTOSTART') self.status(msg='Creating remote mount point') remote_mnt = cliapp.ssh_runcmd(location, ['mktemp', '-d']).strip() @@ -110,6 +111,10 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): cliapp.ssh_runcmd(location, ['umount', remote_mnt]) cliapp.ssh_runcmd(location, ['rmdir', remote_mnt]) + if autostart: + self.status(msg="Rebooting into new system ...") + ssh_runcmd_ignore_failure(location, ['reboot']) + def create_remote_orig(self, location, version_root, remote_mnt, temp_root): '''Create the subvolume version_root/orig on location''' -- cgit v1.2.1 From c808549169a7704cdd5928e0d75a30ecc8036487 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Mon, 17 Feb 2014 16:17:00 +0000 Subject: deploy: Check the --upgrade flag has been used correctly. Most write extensions don't handle both initial deployments and upgrades of a system. --- kvm.check | 35 +++++++++++++++++++++++++++++++++++ nfsboot.check | 34 ++++++++++++++++++++++++++++++++++ openstack.check | 35 +++++++++++++++++++++++++++++++++++ ssh-rsync.check | 36 ++++++++++++++++++++++++++++++++++++ tar.check | 24 ++++++++++++++++++++++++ virtualbox-ssh.check | 35 +++++++++++++++++++++++++++++++++++ 6 files changed, 199 insertions(+) create mode 100755 kvm.check create mode 100755 nfsboot.check create mode 100755 openstack.check create mode 100755 ssh-rsync.check create mode 100755 tar.check create mode 100755 virtualbox-ssh.check diff --git a/kvm.check b/kvm.check new file mode 100755 index 00000000..be7c51c2 --- /dev/null +++ b/kvm.check @@ -0,0 +1,35 @@ +#!/usr/bin/python +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +'''Preparatory checks for Morph 'kvm' write extension''' + +import cliapp + +import morphlib.writeexts + + +class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension): + def process_args(self, args): + if len(args) != 1: + raise cliapp.AppException('Wrong number of command line args') + + upgrade = self.get_environment_boolean('UPGRADE') + if upgrade: + raise cliapp.AppException( + 'Use the `ssh-rsync` write extension to deploy upgrades to an ' + 'existing remote system.') + +KvmPlusSshCheckExtension().run() diff --git a/nfsboot.check b/nfsboot.check new file mode 100755 index 00000000..092a1df7 --- /dev/null +++ b/nfsboot.check @@ -0,0 +1,34 @@ +#!/usr/bin/python +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +'''Preparatory checks for Morph 'nfsboot' write extension''' + +import cliapp + +import morphlib.writeexts + + +class NFSBootCheckExtension(morphlib.writeexts.WriteExtension): + def process_args(self, args): + if len(args) != 1: + raise cliapp.AppException('Wrong number of command line args') + + upgrade = self.get_environment_boolean('UPGRADE') + if upgrade: + raise cliapp.AppException( + 'Upgrading is not currently supported for NFS deployments.') + +NFSBootCheckExtension().run() diff --git a/openstack.check b/openstack.check new file mode 100755 index 00000000..a9a8fe1b --- /dev/null +++ b/openstack.check @@ -0,0 +1,35 @@ +#!/usr/bin/python +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +'''Preparatory checks for Morph 'openstack' write extension''' + +import cliapp + +import morphlib.writeexts + + +class OpenStackCheckExtension(morphlib.writeexts.WriteExtension): + def process_args(self, args): + if len(args) != 1: + raise cliapp.AppException('Wrong number of command line args') + + upgrade = self.get_environment_boolean('UPGRADE') + if upgrade: + raise cliapp.AppException( + 'Use the `ssh-rsync` write extension to deploy upgrades to an ' + 'existing remote system.') + +OpenStackCheckExtension().run() diff --git a/ssh-rsync.check b/ssh-rsync.check new file mode 100755 index 00000000..90029cb4 --- /dev/null +++ b/ssh-rsync.check @@ -0,0 +1,36 @@ +#!/usr/bin/python +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +'''Preparatory checks for Morph 'ssh-rsync' write extension''' + +import cliapp + +import morphlib.writeexts + + +class SshRsyncCheckExtension(morphlib.writeexts.WriteExtension): + def process_args(self, args): + if len(args) != 1: + raise cliapp.AppException('Wrong number of command line args') + + upgrade = self.get_environment_boolean('UPGRADE') + if not upgrade: + raise cliapp.AppException( + 'The ssh-rsync write is for upgrading existing remote ' + 'Baserock machines. It cannot be used for an initial ' + 'deployment.') + +SshRsyncCheckExtension().run() diff --git a/tar.check b/tar.check new file mode 100755 index 00000000..cbeaf163 --- /dev/null +++ b/tar.check @@ -0,0 +1,24 @@ +#!/bin/sh +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +# Preparatory checks for Morph 'tar' write extension + +set -eu + +if [ "$UPGRADE" == "yes" ]; then + echo >&2 "ERROR: Cannot upgrade a tar file deployment." + exit 1 +fi diff --git a/virtualbox-ssh.check b/virtualbox-ssh.check new file mode 100755 index 00000000..1aeb8999 --- /dev/null +++ b/virtualbox-ssh.check @@ -0,0 +1,35 @@ +#!/usr/bin/python +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +'''Preparatory checks for Morph 'virtualbox-ssh' write extension''' + +import cliapp + +import morphlib.writeexts + + +class VirtualBoxPlusSshCheckExtension(morphlib.writeexts.WriteExtension): + def process_args(self, args): + if len(args) != 1: + raise cliapp.AppException('Wrong number of command line args') + + upgrade = self.get_environment_boolean('UPGRADE') + if upgrade: + raise cliapp.AppException( + 'Use the `ssh-rsync` write extension to deploy upgrades to an ' + 'existing remote system.') + +VirtualBoxPlusSshCheckExtension().run() -- cgit v1.2.1 From 0bccd86aa45da7ddde2486c21cee939dc871c53f Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Mon, 17 Feb 2014 17:03:54 +0000 Subject: deploy: Fix double exception in rawdisk.write If the disk image was not yet created then the os.remove() call fails and the original exception gets lost, causing confusion and sadness. Also print status earlier on failure --- rawdisk.write | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rawdisk.write b/rawdisk.write index bde9d67d..87edf7bf 100755 --- a/rawdisk.write +++ b/rawdisk.write @@ -50,9 +50,10 @@ class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): self.create_local_system(temp_root, location) self.status(msg='Disk image has been created at %s' % location) except Exception: - os.remove(location) self.status(msg='Failure to create disk image at %s' % location) + if os.path.exists(location): + os.remove(location) raise def upgrade_local_system(self, raw_disk, temp_root): -- cgit v1.2.1 From 8e48860748a4455420fdccfb00395f7f395e8e3c Mon Sep 17 00:00:00 2001 From: Richard Maw Date: Mon, 10 Mar 2014 16:59:07 +0000 Subject: Add sysroot write extension This moves the deployed system to somewhere on the host. Any existing contents of the directory is deleted, so don't try to be clever and deploy a new system on top of / in place of a proper upgrade. It can be used to deploy a chroot, sysroot or container, but its current use is to allow for nested deployments to include another system in itself, since the parent deployment's "$1" is prepended to the sub-deployment's "$2". --- sysroot.write | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100755 sysroot.write diff --git a/sysroot.write b/sysroot.write new file mode 100755 index 00000000..1ae4864f --- /dev/null +++ b/sysroot.write @@ -0,0 +1,29 @@ +#!/bin/sh +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +# A Morph write extension to deploy to another directory + +set -eu + +# Ensure the target is an empty directory +mkdir -p "$2" +find "$2" -mindepth 1 -delete + +# Move the contents of our source directory to our target +# Previously we would (cd "$1" && find -print0 | cpio -0pumd "$absolute_path") +# to do this, but the source directory is disposable anyway, so we can move +# its contents to save time +find "$1" -maxdepth 1 -mindepth 1 -exec mv {} "$2/." + -- cgit v1.2.1 From 5e686455f6cd4ef5870933d60544af78b167a545 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Tue, 18 Mar 2014 17:29:13 +0000 Subject: Remove fstab hack from nfsboot.configure There is no longer a default /etc/fstab in the Baserock fhs-dirs chunk, and the nfsboot.write extension does not use the default Btrfs system layout so no entry is added for / to /etc/fstab at deploy-time. We cannot have / in /etc/fstab for nfsboot deployments because it causes systemd to remount / during bootup, which breaks everything. --- nfsboot.configure | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/nfsboot.configure b/nfsboot.configure index 8dc6c67c..d6b254d4 100755 --- a/nfsboot.configure +++ b/nfsboot.configure @@ -1,5 +1,5 @@ #!/bin/sh -# Copyright (C) 2013 Codethink Limited +# Copyright (C) 2013-2014 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -26,7 +26,4 @@ auto lo iface lo inet loopback EOF - # Stop fstab from mounting '/' - mv "$1/etc/fstab" "$1/etc/fstab.old" - awk '/^ *#/ || $2 != "/"' "$1/etc/fstab.old" > "$1/etc/fstab" fi -- cgit v1.2.1 From fbf84afbb9c0bc27816ffcf310a681335c4775ee Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Tue, 18 Mar 2014 17:40:58 +0000 Subject: Improve comment in nfsboot.configure --- nfsboot.configure | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nfsboot.configure b/nfsboot.configure index d6b254d4..660d9c39 100755 --- a/nfsboot.configure +++ b/nfsboot.configure @@ -15,7 +15,9 @@ # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# Remove all networking interfaces and stop fstab from mounting '/' +# Remove all networking interfaces. On nfsboot systems, eth0 is set up +# during kernel init, and the normal ifup@eth0.service systemd unit +# would break the NFS connection and cause the system to hang. set -e -- cgit v1.2.1 From 9faeac0a60b40256454e7049964898f3c749fd62 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Tue, 18 Mar 2014 17:32:59 +0000 Subject: Make sanity checks for nfsboot deployments run earlier Move some code to the '.check' extension to verify that the deployment can happen *before* spending 5 minutes unpacking and configuring the rootfs. This is not a perfect solution yet because when multiple systems are being deployed in a cluster, we do not check all systems and then deploy them all. Instead, we check one, then deploy it, then check the second, etc. 
--- nfsboot.check | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ nfsboot.write | 59 ++++------------------------------------------------ 2 files changed, 70 insertions(+), 55 deletions(-) diff --git a/nfsboot.check b/nfsboot.check index 092a1df7..e240dd72 100755 --- a/nfsboot.check +++ b/nfsboot.check @@ -17,18 +17,84 @@ '''Preparatory checks for Morph 'nfsboot' write extension''' import cliapp +import os import morphlib.writeexts class NFSBootCheckExtension(morphlib.writeexts.WriteExtension): + + _nfsboot_root = '/srv/nfsboot' + def process_args(self, args): if len(args) != 1: raise cliapp.AppException('Wrong number of command line args') + location = args[0] + upgrade = self.get_environment_boolean('UPGRADE') if upgrade: raise cliapp.AppException( 'Upgrading is not currently supported for NFS deployments.') + hostname = os.environ.get('HOSTNAME', None) + if hostname is None: + raise cliapp.AppException('You must specify a HOSTNAME.') + if hostname == 'baserock': + raise cliapp.AppException('It is forbidden to nfsboot a system ' + 'with hostname "%s"' % hostname) + + self.test_good_server(location) + + version_label = os.getenv('VERSION_LABEL', 'factory') + versioned_root = os.path.join(self._nfsboot_root, hostname, 'systems', + version_label) + if self.version_exists(versioned_root, location): + raise cliapp.AppException( + 'Root file system for host %s (version %s) already exists on ' + 'the NFS server %s. Deployment aborted.' % (hostname, + version_label, location)) + + def test_good_server(self, server): + # Can be ssh'ed into + try: + cliapp.ssh_runcmd('root@%s' % server, ['true']) + except cliapp.AppException: + raise cliapp.AppException('You are unable to ssh into server %s' + % server) + + # Is an NFS server + try: + cliapp.ssh_runcmd( + 'root@%s' % server, ['test', '-e', '/etc/exports']) + except cliapp.AppException: + raise cliapp.AppException('server %s is not an nfs server' + % server) + try: + cliapp.ssh_runcmd( + 'root@%s' % server, ['systemctl', 'is-enabled', + 'nfs-server.service']) + + except cliapp.AppException: + raise cliapp.AppException('server %s does not control its ' + 'nfs server by systemd' % server) + + # TFTP server exports /srv/nfsboot/tftp + try: + cliapp.ssh_runcmd( + 'root@%s' % server, ['test' , '-d', '/srv/nfsboot/tftp']) + except cliapp.AppException: + raise cliapp.AppException('server %s does not export ' + '/srv/nfsboot/tftp' % server) + + def version_exists(self, versioned_root, location): + try: + cliapp.ssh_runcmd('root@%s' % location, + ['test', '-d', versioned_root]) + except cliapp.AppException: + return False + + return True + + NFSBootCheckExtension().run() diff --git a/nfsboot.write b/nfsboot.write index 34a72972..3bb66821 100755 --- a/nfsboot.write +++ b/nfsboot.write @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2013 Codethink Limited +# Copyright (C) 2013-2014 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -60,38 +60,18 @@ class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): raise cliapp.AppException('Wrong number of command line args') temp_root, location = args - hostname = self.get_hostname(temp_root) - if hostname == 'baserock': - raise cliapp.AppException('It is forbidden to nfsboot a system ' - 'with hostname "baserock"') - self.test_good_server(location) version_label = os.getenv('VERSION_LABEL', 'factory') + hostname = os.environ.get('HOSTNAME') + versioned_root = 
os.path.join(self._nfsboot_root, hostname, 'systems', version_label) - if self.version_exists(versioned_root, location): - raise cliapp.AppException('Version %s already exists on' - ' this device. Deployment aborted' - % version_label) + self.copy_rootfs(temp_root, location, versioned_root, hostname) self.copy_kernel(temp_root, location, versioned_root, version_label, hostname) self.configure_nfs(location, hostname) - def version_exists(self, versioned_root, location): - try: - cliapp.ssh_runcmd('root@%s' % location, - ['test', '-d', versioned_root]) - except cliapp.AppException: - return False - - return True - - def get_hostname(self, temp_root): - hostnamepath = os.path.join(temp_root, 'etc', 'hostname') - with open(hostnamepath) as f: - return f.readline().strip() - def create_local_state(self, location, hostname): statedir = os.path.join(self._nfsboot_root, hostname, 'state') subdirs = [os.path.join(statedir, 'home'), @@ -209,37 +189,6 @@ mv "$temp" "$target" 'root@%s' % location, ['systemctl', 'restart', 'nfs-server.service']) - def test_good_server(self, server): - # Can be ssh'ed into - try: - cliapp.ssh_runcmd('root@%s' % server, ['true']) - except cliapp.AppException: - raise cliapp.AppException('You are unable to ssh into server %s' - % server) - - # Is an NFS server - try: - cliapp.ssh_runcmd( - 'root@%s' % server, ['test', '-e', '/etc/exports']) - except cliapp.AppException: - raise cliapp.AppException('server %s is not an nfs server' - % server) - try: - cliapp.ssh_runcmd( - 'root@%s' % server, ['systemctl', 'is-enabled', - 'nfs-server.service']) - - except cliapp.AppException: - raise cliapp.AppException('server %s does not control its ' - 'nfs server by systemd' % server) - - # TFTP server exports /srv/nfsboot/tftp - try: - cliapp.ssh_runcmd( - 'root@%s' % server, ['test' , '-d', '/srv/nfsboot/tftp']) - except cliapp.AppException: - raise cliapp.AppException('server %s does not export ' - '/srv/nfsboot/tftp' % server) NFSBootWriteExtension().run() -- cgit v1.2.1 From 5847e6d821748b386d68e8e982d8efcb3358e4e9 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Mon, 24 Mar 2014 16:24:49 +0000 Subject: Be consistent with nfsboot_root path We were building it from a variable in some places and hardcoding it in others; now we build it from a variable everywhere. 
--- nfsboot.check | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nfsboot.check b/nfsboot.check index e240dd72..f84f187f 100755 --- a/nfsboot.check +++ b/nfsboot.check @@ -80,12 +80,13 @@ class NFSBootCheckExtension(morphlib.writeexts.WriteExtension): 'nfs server by systemd' % server) # TFTP server exports /srv/nfsboot/tftp + tftp_root = os.path.join(self._nfsboot_root, 'tftp') try: cliapp.ssh_runcmd( - 'root@%s' % server, ['test' , '-d', '/srv/nfsboot/tftp']) + 'root@%s' % server, ['test', '-d', tftp_root]) except cliapp.AppException: - raise cliapp.AppException('server %s does not export ' - '/srv/nfsboot/tftp' % server) + raise cliapp.AppException('server %s does not export %s' % + (server, tftp_root)) def version_exists(self, versioned_root, location): try: -- cgit v1.2.1 From 703572829cb5e4d2a45109020a8f7a32c031f554 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Mon, 24 Mar 2014 16:26:30 +0000 Subject: Use os.environ[] instead of os.environ.get() to read a required variable --- nfsboot.write | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nfsboot.write b/nfsboot.write index 3bb66821..8d3d6df7 100755 --- a/nfsboot.write +++ b/nfsboot.write @@ -62,7 +62,7 @@ class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): temp_root, location = args version_label = os.getenv('VERSION_LABEL', 'factory') - hostname = os.environ.get('HOSTNAME') + hostname = os.environ['HOSTNAME'] versioned_root = os.path.join(self._nfsboot_root, hostname, 'systems', version_label) -- cgit v1.2.1 From faa30c752633303baf53cf61de5bae42f43f6ab0 Mon Sep 17 00:00:00 2001 From: Mark Doffman Date: Tue, 1 Apr 2014 03:27:53 +0000 Subject: Add help files for nfsboot, rawdisk and tar. Begin adding help documentation for configuration and write extensions, starting with nfsboot.write, rawdisk.write and tar.write. --- nfsboot.write.help | 12 ++++++++++++ rawdisk.write.help | 7 +++++++ tar.write.help | 5 +++++ 3 files changed, 24 insertions(+) create mode 100644 nfsboot.write.help create mode 100644 rawdisk.write.help create mode 100644 tar.write.help diff --git a/nfsboot.write.help b/nfsboot.write.help new file mode 100644 index 00000000..598b1b23 --- /dev/null +++ b/nfsboot.write.help @@ -0,0 +1,12 @@ +help: | + Deploy a system image and kernel to an nfsboot server. + + An nfsboot server is defined as a baserock system that has + tftp and nfs servers running; the tftp server exports + the contents of /srv/nfsboot/tftp/, and the user has sufficient + permissions to create nfs roots in /srv/nfsboot/nfs/. + + The `location` argument is the hostname of the nfsboot server. + + The extension will connect to root@HOST via ssh to copy the + kernel and rootfs, and configure the nfs server. diff --git a/rawdisk.write.help b/rawdisk.write.help new file mode 100644 index 00000000..a514a4e8 --- /dev/null +++ b/rawdisk.write.help @@ -0,0 +1,7 @@ +help: | + Create a raw disk image during Morph's deployment. + + If the image already exists, it is upgraded. + + The `location` argument is a pathname to the image to be + created or upgraded. diff --git a/tar.write.help b/tar.write.help new file mode 100644 index 00000000..f052ac03 --- /dev/null +++ b/tar.write.help @@ -0,0 +1,5 @@ +help: | + Create a .tar file of the deployed system. + + The `location` argument is a pathname to the .tar file to be + created.
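To show how these `location` arguments are supplied in practice, a hypothetical cluster morphology using the tar extension might look like this (system and file names invented for the example):

    name: example-cluster
    kind: cluster
    systems:
    - morph: minimal-system-x86_64-generic
      deploy:
        my-tarball:
          type: tar
          location: /src/tmp/minimal-system.tar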
-- cgit v1.2.1 From 0328bdbf7c3d2def974ca3279fe8732f6f8fa968 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Fri, 11 Apr 2014 12:29:32 +0000 Subject: deploy: Check SSH connection for KVM deployment before starting Slight duplication is necessary, but it's only a few lines. We could move the duplicated code into the base class in 'morphlib.writeexts' if there was more duplication. --- kvm.check | 29 +++++++++++++++++++++++++++++ kvm.write | 9 +++------ 2 files changed, 32 insertions(+), 6 deletions(-) diff --git a/kvm.check b/kvm.check index be7c51c2..04c25069 100755 --- a/kvm.check +++ b/kvm.check @@ -17,11 +17,16 @@ '''Preparatory checks for Morph 'kvm' write extension''' import cliapp +import re +import urlparse import morphlib.writeexts class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension): + + location_pattern = '^/(?P[^/]+)(?P/.+)$' + def process_args(self, args): if len(args) != 1: raise cliapp.AppException('Wrong number of command line args') @@ -32,4 +37,28 @@ class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension): 'Use the `ssh-rsync` write extension to deploy upgrades to an ' 'existing remote system.') + location = args[0] + ssh_host, vm_name, vm_path = self.check_and_parse_location(location) + + try: + cliapp.ssh_runcmd(ssh_host, ['true']) + except cliapp.AppException: + raise cliapp.AppException('Unable to SSH to %s' % ssh_host) + + def check_and_parse_location(self, location): + '''Check and parse the location argument to get relevant data.''' + + x = urlparse.urlparse(location) + + if x.scheme != 'kvm+ssh': + raise cliapp.AppException( + 'URL schema must be kvm+ssh in %s' % location) + + m = re.match(self.location_pattern, x.path) + if not m: + raise cliapp.AppException('Cannot parse location %s' % location) + + return x.netloc, m.group('guest'), m.group('path') + + KvmPlusSshCheckExtension().run() diff --git a/kvm.write b/kvm.write index 94560972..94a55daa 100755 --- a/kvm.write +++ b/kvm.write @@ -50,6 +50,8 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): ''' + location_pattern = '^/(?P[^/]+)(?P/.+)$' + def process_args(self, args): if len(args) != 2: raise cliapp.AppException('Wrong number of command line args') @@ -79,14 +81,9 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): def parse_location(self, location): '''Parse the location argument to get relevant data.''' - + x = urlparse.urlparse(location) - if x.scheme != 'kvm+ssh': - raise cliapp.AppException( - 'URL schema must be vbox+ssh in %s' % location) m = re.match('^/(?P[^/]+)(?P/.+)$', x.path) - if not m: - raise cliapp.AppException('Cannot parse location %s' % location) return x.netloc, m.group('guest'), m.group('path') def transfer(self, raw_disk, ssh_host, vm_path): -- cgit v1.2.1 From 7223d97177b570020941562c9c054584d41c5d7c Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Mon, 14 Apr 2014 12:35:26 +0000 Subject: deploy: Share SSH connectivity check in the common writeexts.py code Also, change it to log the real error message in morph.log before raising a more general exception to the user. 
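The shared helper itself lives in morphlib and is not part of this patch series; a plausible sketch of it, based on the code being removed below and the commit message above, is:

    import logging

    import cliapp

    # Hypothetical reconstruction of the shared check_ssh_connectivity()
    # method on morphlib.writeexts.WriteExtension.
    def check_ssh_connectivity(self, ssh_host):
        try:
            cliapp.ssh_runcmd(ssh_host, ['true'])
        except cliapp.AppException as e:
            # Log the real error for morph.log, then raise a friendlier one.
            logging.error('Unable to SSH to %s: %s', ssh_host, e)
            raise cliapp.AppException('Unable to SSH to %s' % ssh_host)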
--- kvm.check | 5 +---- nfsboot.check | 7 +------ 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/kvm.check b/kvm.check index 04c25069..6ce52e7e 100755 --- a/kvm.check +++ b/kvm.check @@ -40,10 +40,7 @@ class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension): location = args[0] ssh_host, vm_name, vm_path = self.check_and_parse_location(location) - try: - cliapp.ssh_runcmd(ssh_host, ['true']) - except cliapp.AppException: - raise cliapp.AppException('Unable to SSH to %s' % ssh_host) + self.check_ssh_connectivity(ssh_host) def check_and_parse_location(self, location): '''Check and parse the location argument to get relevant data.''' diff --git a/nfsboot.check b/nfsboot.check index f84f187f..806e560a 100755 --- a/nfsboot.check +++ b/nfsboot.check @@ -56,12 +56,7 @@ class NFSBootCheckExtension(morphlib.writeexts.WriteExtension): version_label, location)) def test_good_server(self, server): - # Can be ssh'ed into - try: - cliapp.ssh_runcmd('root@%s' % server, ['true']) - except cliapp.AppException: - raise cliapp.AppException('You are unable to ssh into server %s' - % server) + self.check_ssh_connectivity(server) # Is an NFS server try: -- cgit v1.2.1 From 8bb0d71ba1ada3c26b6bfa553c2ceb831c161818 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Mon, 14 Apr 2014 13:19:20 +0000 Subject: deploy: Extra checks for KVM deployments Test that a VM with the given name does not already exist, and check that the files specified in ATTACH_DISKS do already exist. --- kvm.check | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/kvm.check b/kvm.check index 6ce52e7e..957d0893 100755 --- a/kvm.check +++ b/kvm.check @@ -41,6 +41,8 @@ class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension): ssh_host, vm_name, vm_path = self.check_and_parse_location(location) self.check_ssh_connectivity(ssh_host) + self.check_no_existing_libvirt_vm(ssh_host, vm_name) + self.check_extra_disks_exist(ssh_host, self.parse_attach_disks()) def check_and_parse_location(self, location): '''Check and parse the location argument to get relevant data.''' @@ -57,5 +59,24 @@ class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension): return x.netloc, m.group('guest'), m.group('path') + def check_no_existing_libvirt_vm(self, ssh_host, vm_name): + try: + cliapp.ssh_runcmd(ssh_host, + ['virsh', '--connect', 'qemu:///system', 'domstate', vm_name]) + except cliapp.AppException as e: + pass + else: + raise cliapp.AppException( + 'Host %s already has a VM named %s. You can use the ssh-rsync ' + 'write extension to deploy upgrades to existing machines.' 
% + (ssh_host, vm_name)) + + def check_extra_disks_exist(self, ssh_host, filename_list): + for filename in filename_list: + try: + cliapp.ssh_runcmd(ssh_host, ['ls', filename]) + except cliapp.AppException as e: + raise cliapp.AppException('Did not find file %s on host %s' % + (filename, ssh_host)) KvmPlusSshCheckExtension().run() -- cgit v1.2.1 From a0e42d6be4e73d58f3568264a399cf6a39fc3e42 Mon Sep 17 00:00:00 2001 From: Paul Sherwood Date: Sun, 20 Apr 2014 15:40:50 +0000 Subject: fix the Virtualbox version checking --- virtualbox-ssh.write | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index 2a2f3f7b..b9d53579 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -116,7 +116,7 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): # comparing against the major, minor and patch numbers directly self.status(msg='Checking version of remote VirtualBox') build_id = cliapp.ssh_runcmd(ssh_host, ['VBoxManage', '--version']) - version_string = re.match(r"^([0-9\.])+.*$", build_id.strip()).group(1) + version_string = re.match(r"^([0-9\.]+).*$", build_id.strip()).group(1) return tuple(int(s or '0') for s in version_string.split('.')) def create_virtualbox_guest(self, ssh_host, vm_name, vdi_path, autostart): @@ -130,7 +130,7 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): hostonly_iface = self.get_host_interface(ssh_host) - if self.virtualbox_version(ssh_host) < (4, 3): + if self.virtualbox_version(ssh_host) < (4, 3, 0): sataportcount_option = '--sataportcount' else: sataportcount_option = '--portcount' -- cgit v1.2.1 From 59697ddb89674493dd3621ffaf8fbf34f04668db Mon Sep 17 00:00:00 2001 From: Richard Ipsum Date: Thu, 8 May 2014 18:43:15 +0100 Subject: Add help for install-files.configure Move the help out of the comment and into a help file, and add a clearer example. --- install-files.configure | 28 --------------------- install-files.configure.help | 60 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 28 deletions(-) create mode 100644 install-files.configure.help diff --git a/install-files.configure b/install-files.configure index 8ba61b4e..04dc5f18 100755 --- a/install-files.configure +++ b/install-files.configure @@ -33,34 +33,6 @@ import stat class InstallFilesConfigureExtension(cliapp.Application): - '''Install the files specified in the manifests listed in INSTALL_FILES - - Entries in the manifest are formatted as: - - [overwrite] - - Where the filename is how the file is found inside whatever directory - the manifest is stored in, and also the path within the system to - install to. - - Directories on the target must be created if they do not exist. - - This extension supports files, symlinks and directories. - - For example, - - 0100644 0 0 /etc/issue - - creates a regular file at /etc/issue with 644 permissions, - uid 0 and gid 0, if the file doesn't already exist. - - overwrite 0100644 0 0 /etc/issue - - creates a regular file at /etc/issue with 644 permissions, - uid 0 and gid 0, if the file already exists it is overwritten. 
- - ''' - - def process_args(self, args): if not 'INSTALL_FILES' in os.environ: return diff --git a/install-files.configure.help b/install-files.configure.help new file mode 100644 index 00000000..eb3aab0c --- /dev/null +++ b/install-files.configure.help @@ -0,0 +1,60 @@ +help: | + Install a set of files onto a system + + To use this extension you create a directory of files you want to install + onto the target system. + + In this example we want to copy some ssh keys onto a system: + + % mkdir sshkeyfiles + % mkdir -p sshkeyfiles/root/.ssh + % cp id_rsa sshkeyfiles/root/.ssh + % cp id_rsa.pub sshkeyfiles/root/.ssh + + Now we need to create a manifest file to set the file modes + and permissions. The manifest file should be created inside the + directory that contains the files we're trying to install. + + cat << EOF > sshkeyfiles/manifest + 0040755 0 0 /root/.ssh + 0100600 0 0 /root/.ssh/id_rsa + 0100644 0 0 /root/.ssh/id_rsa.pub + EOF + + Then we add the path to our manifest to our cluster morph; + this path should be relative to the system definitions repository. + + INSTALL_FILES: sshkeyfiles/manifest + + More generally, entries in the manifest are formatted as: + [overwrite] <mode> <uid> <gid> <filename> + + NOTE: Directories on the target must be created if they do not exist. + + The extension supports files, symlinks and directories. + + For example, + + 0100644 0 0 /etc/issue + + creates a regular file at /etc/issue with 644 permissions, + uid 0 and gid 0, if the file doesn't already exist. + + overwrite 0100644 0 0 /etc/issue + + creates a regular file at /etc/issue with 644 permissions, + uid 0 and gid 0; if the file already exists it is overwritten. + + 0100755 0 0 /usr/bin/foo + + creates an executable file at /usr/bin/foo + + 0040755 0 0 /etc/foodir + + creates a directory with 755 permissions + + 0120000 0 0 /usr/bin/bar + + creates a symlink at /usr/bin/bar + + NOTE: You will still need to make a symlink in the manifest directory. -- cgit v1.2.1 From 38c8a61410e4ba73d87d43a6101ecefb28433f13 Mon Sep 17 00:00:00 2001 From: Daniel Silverstone Date: Fri, 16 May 2014 09:39:37 +0000 Subject: VirtualBox Write Extension: Vagrant support Add support to the VirtualBox write extension to notice if we are doing a Vagrant Basebox installation and, in that case, skip the clever network setup we normally do to allow machines to talk to one another, since this confuses Vagrant quite a bit if it is left in.
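The patch below keys its behaviour off a VAGRANT deployment variable via `get_environment_boolean`. That helper is defined in morphlib.writeexts and is not shown in this series; a reasonable sketch of its behaviour is:

    import os

    import cliapp

    # Illustrative sketch, not the actual morphlib source.
    def get_environment_boolean(self, variable):
        '''Parse a yes/no-style boolean out of the deployment environment.'''
        value = os.environ.get(variable, 'no').lower()
        if value in ('no', '0', 'false'):
            return False
        elif value in ('yes', '1', 'true'):
            return True
        else:
            raise cliapp.AppException(
                'Unexpected value for %s: %s' % (variable, value))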
--- virtualbox-ssh.write | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index b9d53579..47584b83 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -63,7 +63,9 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): temp_root, location = args ssh_host, vm_name, vdi_path = self.parse_location(location) autostart = self.get_environment_boolean('AUTOSTART') - + + vagrant = self.get_environment_boolean('VAGRANT') + fd, raw_disk = tempfile.mkstemp() os.close(fd) self.create_local_system(temp_root, raw_disk) @@ -72,7 +74,7 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): self.transfer_and_convert_to_vdi( raw_disk, ssh_host, vdi_path) self.create_virtualbox_guest(ssh_host, vm_name, vdi_path, - autostart) + autostart, vagrant) except BaseException: sys.stderr.write('Error deploying to VirtualBox') os.remove(raw_disk) @@ -119,7 +121,7 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): version_string = re.match(r"^([0-9\.]+).*$", build_id.strip()).group(1) return tuple(int(s or '0') for s in version_string.split('.')) - def create_virtualbox_guest(self, ssh_host, vm_name, vdi_path, autostart): + def create_virtualbox_guest(self, ssh_host, vm_name, vdi_path, autostart, vagrant): '''Create the VirtualBox virtual machine.''' self.status(msg='Create VirtualBox virtual machine') @@ -128,7 +130,8 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): vcpu_count = str(self.get_vcpu_count()) - hostonly_iface = self.get_host_interface(ssh_host) + if not vagrant: + hostonly_iface = self.get_host_interface(ssh_host) if self.virtualbox_version(ssh_host) < (4, 3, 0): sataportcount_option = '--sataportcount' @@ -139,15 +142,20 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): ['createvm', '--name', vm_name, '--ostype', 'Linux26_64', '--register'], ['modifyvm', vm_name, '--ioapic', 'on', - '--memory', ram_mebibytes, '--cpus', vcpu_count, - '--nic1', 'hostonly', '--hostonlyadapter1', hostonly_iface, - '--nic2', 'nat', '--natnet2', 'default'], + '--memory', ram_mebibytes, '--cpus', vcpu_count], ['storagectl', vm_name, '--name', 'SATA Controller', '--add', 'sata', '--bootable', 'on', sataportcount_option, '2'], ['storageattach', vm_name, '--storagectl', 'SATA Controller', '--port', '0', '--device', '0', '--type', 'hdd', '--medium', vdi_path], ] + if vagrant: + commands[1].extend(['--nic1', 'nat', + '--natnet1', 'default']) + else: + commands[1].extend(['--nic1', 'hostonly', + '--hostonlyadapter1', hostonly_iface, + '--nic2', 'nat', '--natnet2', 'default']) attach_disks = self.parse_attach_disks() for device_no, disk in enumerate(attach_disks, 1): -- cgit v1.2.1 From ddaf9840bbe3ad3a45fff341b1cfdf2985fd760b Mon Sep 17 00:00:00 2001 From: Daniel Silverstone Date: Fri, 16 May 2014 16:18:54 +0000 Subject: Fix long line --- virtualbox-ssh.write | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index 47584b83..42585f5e 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -121,7 +121,8 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): version_string = re.match(r"^([0-9\.]+).*$", build_id.strip()).group(1) return tuple(int(s or '0') for s in version_string.split('.')) - def create_virtualbox_guest(self, ssh_host, vm_name, vdi_path, autostart, vagrant): + def create_virtualbox_guest(self, ssh_host, vm_name, 
vdi_path, autostart, + vagrant): '''Create the VirtualBox virtual machine.''' self.status(msg='Create VirtualBox virtual machine') -- cgit v1.2.1 From 6efbcd6ef631a79f73d2429622296ddfbde09003 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Tue, 20 May 2014 09:37:49 +0000 Subject: deploy: Do sanity checks earlier in ssh-rsync (upgrade) extension --- ssh-rsync.check | 24 ++++++++++++++++++++++++ ssh-rsync.write | 26 -------------------------- 2 files changed, 24 insertions(+), 26 deletions(-) diff --git a/ssh-rsync.check b/ssh-rsync.check index 90029cb4..6a776ce9 100755 --- a/ssh-rsync.check +++ b/ssh-rsync.check @@ -33,4 +33,28 @@ class SshRsyncCheckExtension(morphlib.writeexts.WriteExtension): 'Baserock machines. It cannot be used for an initial ' 'deployment.') + location = args[0] + self.check_ssh_connectivity(location) + self.check_is_baserock_system(location) + + # The new system that being deployed as an upgrade must contain + # baserock-system-config-sync and system-version-manager. However, the + # old system simply needs to have SSH and rsync. + self.check_command_exists(location, 'rsync') + + def check_is_baserock_system(self, location): + output = cliapp.ssh_runcmd(location, ['sh', '-c', + 'test -d /baserock || echo -n dirnotfound']) + if output == 'dirnotfound': + raise cliapp.AppException('%s is not a baserock system' + % location) + + def check_command_exists(self, location, command): + test = 'type %s > /dev/null 2>&1 || echo -n cmdnotfound' % command + output = cliapp.ssh_runcmd(location, ['sh', '-c', test]) + if output == 'cmdnotfound': + raise cliapp.AppException( + "%s does not have %s" % (location, command)) + + SshRsyncCheckExtension().run() diff --git a/ssh-rsync.write b/ssh-rsync.write index 509520ae..c139b6c0 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -51,7 +51,6 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): temp_root, location = args - self.check_valid_target(location) self.upgrade_remote_system(location, temp_root) def upgrade_remote_system(self, location, temp_root): @@ -145,30 +144,5 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): if (line_words[1] == '/' and line_words[0] != 'rootfs'): return line_words[0] - def check_valid_target(self, location): - try: - cliapp.ssh_runcmd(location, ['true']) - except Exception as e: - raise cliapp.AppException('%s does not respond to ssh:\n%s' - % (location, e)) - - output = cliapp.ssh_runcmd(location, ['sh', '-c', - 'test -d /baserock || echo -n dirnotfound']) - if output == 'dirnotfound': - raise cliapp.AppException('%s is not a baserock system' - % location) - - def check_command_exists(command): - test = 'type %s > /dev/null 2>&1 || echo -n cmdnotfound' % command - output = cliapp.ssh_runcmd(location, ['sh', '-c', test]) - if output == 'cmdnotfound': - raise cliapp.AppException( - "%s does not have %s" % (location, command)) - - # The deploy requires baserock-system-config-sync and - # system-version-manager in the new system only. The old system doesn't - # need to have them at all. - check_command_exists('rsync') - SshRsyncWriteExtension().run() -- cgit v1.2.1 From dd380544e7c3a9f6c47e23639bb0a45958c3e446 Mon Sep 17 00:00:00 2001 From: Richard Maw Date: Mon, 2 Jun 2014 10:53:29 +0000 Subject: Add initramfs write extension This creates a gzipped cpio archive that may be used as an initramfs. It is hard-coded to use gzip to compress the initramfs, since it's the most common way to do it. 
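An archive produced this way can be sanity-checked with standard tools; for example (output file name assumed):

    # List the members of the generated initramfs without unpacking it.
    zcat initramfs.gz | cpio -it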
This is unfortunate, since the busybox gzip utility only allows maximum compression, which is rather slow and doesn't give progress reporting, so you can easily think it's gotten stuck. It's possible to use other compression formats, but they need the kernel to be built with them supported, and in the case of lz4, unusual userland tools to create it, since the version of lz4 supported in the kernel is not what the standard lz4 tools produce. --- initramfs.write | 27 +++++++++++++++++++++++++++ initramfs.write.help | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100755 initramfs.write create mode 100644 initramfs.write.help diff --git a/initramfs.write b/initramfs.write new file mode 100755 index 00000000..815772f2 --- /dev/null +++ b/initramfs.write @@ -0,0 +1,27 @@ +#!/bin/sh +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# =*= License: GPL-2 =*= + +set -e + +ROOTDIR="$1" +INITRAMFS_PATH="$2" + +(cd "$ROOTDIR" && + find . -print0 | + cpio -0 -H newc -o | + gzip -c) >"$INITRAMFS_PATH" diff --git a/initramfs.write.help b/initramfs.write.help new file mode 100644 index 00000000..29a9d266 --- /dev/null +++ b/initramfs.write.help @@ -0,0 +1,35 @@ +help: | + Create an initramfs for a system by taking an existing system and + converting it to the appropriate format. + + The system must have a `/init` executable as the userland entry-point. + This can have a different path, if `rdinit=$path` is added to + the kernel command line. This can be added to the `rawdisk`, + `virtualbox-ssh` and `kvm` write extensions with the `KERNEL_CMDLINE` + option. + + It is possible to use a ramfs as the final rootfs without a `/init` + executable, by setting `root=/dev/mem`, or `rdinit=/sbin/init`, + but this is beyond the scope for the `initramfs.write` extension. + + The intended use of initramfs.write is to be part of a nested + deployment, so the parent system has an initramfs stored as + `/boot/initramfs.gz`. See the following example: + + name: initramfs-test + kind: cluster + systems: + - morph: minimal-system-x86_64-generic + deploy: + system: + type: rawdisk + location: initramfs-system-x86_64.img + DISK_SIZE: 1G + HOSTNAME: initramfs-system + INITRAMFS_PATH: boot/initramfs.gz + subsystems: + - morph: initramfs-x86_64 + deploy: + initramfs: + type: initramfs + location: boot/initramfs.gz -- cgit v1.2.1 From 3a8c02eb8d9beac6ec6c0570c9a50de72b23fc52 Mon Sep 17 00:00:00 2001 From: Richard Maw Date: Mon, 2 Jun 2014 11:09:29 +0000 Subject: Add initramfs support to write extensions that produce disks If INITRAMFS_PATH is specified and the file exists, then the produced kernel command line will use root=UUID=$uuid_of_created_disk rather than root=/dev/sda, which may be incorrect. Help files have been updated to mention the new option. 
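As an illustration, the generated extlinux stanza would then plausibly read something like the following; the paths and layout here are an example only, and the UUID placeholder stands for the value read from the freshly created filesystem:

    default linux
    timeout 1

    label linux
    kernel /boot/vmlinuz
    initrd /boot/initramfs.gz
    append root=UUID=<uuid-of-created-disk> rw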
This leads to an unfortunate duplication of the path to the initramfs, in both the location field of the nested deployment and the INITRAMFS_PATH of the disk image creation. However, an initramfs could be produced by a chunk and put in the same place, so it doesn't make sense to couple the rawdisk and initramfs write extensions to remove this duplication. Similarly, there may be multiple valid initramfs in the rootfs e.g. extlinux loads a hypervisor, which is Linux + initramfs, and the initramfs then boots a guest Linux system, which uses a different initramfs. This makes it important to explicitly let the rootfs write extensions know which to use, or not as the case may be. util-linux's blkid is required, since the busybox version ignores the options to filter its output, and parsing the output is undesirable. Because syslinux's btrfs subvolume support is limited to being able to use a non-0 default subvolume, the initramfs has to be copied out of the run-time rootfs subvolume and into the boot subvolume. This pushed the required disk space of a minimal system over the 512M threshold because we do not have the userland tooling support to be able to do a btrfs file contents clone. --- kvm.write.help | 4 ++++ rawdisk.write.help | 4 ++++ virtualbox-ssh.write.help | 4 ++++ 3 files changed, 12 insertions(+) create mode 100644 kvm.write.help create mode 100644 virtualbox-ssh.write.help diff --git a/kvm.write.help b/kvm.write.help new file mode 100644 index 00000000..8b5053a5 --- /dev/null +++ b/kvm.write.help @@ -0,0 +1,4 @@ +help: | + The INITRAMFS_PATH option can be used to specify the location of an + initramfs for syslinux to tell Linux to use, rather than booting + the rootfs directly. diff --git a/rawdisk.write.help b/rawdisk.write.help index a514a4e8..298d441c 100644 --- a/rawdisk.write.help +++ b/rawdisk.write.help @@ -5,3 +5,7 @@ help: | The `location` argument is a pathname to the image to be created or upgraded. + + The INITRAMFS_PATH option can be used to specify the location of an + initramfs for syslinux to tell Linux to use, rather than booting + the rootfs directly. diff --git a/virtualbox-ssh.write.help b/virtualbox-ssh.write.help new file mode 100644 index 00000000..8b5053a5 --- /dev/null +++ b/virtualbox-ssh.write.help @@ -0,0 +1,4 @@ +help: | + The INITRAMFS_PATH option can be used to specify the location of an + initramfs for syslinux to tell Linux to use, rather than booting + the rootfs directly. -- cgit v1.2.1 From 6063df929fb29d152b0b1b7fadf2f2c3dc7327c4 Mon Sep 17 00:00:00 2001 From: Richard Maw Date: Wed, 4 Jun 2014 16:06:29 +0000 Subject: initramfs.write: create parent directories of location --- initramfs.write | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/initramfs.write b/initramfs.write index 815772f2..f8af6d84 100755 --- a/initramfs.write +++ b/initramfs.write @@ -23,5 +23,5 @@ INITRAMFS_PATH="$2" (cd "$ROOTDIR" && find . 
-print0 | - cpio -0 -H newc -o | - gzip -c) >"$INITRAMFS_PATH" + cpio -0 -H newc -o) | + gzip -c | install -D -m644 /dev/stdin "$INITRAMFS_PATH" -- cgit v1.2.1 From cc059a443b1dbcd214075dcaf95d601e47ca655f Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Tue, 10 Jun 2014 17:18:37 +0000 Subject: Check for presence of btrfs before trying to use it If btrfs is not present in the kernel we end up with strange output like this: Error creating disk image2014-06-10 16:00:40 [devel-system-x86_64-generic][my-raw-disk-image][rawdisk.write]Failure to create disk image at /src/tmp/testdev.img ERROR: Command failed: mount -o loop /src/tmp/testdev.img /src/tmp/deployments/tmpQ7wXO1/tmp4lVDcu/tmpvHSzDE mount: mounting /dev/loop0 on /src/tmp/deployments/tmpQ7wXO1/tmp4lVDcu/tmpvHSzDE failed: Device or resource busy To avoid this confusing error, Morph should explicitly check first. --- kvm.check | 2 ++ openstack.check | 2 ++ rawdisk.check | 31 +++++++++++++++++++++++++++++++ virtualbox-ssh.check | 2 ++ 4 files changed, 37 insertions(+) create mode 100755 rawdisk.check diff --git a/kvm.check b/kvm.check index 957d0893..1bb4007a 100755 --- a/kvm.check +++ b/kvm.check @@ -31,6 +31,8 @@ class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension): if len(args) != 1: raise cliapp.AppException('Wrong number of command line args') + self.require_btrfs_in_deployment_host_kernel() + upgrade = self.get_environment_boolean('UPGRADE') if upgrade: raise cliapp.AppException( diff --git a/openstack.check b/openstack.check index a9a8fe1b..b5173011 100755 --- a/openstack.check +++ b/openstack.check @@ -26,6 +26,8 @@ class OpenStackCheckExtension(morphlib.writeexts.WriteExtension): if len(args) != 1: raise cliapp.AppException('Wrong number of command line args') + self.require_btrfs_in_deployment_host_kernel() + upgrade = self.get_environment_boolean('UPGRADE') if upgrade: raise cliapp.AppException( diff --git a/rawdisk.check b/rawdisk.check new file mode 100755 index 00000000..6a656ee7 --- /dev/null +++ b/rawdisk.check @@ -0,0 +1,31 @@ +#!/usr/bin/python +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +'''Preparatory checks for Morph 'rawdisk' write extension''' + +import cliapp + +import morphlib.writeexts + + +class RawdiskCheckExtension(morphlib.writeexts.WriteExtension): + def process_args(self, args): + if len(args) != 1: + raise cliapp.AppException('Wrong number of command line args') + + self.require_btrfs_in_deployment_host_kernel() + +RawdiskCheckExtension().run() diff --git a/virtualbox-ssh.check b/virtualbox-ssh.check index 1aeb8999..57d54db1 100755 --- a/virtualbox-ssh.check +++ b/virtualbox-ssh.check @@ -26,6 +26,8 @@ class VirtualBoxPlusSshCheckExtension(morphlib.writeexts.WriteExtension): if len(args) != 1: raise cliapp.AppException('Wrong number of command line args') + self.require_btrfs_in_deployment_host_kernel() + upgrade = self.get_environment_boolean('UPGRADE') if upgrade: raise cliapp.AppException( -- cgit v1.2.1 From 0da35ea65a40526cd395b3f47b51743366efccbc Mon Sep 17 00:00:00 2001 From: Richard Maw Date: Tue, 12 Aug 2014 13:59:09 +0000 Subject: Merge remote-tracking branch 'origin/baserock/james/writeexts_support_jetson' --- rawdisk.write | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/rawdisk.write b/rawdisk.write index 87edf7bf..1c2c5a84 100755 --- a/rawdisk.write +++ b/rawdisk.write @@ -85,11 +85,12 @@ class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): else: # we are upgrading and old system that does # not have an updated extlinux config file - if self.bootloader_is_wanted(): - self.install_extlinux(mp) + if self.bootloader_config_is_wanted(): + self.generate_bootloader_config(mp) + self.install_bootloader(mp) os.symlink(version_label, default_path) - if self.bootloader_is_wanted(): + if self.bootloader_config_is_wanted(): self.install_kernel(version_root, temp_root) self.unmount(mp) -- cgit v1.2.1 From 59d03b0ae4d1643bba0f0b2b83e85b7068092819 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Tue, 19 Aug 2014 15:05:05 +0000 Subject: deploy: Check correct usage of --upgrade for rawdisk deployments This avoids confusion when the user expected to be doing an initial deployment and wasn't aware that a file with the same name as the target already existed. Previously rawdisk.write would try to mount the file and upgrade it. Now we require the user to pass '--upgrade' when they intend to upgrade, as with other deployment extensions. --- rawdisk.check | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/rawdisk.check b/rawdisk.check index 6a656ee7..5e75abe2 100755 --- a/rawdisk.check +++ b/rawdisk.check @@ -20,6 +20,8 @@ import cliapp import morphlib.writeexts +import os + class RawdiskCheckExtension(morphlib.writeexts.WriteExtension): def process_args(self, args): @@ -28,4 +30,23 @@ class RawdiskCheckExtension(morphlib.writeexts.WriteExtension): self.require_btrfs_in_deployment_host_kernel() + location = args[0] + upgrade = self.get_environment_boolean('UPGRADE') + if upgrade: + if not os.path.isfile(location): + raise cliapp.AppException( + 'Cannot upgrade %s: it is not an existing disk image' % + location) + + version_label = os.environ.get('VERSION_LABEL') + if version_label is None: + raise cliapp.AppException( + 'VERSION_LABEL was not given. It is required when ' + 'upgrading an existing system.') + else: + if os.path.exists(location): + raise cliapp.AppException( + 'Target %s already exists. Pass --upgrade if you want to ' + 'update an existing image.' 
% location) + RawdiskCheckExtension().run() -- cgit v1.2.1 From 8c4f5dd9adce099693c53d14c1a549d5b4fa88d1 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Fri, 29 Aug 2014 23:18:45 +0100 Subject: Add `morph upgrade` command, deprecate `morph deploy --upgrade` The arguments to `morph deploy` can get quite long, any way we can make it shorter and clearer is useful. We can also avoid having the strange --no-upgrade flag in future. --- rawdisk.check | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rawdisk.check b/rawdisk.check index 5e75abe2..acdc4de1 100755 --- a/rawdisk.check +++ b/rawdisk.check @@ -46,7 +46,7 @@ class RawdiskCheckExtension(morphlib.writeexts.WriteExtension): else: if os.path.exists(location): raise cliapp.AppException( - 'Target %s already exists. Pass --upgrade if you want to ' - 'update an existing image.' % location) + 'Target %s already exists. Use `morph upgrade` if you ' + 'want to update an existing image.' % location) RawdiskCheckExtension().run() -- cgit v1.2.1 From 7f566024ace800ca358e1bc73ce7e5c237ed6e21 Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Fri, 5 Sep 2014 14:50:31 +0100 Subject: Check OpenStack credentials in openstack.check If the credentials are wrong, then morph will fail before attempting the OpenStack deployment. To achieve that openstack.check will attempt to run `glance image-list`. --- openstack.check | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ openstack.write | 14 +------------- 2 files changed, 49 insertions(+), 13 deletions(-) diff --git a/openstack.check b/openstack.check index b5173011..d9d3ef24 100755 --- a/openstack.check +++ b/openstack.check @@ -17,6 +17,8 @@ '''Preparatory checks for Morph 'openstack' write extension''' import cliapp +import os +import urlparse import morphlib.writeexts @@ -34,4 +36,50 @@ class OpenStackCheckExtension(morphlib.writeexts.WriteExtension): 'Use the `ssh-rsync` write extension to deploy upgrades to an ' 'existing remote system.') + location = args[0] + self.check_location(location) + + os_params = self.get_openstack_parameters() + + self.check_openstack_parameters(location, os_params) + + def get_openstack_parameters(self): + '''Check the environment variables needed and returns all. + + The environment variables are described in the class documentation. 
+ ''' + + keys = ('OPENSTACK_USER', 'OPENSTACK_TENANT', + 'OPENSTACK_IMAGENAME', 'OPENSTACK_PASSWORD') + for key in keys: + if key not in os.environ: + raise cliapp.AppException(key + ' was not given') + return (os.environ[key] for key in keys) + + + def check_location(self, location): + x = urlparse.urlparse(location) + if x.scheme not in ['http', 'https']: + raise cliapp.AppException('URL schema must be http or https in %s' \ + % location) + if (x.path != '/v2.0' and x.path != '/v2.0/'): + raise cliapp.AppException('API version must be v2.0 in %s'\ + % location) + + def check_openstack_parameters(self, auth_url, os_params): + '''Check OpenStack credentials using glance image-list''' + self.status(msg='Checking OpenStack credentials...') + + username, tenant_name, image_name, password = os_params + cmdline = ['glance', + '--os-username', username, + '--os-tenant-name', tenant_name, + '--os-password', password, + '--os-auth-url', auth_url, + 'image-list'] + try: + cliapp.runcmd(cmdline) + except cliapp.AppException: + raise cliapp.AppException('Wrong OpenStack credentals.') + OpenStackCheckExtension().run() diff --git a/openstack.write b/openstack.write index 8ee8767e..ac2e2c8a 100755 --- a/openstack.write +++ b/openstack.write @@ -96,27 +96,15 @@ class OpenStackWriteExtension(morphlib.writeexts.WriteExtension): self.unmount(mp) def get_openstack_parameters(self): - '''Check the environment variables needed and returns all. + '''Get the environment variables needed. The environment variables are described in the class documentation. ''' keys = ('OPENSTACK_USER', 'OPENSTACK_TENANT', 'OPENSTACK_IMAGENAME', 'OPENSTACK_PASSWORD') - for key in keys: - if key not in os.environ: - raise cliapp.AppException(key + ' was not given') return (os.environ[key] for key in keys) - def check_location(self, location): - x = urlparse.urlparse(location) - if x.scheme != 'http': - raise cliapp.AppException('URL schema must be http in %s' \ - % location) - if (x.path != '/v2.0' and x.path != '/v2.0/'): - raise cliapp.AppException('API version must be v2.0 in %s'\ - % location) - def configure_openstack_image(self, raw_disk, auth_url, os_params): '''Configure the image in OpenStack using glance-client''' self.status(msg='Configuring OpenStack image...') -- cgit v1.2.1 From 65ed235de623fd152dd2967b9ff2e1f60626c658 Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Fri, 8 Aug 2014 13:17:01 +0000 Subject: Transfer sparse files faster for kvm, vbox deployment The KVM and VirtualBox deployments use sparse files for raw disk images. This means they can store a large disk (say, tens or hundreds of gigabytes) without using more disk space than is required for the actual content (e.g., a gigabyte or so for the files in the root filesystem). The kernel and filesystem make the unwritten parts of the disk image look as if they are filled with zero bytes. This is good. However, during deployment those sparse files get transferred as if there really are a lot of zeroes. Those zeroes take a lot of time to transfer. rsync, for example, does not handle large holes efficiently. This change introduces a couple of helper tools (morphlib/xfer-hole and morphlib/recv-hole), which transfer the holes more efficiently. The xfer-hole program reads a file and outputs records like these: DATA 123 binary data (exaclyt 123 bytes and no newline at the end) HOLE 3245 xfer-hole can do this efficiently, without having to read through all the zeroes in the holes, using the SEEK_DATA and SEEK_HOLE arguments to lseek. 
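The underlying mechanism is worth spelling out. Below is a minimal Python 3 sketch of sparse-extent enumeration with lseek; it illustrates the technique only and is not the actual xfer-hole source:

    import errno
    import os

    def iter_extents(path):
        '''Yield ('HOLE'|'DATA', offset, length) runs of a sparse file.'''
        fd = os.open(path, os.O_RDONLY)
        try:
            size = os.fstat(fd).st_size
            pos = 0
            while pos < size:
                try:
                    # Jump straight to the next run of real data.
                    data = os.lseek(fd, pos, os.SEEK_DATA)
                except OSError as e:
                    if e.errno == errno.ENXIO:  # only a hole remains
                        yield ('HOLE', pos, size - pos)
                        break
                    raise
                if data > pos:
                    yield ('HOLE', pos, data - pos)
                # Find where that data run ends.
                hole = os.lseek(fd, data, os.SEEK_HOLE)
                yield ('DATA', data, hole - data)
                pos = hole
        finally:
            os.close(fd)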
Using this, the holes only take a few bytes each, making it possible to transfer a disk image faster. In my benchmarks, transferring a 100 GB disk image took about 100 seconds for KVM, and 220 seconds for VirtualBox (which needs to do more work at the receiver to convert the raw disk to a VDI). Both benchmarks were from a VM on my laptop to the laptop itself. The interesting bit here is that the receiver (recv-hole) is simple enough that it can be implemented in a bit of shell script, and the text of the shell script can be run on the remote end by giving it to ssh as a command line argument. This means there is no need to install any special tools on the receiver, which makes using this improvement much simpler. --- kvm.write | 13 ++++++++++--- virtualbox-ssh.write | 16 +++++++++++----- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/kvm.write b/kvm.write index 94a55daa..3e3b3eb1 100755 --- a/kvm.write +++ b/kvm.write @@ -90,9 +90,16 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): '''Transfer raw disk image to libvirt host.''' self.status(msg='Transferring disk image') - target = '%s:%s' % (ssh_host, vm_path) - with open(raw_disk, 'rb') as f: - cliapp.runcmd(['rsync', '-szS', raw_disk, target]) + + xfer_hole_path = morphlib.util.get_data_path('xfer-hole') + recv_hole = morphlib.util.get_data('recv-hole') + + cliapp.runcmd( + ['python', xfer_hole_path, raw_disk], + ['ssh', ssh_host, + 'sh', '-c', cliapp.shell_quote(recv_hole), + 'dummy-argv0', 'file', vm_path], + stdout=None, stderr=None) def create_libvirt_guest(self, ssh_host, vm_name, vm_path, autostart): '''Create the libvirt virtual machine.''' diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index 42585f5e..1aebe490 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -102,11 +102,17 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): '''Transfer raw disk image to VirtualBox host, and convert to VDI.''' self.status(msg='Transfer disk and convert to VDI') - with open(raw_disk, 'rb') as f: - cliapp.ssh_runcmd(ssh_host, - ['VBoxManage', 'convertfromraw', 'stdin', vdi_path, - str(os.path.getsize(raw_disk))], - stdin=f) + + st = os.lstat(raw_disk) + xfer_hole_path = morphlib.util.get_data_path('xfer-hole') + recv_hole = morphlib.util.get_data('recv-hole') + + cliapp.runcmd( + ['python', xfer_hole_path, raw_disk], + ['ssh', ssh_host, + 'sh', '-c', cliapp.shell_quote(recv_hole), + 'dummy-argv0', 'vbox', vdi_path, str(st.st_size)], + stdout=None, stderr=None) def virtualbox_version(self, ssh_host): 'Get the version number of the VirtualBox running on the remote host.' -- cgit v1.2.1 From 93d932f6a62e60357542e15f028551b7221c1720 Mon Sep 17 00:00:00 2001 From: Lars Wirzenius Date: Fri, 5 Sep 2014 14:17:53 +0000 Subject: Fix shell quoting for ssh remote command lines Found by Richard Maw.
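The root cause: ssh joins its command-line arguments with spaces and hands the result to the remote shell, so quoting only the script text lets arguments containing whitespace be re-split on the far side. Each word must be quoted individually, as the fix below does. A toy illustration with hypothetical values:

    import cliapp

    cmd = ['sh', '-c', 'echo "$1"', 'argv0', 'a b']
    # Wrong: the embedded 'a b' is re-split remotely into two words.
    broken = ' '.join(cmd)
    # Right: every word survives the remote shell intact.
    quoted = ' '.join(map(cliapp.shell_quote, cmd))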
--- kvm.write | 8 +++++--- virtualbox-ssh.write | 9 ++++++--- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/kvm.write b/kvm.write index 3e3b3eb1..16f188b5 100755 --- a/kvm.write +++ b/kvm.write @@ -94,11 +94,13 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): xfer_hole_path = morphlib.util.get_data_path('xfer-hole') recv_hole = morphlib.util.get_data('recv-hole') + ssh_remote_cmd = [ + 'sh', '-c', recv_hole, 'dummy-argv0', 'file', vm_path + ] + cliapp.runcmd( ['python', xfer_hole_path, raw_disk], - ['ssh', ssh_host, - 'sh', '-c', cliapp.shell_quote(recv_hole), - 'dummy-argv0', 'file', vm_path], + ['ssh', ssh_host] + map(cliapp.shell_quote, ssh_remote_cmd), stdout=None, stderr=None) def create_libvirt_guest(self, ssh_host, vm_name, vm_path, autostart): diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index 1aebe490..39ea8f86 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -107,11 +107,14 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): xfer_hole_path = morphlib.util.get_data_path('xfer-hole') recv_hole = morphlib.util.get_data('recv-hole') + ssh_remote_cmd = [ + 'sh', '-c', recv_hole, + 'dummy-argv0', 'vbox', vdi_path, str(st.st_size), + ] + cliapp.runcmd( ['python', xfer_hole_path, raw_disk], - ['ssh', ssh_host, - 'sh', '-c', cliapp.shell_quote(recv_hole), - 'dummy-argv0', 'vbox', vdi_path, str(st.st_size)], + ['ssh', ssh_host] + map(cliapp.shell_quote, ssh_remote_cmd), stdout=None, stderr=None) def virtualbox_version(self, ssh_host): -- cgit v1.2.1 From 03e794ca09e1583872fcc1c560a5ec7016983cb3 Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Tue, 9 Sep 2014 17:49:22 +0100 Subject: Fix openstack write/check exts to pass the tests --- openstack.check | 2 +- openstack.write | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/openstack.check b/openstack.check index d9d3ef24..edc37cc1 100755 --- a/openstack.check +++ b/openstack.check @@ -60,7 +60,7 @@ class OpenStackCheckExtension(morphlib.writeexts.WriteExtension): def check_location(self, location): x = urlparse.urlparse(location) if x.scheme not in ['http', 'https']: - raise cliapp.AppException('URL schema must be http or https in %s' \ + raise cliapp.AppException('URL schema must be http or https in %s'\ % location) if (x.path != '/v2.0' and x.path != '/v2.0/'): raise cliapp.AppException('API version must be v2.0 in %s'\ diff --git a/openstack.write b/openstack.write index ac2e2c8a..dc18f9aa 100755 --- a/openstack.write +++ b/openstack.write @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2013 Codethink Limited +# Copyright (C) 2013 - 2014 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by -- cgit v1.2.1 From abffc90e8a8150a1279f1d9c9722239e832e7172 Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Tue, 9 Sep 2014 17:51:28 +0100 Subject: Fix up openstack.write extension The openstack.write extension was calling a nonexistent method 'check_location'. This metod was moved to openstack.check in the commit ba7d1d1ed3bad002ce36e5d4adf4e3794625091a. 
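For context on the check/write split that these cleanups rely on: Morph runs NAME.check with only the deployment location before any expensive work happens, and NAME.write with the built system root plus the location afterwards. A rough driver sketch, as an illustration rather than Morph's actual code:

    import subprocess

    def deploy(extension, temp_root, location):
        # Validate early so bad arguments fail before a long build.
        subprocess.check_call(['./%s.check' % extension, location])
        # Then hand the staged system root to the write extension.
        subprocess.check_call(['./%s.write' % extension, temp_root, location])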
--- openstack.write | 1 - 1 file changed, 1 deletion(-) diff --git a/openstack.write b/openstack.write index dc18f9aa..516fe367 100755 --- a/openstack.write +++ b/openstack.write @@ -63,7 +63,6 @@ class OpenStackWriteExtension(morphlib.writeexts.WriteExtension): raise cliapp.AppException('Wrong number of command line args') temp_root, location = args - self.check_location(location) os_params = self.get_openstack_parameters() -- cgit v1.2.1 From 4fff47462e598d475e930893383f9c27e6f2c381 Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Wed, 1 Oct 2014 08:21:29 +0000 Subject: ssh-rsync: get UUID of the disk before writing fstab With this patch, the fstab of the system to be deployed as an upgrade will be configured using the UUID of the disk. It is no longer necessary to specify the ROOT_DEVICE when doing an upgrade. --- ssh-rsync.write | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ssh-rsync.write b/ssh-rsync.write index c139b6c0..468e5a1f 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -54,9 +54,12 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): self.upgrade_remote_system(location, temp_root) def upgrade_remote_system(self, location, temp_root): - self.complete_fstab_for_btrfs_layout(temp_root) - root_disk = self.find_root_disk(location) + uuid = cliapp.ssh_runcmd(location, ['blkid', '-s', 'UUID', '-o', + 'value', root_disk]).strip() + + self.complete_fstab_for_btrfs_layout(temp_root, uuid) + version_label = os.environ.get('VERSION_LABEL') autostart = self.get_environment_boolean('AUTOSTART') -- cgit v1.2.1 From bd6bae145a4f7064caa4ee49f7e815452d4469e8 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Wed, 8 Oct 2014 10:28:23 +0000 Subject: deploy: Make ssh-rsync upgrade extension handle unset VERSION_LABEL It now gives an error message. Previously it would fail with a backtrace like this: 2014-10-08 09:51:37 [systems/genivi-baseline-system-armv7lhf-jetson.morph][self]Removing temporary mounts Traceback (most recent call last): File "/usr/lib/python2.7/site-packages/cliapp/app.py", line 190, in _run self.process_args(args) File "/src/morph/morphlib/exts/ssh-rsync.write", line 54, in process_args self.upgrade_remote_system(location, temp_root) File "/src/morph/morphlib/exts/ssh-rsync.write", line 107, in upgrade_remote_system location, ['btrfs', 'subvolume', 'delete', orig_dir]) UnboundLocalError: local variable 'orig_dir' referenced before assignment --- ssh-rsync.check | 7 ++++++- ssh-rsync.write | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/ssh-rsync.check b/ssh-rsync.check index 6a776ce9..11446c28 100755 --- a/ssh-rsync.check +++ b/ssh-rsync.check @@ -18,8 +18,9 @@ import cliapp -import morphlib.writeexts +import os +import morphlib.writeexts class SshRsyncCheckExtension(morphlib.writeexts.WriteExtension): def process_args(self, args): @@ -33,6 +34,10 @@ class SshRsyncCheckExtension(morphlib.writeexts.WriteExtension): 'Baserock machines.
It cannot be used for an initial ' 'deployment.') + if os.environ.get('VERSION_LABEL', '') == '': + raise cliapp.AppException( + 'A VERSION_LABEL must be set when deploying an upgrade.') + location = args[0] self.check_ssh_connectivity(location) self.check_is_baserock_system(location) diff --git a/ssh-rsync.write b/ssh-rsync.write index 468e5a1f..775619ec 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -60,7 +60,7 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): self.complete_fstab_for_btrfs_layout(temp_root, uuid) - version_label = os.environ.get('VERSION_LABEL') + version_label = os.environ['VERSION_LABEL'] autostart = self.get_environment_boolean('AUTOSTART') self.status(msg='Creating remote mount point') -- cgit v1.2.1 From 7ec3d106f49fd4af2d193afc976c2230a9369a7e Mon Sep 17 00:00:00 2001 From: Richard Maw Date: Thu, 9 Oct 2014 15:09:03 +0000 Subject: ssh-rsync: Don't delete version if it exists A quirk in the resource cleanup code meant that if you gave the same version label when deploying a new version, then it would fail, then remove the old version, as it had assumed that it was the one to create those directories. This patch fixes this issue by making short context managers for all the resource allocation, so cleanup is done by walking up the context managers, so only the mount and the temporary directory need to be cleaned up if the `mkdir "$VERSION_ROOT"` fails. I've tested this with a deploy of a version that doesn't already exist, and the version I'm currently running, so I can conclusively say it's fixed that problem. --- ssh-rsync.write | 182 ++++++++++++++++++++++++++++++++------------------------ 1 file changed, 105 insertions(+), 77 deletions(-) diff --git a/ssh-rsync.write b/ssh-rsync.write index 775619ec..2391d48c 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -18,6 +18,7 @@ '''A Morph deployment write extension for upgrading systems over ssh.''' +import contextlib import cliapp import os import sys @@ -45,107 +46,134 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): ''' - def process_args(self, args): - if len(args) != 2: - raise cliapp.AppException('Wrong number of command line args') - - temp_root, location = args - - self.upgrade_remote_system(location, temp_root) - - def upgrade_remote_system(self, location, temp_root): - root_disk = self.find_root_disk(location) - uuid = cliapp.ssh_runcmd(location, ['blkid', '-s', 'UUID', '-o', - 'value', root_disk]).strip() - - self.complete_fstab_for_btrfs_layout(temp_root, uuid) + def find_root_disk(self, location): + '''Read /proc/mounts on location to find which device contains "/"''' - version_label = os.environ['VERSION_LABEL'] - autostart = self.get_environment_boolean('AUTOSTART') + self.status(msg='Finding device that contains "/"') + contents = cliapp.ssh_runcmd(location, ['cat', '/proc/mounts']) + for line in contents.splitlines(): + line_words = line.split() + if (line_words[1] == '/' and line_words[0] != 'rootfs'): + return line_words[0] + @contextlib.contextmanager + def _remote_mount_point(self, location): self.status(msg='Creating remote mount point') remote_mnt = cliapp.ssh_runcmd(location, ['mktemp', '-d']).strip() try: - self.status(msg='Mounting root disk') - cliapp.ssh_runcmd(location, ['mount', root_disk, remote_mnt]) - except Exception as e: - ssh_runcmd_ignore_failure(location, ['rmdir', remote_mnt]) - raise e + yield remote_mnt + finally: + self.status(msg='Removing remote mount point') + cliapp.ssh_runcmd(location, ['rmdir', remote_mnt]) + 
@contextlib.contextmanager + def _remote_mount(self, location, root_disk, mountpoint): + self.status(msg='Mounting root disk') + cliapp.ssh_runcmd(location, ['mount', root_disk, mountpoint]) try: - version_root = os.path.join(remote_mnt, 'systems', version_label) - orig_dir = os.path.join(version_root, 'orig') - - self.status(msg='Creating %s' % version_root) - cliapp.ssh_runcmd(location, ['mkdir', version_root]) - - self.create_remote_orig(location, version_root, remote_mnt, - temp_root) - - # Use the system-version-manager from the new system we just - # installed, so that we can upgrade from systems that don't have - # it installed. - self.status(msg='Calling system-version-manager to deploy upgrade') - deployment = os.path.join('/systems', version_label, 'orig') - system_config_sync = os.path.join( - remote_mnt, 'systems', version_label, 'orig', 'usr', 'bin', - 'baserock-system-config-sync') - system_version_manager = os.path.join( - remote_mnt, 'systems', version_label, 'orig', 'usr', 'bin', - 'system-version-manager') - cliapp.ssh_runcmd(location, - ['env', 'BASEROCK_SYSTEM_CONFIG_SYNC='+system_config_sync, - system_version_manager, 'deploy', deployment]) - - self.status(msg='Setting %s as the new default system' % - version_label) - cliapp.ssh_runcmd(location, - [system_version_manager, 'set-default', version_label]) - except Exception as e: - self.status(msg='Deployment failed') - ssh_runcmd_ignore_failure( - location, ['btrfs', 'subvolume', 'delete', orig_dir]) - ssh_runcmd_ignore_failure( - location, ['rm', '-rf', version_root]) - raise e + yield finally: - self.status(msg='Removing temporary mounts') - cliapp.ssh_runcmd(location, ['umount', remote_mnt]) - cliapp.ssh_runcmd(location, ['rmdir', remote_mnt]) + self.status(msg='Unmounting root disk') + cliapp.ssh_runcmd(location, ['umount', mountpoint]) + + @contextlib.contextmanager + def _created_version_root(self, location, remote_mnt, version_label): + version_root = os.path.join(remote_mnt, 'systems', version_label) + self.status(msg='Creating %(root)s', root=version_root) + cliapp.ssh_runcmd(location, ['mkdir', version_root]) + try: + yield version_root + except BaseException as e: + # catch all, we always want to clean up + self.status(msg='Cleaning up %(root)s', root=version_root) + ssh_runcmd_ignore_failure(location, ['rmdir', version_root]) + raise - if autostart: - self.status(msg="Rebooting into new system ...") - ssh_runcmd_ignore_failure(location, ['reboot']) + def get_old_orig(self, location, remote_mnt): + '''Identify which subvolume to snapshot from''' - def create_remote_orig(self, location, version_root, remote_mnt, - temp_root): - '''Create the subvolume version_root/orig on location''' + # rawdisk upgrades use 'factory' + return os.path.join(remote_mnt, 'systems', 'factory', 'orig') + @contextlib.contextmanager + def _created_orig_subvolume(self, location, remote_mnt, version_root): self.status(msg='Creating "orig" subvolume') old_orig = self.get_old_orig(location, remote_mnt) new_orig = os.path.join(version_root, 'orig') cliapp.ssh_runcmd(location, ['btrfs', 'subvolume', 'snapshot', old_orig, new_orig]) + try: + yield new_orig + except BaseException as e: + ssh_runcmd_ignore_failure( + location, ['btrfs', 'subvolume', 'delete', new_orig]) + raise + def populate_remote_orig(self, location, new_orig, temp_root): + '''Populate the subvolume version_root/orig on location''' + + self.status(msg='Populating "orig" subvolume') cliapp.runcmd(['rsync', '-as', '--checksum', '--numeric-ids', '--delete', temp_root + 
os.path.sep, '%s:%s' % (location, new_orig)]) - def get_old_orig(self, location, remote_mnt): - '''Identify which subvolume to snapshot from''' + @contextlib.contextmanager + def _deployed_version(self, location, version_label, + system_config_sync, system_version_manager): + self.status(msg='Calling system-version-manager to deploy upgrade') + deployment = os.path.join('/systems', version_label, 'orig') + cliapp.ssh_runcmd(location, + ['env', 'BASEROCK_SYSTEM_CONFIG_SYNC='+system_config_sync, + system_version_manager, 'deploy', deployment]) + try: + yield deployment + except BaseException as e: + self.status(msg='Cleaning up failed version installation') + cliapp.ssh_runcmd(location, + [system_version_manager, 'remove', version_label]) + raise - # rawdisk upgrades use 'factory' - return os.path.join(remote_mnt, 'systems', 'factory', 'orig') + def upgrade_remote_system(self, location, temp_root): + root_disk = self.find_root_disk(location) + uuid = cliapp.ssh_runcmd(location, ['blkid', '-s', 'UUID', '-o', + 'value', root_disk]).strip() - def find_root_disk(self, location): - '''Read /proc/mounts on location to find which device contains "/"''' + self.complete_fstab_for_btrfs_layout(temp_root, uuid) - self.status(msg='Finding device that contains "/"') - contents = cliapp.ssh_runcmd(location, ['cat', '/proc/mounts']) - for line in contents.splitlines(): - line_words = line.split() - if (line_words[1] == '/' and line_words[0] != 'rootfs'): - return line_words[0] + version_label = os.environ['VERSION_LABEL'] + autostart = self.get_environment_boolean('AUTOSTART') + + with self._remote_mount_point(location) as remote_mnt, \ + self._remote_mount(location, root_disk, remote_mnt), \ + self._created_version_root(location, remote_mnt, + version_label) as version_root, \ + self._created_orig_subvolume(location, remote_mnt, + version_root) as orig: + self.populate_remote_orig(location, orig, temp_root) + system_config_sync = os.path.join( + remote_mnt, 'systems', version_label, 'orig', + 'usr', 'bin', 'baserock-system-config-sync') + system_version_manager = os.path.join( + remote_mnt, 'systems', version_label, 'orig', + 'usr', 'bin', 'system-version-manager') + with self._deployed_version(location, version_label, + system_config_sync, system_version_manager): + self.status(msg='Setting %(v)s as the new default system', + v=version_label) + cliapp.ssh_runcmd(location, [system_version_manager, + 'set-default', version_label]) + + if autostart: + self.status(msg="Rebooting into new system ...") + ssh_runcmd_ignore_failure(location, ['reboot']) + + def process_args(self, args): + if len(args) != 2: + raise cliapp.AppException('Wrong number of command line args') + + temp_root, location = args + + self.upgrade_remote_system(location, temp_root) SshRsyncWriteExtension().run() -- cgit v1.2.1 From aa7005c9b4207ac32621433a95a0c9b44007f6b4 Mon Sep 17 00:00:00 2001 From: Richard Maw Date: Fri, 10 Oct 2014 13:09:25 +0000 Subject: Merge branch 'baserock/richardmaw/fix-ssh-rsync-destroying-versions' Reviewed-by: Sam Thursfield Reviewed-by: Jim MacArthur Reviewed-by: Richard Ipsum --- ssh-rsync.write | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ssh-rsync.write b/ssh-rsync.write index 2391d48c..0ce89c7f 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -150,17 +150,17 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): self._created_orig_subvolume(location, remote_mnt, version_root) as orig: self.populate_remote_orig(location, orig, temp_root) - 
system_config_sync = os.path.join( - remote_mnt, 'systems', version_label, 'orig', - 'usr', 'bin', 'baserock-system-config-sync') - system_version_manager = os.path.join( - remote_mnt, 'systems', version_label, 'orig', - 'usr', 'bin', 'system-version-manager') + system_root = os.path.join(remote_mnt, 'systems', + version_label, 'orig') + config_sync = os.path.join(system_root, 'usr', 'bin', + 'baserock-system-config-sync') + version_manager = os.path.join(system_root, 'usr', 'bin', + 'system-version-manager') with self._deployed_version(location, version_label, - system_config_sync, system_version_manager): + config_sync, version_manager): self.status(msg='Setting %(v)s as the new default system', v=version_label) - cliapp.ssh_runcmd(location, [system_version_manager, + cliapp.ssh_runcmd(location, [version_manager, 'set-default', version_label]) if autostart:
-- cgit v1.2.1
From ec477897b9d6afd3ab176ab512d12f36121618c1 Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Wed, 5 Nov 2014 10:24:07 +0000 Subject: Use the default symlink when creating the orig subvolume.
This patch solves the issue caused by upgrading a system without a factory version. Currently we are only using the factory version to snapshot its orig subvolume, to make the transfer of the new content faster (rsync won't have to send everything). The default symlink may not be present, but it can't be deleted easily using system-version-manager. This is a quick fix, but in the future we may want to not hardcode the path from which we snapshot the orig subvolume, or improve system-version-manager to make sure that the default symlink is always present.
--- rawdisk.write | 2 +- ssh-rsync.write | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/rawdisk.write b/rawdisk.write index 1c2c5a84..12db4398 100755 --- a/rawdisk.write +++ b/rawdisk.write @@ -68,7 +68,7 @@ class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): version_root = os.path.join(mp, 'systems', version_label) os.mkdir(version_root) - old_orig = os.path.join(mp, 'systems', 'factory', 'orig') + old_orig = os.path.join(mp, 'systems', 'default', 'orig') new_orig = os.path.join(version_root, 'orig') cliapp.runcmd( ['btrfs', 'subvolume', 'snapshot', old_orig, new_orig])
diff --git a/ssh-rsync.write b/ssh-rsync.write index 0ce89c7f..2d7258ba 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -92,8 +92,8 @@ class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): def get_old_orig(self, location, remote_mnt): '''Identify which subvolume to snapshot from''' - # rawdisk upgrades use 'factory' - return os.path.join(remote_mnt, 'systems', 'factory', 'orig') + # rawdisk upgrades use 'default' + return os.path.join(remote_mnt, 'systems', 'default', 'orig') @contextlib.contextmanager def _created_orig_subvolume(self, location, remote_mnt, version_root):
-- cgit v1.2.1
From 00530b7da0ccd540619a4c5b35fcc9b284d4e1cf Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Tue, 25 Nov 2014 14:02:25 +0000 Subject: Whitespace and line endings?
--- kvm.write | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/kvm.write b/kvm.write index 16f188b5..eeb7c6b5 100755 --- a/kvm.write +++ b/kvm.write @@ -31,23 +31,23 @@ import morphlib.writeexts class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): '''Create a KVM/LibVirt virtual machine during Morph's deployment. - + The location command line argument is the pathname of the disk image to be created.
The user is expected to provide the location argument using the following syntax: - + kvm+ssh://HOST/GUEST/PATH - + where: - + * HOST is the host on which KVM/LibVirt is running * GUEST is the name of the guest virtual machine on that host * PATH is the path to the disk image that should be created, on that host - + The extension will connect to HOST via ssh to run libvirt's command line management tools. - + ''' location_pattern = '^/(?P[^/]+)(?P/.+)$' @@ -55,15 +55,15 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): def process_args(self, args): if len(args) != 2: raise cliapp.AppException('Wrong number of command line args') - + temp_root, location = args ssh_host, vm_name, vm_path = self.parse_location(location) autostart = self.get_environment_boolean('AUTOSTART') - + fd, raw_disk = tempfile.mkstemp() os.close(fd) self.create_local_system(temp_root, raw_disk) - + try: self.transfer(raw_disk, ssh_host, vm_path) self.create_libvirt_guest(ssh_host, vm_name, vm_path, autostart) @@ -105,7 +105,7 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): def create_libvirt_guest(self, ssh_host, vm_name, vm_path, autostart): '''Create the libvirt virtual machine.''' - + self.status(msg='Creating libvirt/kvm virtual machine') attach_disks = self.parse_attach_disks() @@ -135,4 +135,3 @@ class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): ['virsh', '--connect', 'qemu:///system', 'autostart', vm_name]) KvmPlusSshWriteExtension().run() - -- cgit v1.2.1 From f03570edacad3fe9cf5468c11324a027e028acc1 Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Tue, 25 Nov 2014 14:52:59 +0000 Subject: Document the write extension in the write.help file - Move docstring from .write to .write.help - Rework the content and formatting of the help information --- kvm.write | 20 -------------------- kvm.write.help | 29 ++++++++++++++++++++++++++--- 2 files changed, 26 insertions(+), 23 deletions(-) diff --git a/kvm.write b/kvm.write index eeb7c6b5..61c96676 100755 --- a/kvm.write +++ b/kvm.write @@ -30,26 +30,6 @@ import morphlib.writeexts class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): - '''Create a KVM/LibVirt virtual machine during Morph's deployment. - - The location command line argument is the pathname of the disk image - to be created. The user is expected to provide the location argument - using the following syntax: - - kvm+ssh://HOST/GUEST/PATH - - where: - - * HOST is the host on which KVM/LibVirt is running - * GUEST is the name of the guest virtual machine on that host - * PATH is the path to the disk image that should be created, - on that host - - The extension will connect to HOST via ssh to run libvirt's - command line management tools. - - ''' - location_pattern = '^/(?P[^/]+)(?P/.+)$' def process_args(self, args): diff --git a/kvm.write.help b/kvm.write.help index 8b5053a5..ad0e25f0 100644 --- a/kvm.write.help +++ b/kvm.write.help @@ -1,4 +1,27 @@ help: | - The INITRAMFS_PATH option can be used to specify the location of an - initramfs for syslinux to tell Linux to use, rather than booting - the rootfs directly. + + Create a KVM/LibVirt virtual machine during Morph's deployment. + + Parameters: + + * location: a custom URL scheme of the form `kvm+ssh://HOST/GUEST/PATH`, + where: + * HOST is the name of the host on which KVM/LibVirt is running + * GUEST is the name of the guest VM on that host + * PATH is the path to the disk image that should be created, + on that host. 
For example, + `kvm+ssh://alice@192.168.122.1/testsys/home/alice/testys.img` where + * `alice@192.168.122.1` is the target host as given to ssh, + **from within the development host** (which may be + different from the target host's normal address); + * `testsys` is the name of the new guest VM'; + * `/home/alice/testys.img` is the pathname of the disk image files + on the target host. + * HOSTNAME: the hostname of the **guest** VM within the network into which + it is being deployed + * DISK_SIZE: the size of the VM's primary virtual hard disk + * RAM_SIZE: The amount of RAM that the virtual machine should allocate for + itself from the host. + * VCPUS: the number of virtual CPUs for the VM + * INITRAMFS_PATH: the location of an initramfs for the bootloader to tell + Linux to use, rather than booting the rootfs directly. -- cgit v1.2.1 From 78f4fc473c71781722caff5d6ba819f21c381ab6 Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Wed, 26 Nov 2014 18:02:09 +0000 Subject: Add more information on existing parameters. Add AUTOSTART parameter --- kvm.write.help | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/kvm.write.help b/kvm.write.help index ad0e25f0..0b428e48 100644 --- a/kvm.write.help +++ b/kvm.write.help @@ -17,11 +17,19 @@ help: | * `testsys` is the name of the new guest VM'; * `/home/alice/testys.img` is the pathname of the disk image files on the target host. - * HOSTNAME: the hostname of the **guest** VM within the network into which - it is being deployed - * DISK_SIZE: the size of the VM's primary virtual hard disk - * RAM_SIZE: The amount of RAM that the virtual machine should allocate for - itself from the host. - * VCPUS: the number of virtual CPUs for the VM - * INITRAMFS_PATH: the location of an initramfs for the bootloader to tell - Linux to use, rather than booting the rootfs directly. + * HOSTNAME=name: the hostname of the **guest** VM within the network into + which it is being deployed + * DISK_SIZE=X: the size of the VM's primary virtual hard disk. `X` should + use a suffix of `K`, `M`, or `G` (in upper or lower case) to indicate + kilo-, mega-, or gigabytes. For example, `DISK_SIZE=100G` would create a + 100 gigabyte disk image. **This parameter is mandatory**. + * RAM_SIZE=X: The amount of RAM that the virtual machine should allocate for + itself from the host. `X` is interpreted in the same was as for + DISK_SIZE`, and defaults to `1G` + * VCPUS=n: the number of virtual CPUs for the VM. Allowed values 1-32. Do + not use more CPU cores than you have available physically (real cores, no + hyperthreads) + * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to + tell Linux to use, rather than booting the rootfs directly. + * AUTOSTART=` - allowed values are `yes` and `no` (default). If the + value is 'yes', the VM will be started when it has been deployed -- cgit v1.2.1 From d9069e2434fd152b0e3039839b6596c84376c4c3 Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Fri, 28 Nov 2014 10:10:29 +0000 Subject: Improve readability by adding blank lines --- kvm.write.help | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kvm.write.help b/kvm.write.help index 0b428e48..a9b63303 100644 --- a/kvm.write.help +++ b/kvm.write.help @@ -17,19 +17,25 @@ help: | * `testsys` is the name of the new guest VM'; * `/home/alice/testys.img` is the pathname of the disk image files on the target host. 
+ * HOSTNAME=name: the hostname of the **guest** VM within the network into which it is being deployed + * DISK_SIZE=X: the size of the VM's primary virtual hard disk. `X` should use a suffix of `K`, `M`, or `G` (in upper or lower case) to indicate kilo-, mega-, or gigabytes. For example, `DISK_SIZE=100G` would create a 100 gigabyte disk image. **This parameter is mandatory**. + * RAM_SIZE=X: The amount of RAM that the virtual machine should allocate for itself from the host. `X` is interpreted in the same was as for DISK_SIZE`, and defaults to `1G` + * VCPUS=n: the number of virtual CPUs for the VM. Allowed values 1-32. Do not use more CPU cores than you have available physically (real cores, no hyperthreads) + * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to tell Linux to use, rather than booting the rootfs directly. + * AUTOSTART=` - allowed values are `yes` and `no` (default). If the value is 'yes', the VM will be started when it has been deployed
-- cgit v1.2.1
From a2ac64a4baa72bb805d0aee2b9862c25a0fff89f Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Fri, 28 Nov 2014 10:14:53 +0000 Subject: Refer to .write.help file for documentation
--- kvm.write | 6 +++++- virtualbox-ssh.write | 31 ++++++------------------------- 2 files changed, 11 insertions(+), 26 deletions(-)
diff --git a/kvm.write b/kvm.write index 61c96676..30b43d6c 100755 --- a/kvm.write +++ b/kvm.write @@ -15,7 +15,11 @@ # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -'''A Morph deployment write extension for deploying to KVM+libvirt.''' +'''A Morph deployment write extension for deploying to KVM+libvirt. + +See file kvm.write.help for documentation + +''' import cliapp
diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index 39ea8f86..1b4de89c 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -20,6 +20,8 @@ VirtualBox is assumed to be running on a remote machine, which is accessed over ssh. The machine gets created, but not started. +See file virtualbox-ssh.write.help for documentation + ''' @@ -36,30 +38,10 @@ import morphlib.writeexts class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): - '''Create a VirtualBox virtual machine during Morph's deployment. - - The location command line argument is the pathname of the disk image - to be created. The user is expected to provide the location argument - using the following syntax: - - vbox+ssh://HOST/GUEST/PATH - - where: - - * HOST is the host on which VirtualBox is running - * GUEST is the name of the guest virtual machine on that host - * PATH is the path to the disk image that should be created, - on that host - - The extension will connect to HOST via ssh to run VirtualBox's - command line management tools.
- - ''' - def process_args(self, args): if len(args) != 2: raise cliapp.AppException('Wrong number of command line args') - + temp_root, location = args ssh_host, vm_name, vdi_path = self.parse_location(location) autostart = self.get_environment_boolean('AUTOSTART') @@ -88,7 +70,7 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): def parse_location(self, location): '''Parse the location argument to get relevant data.''' - + x = urlparse.urlparse(location) if x.scheme != 'vbox+ssh': raise cliapp.AppException( @@ -169,11 +151,11 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): attach_disks = self.parse_attach_disks() for device_no, disk in enumerate(attach_disks, 1): - cmd = ['storageattach', vm_name, + cmd = ['storageattach', vm_name, '--storagectl', 'SATA Controller', '--port', str(device_no), '--device', '0', - '--type', 'hdd', + '--type', 'hdd', '--medium', disk] commands.append(cmd) @@ -242,4 +224,3 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): return iface VirtualBoxPlusSshWriteExtension().run() - -- cgit v1.2.1 From 5f4399accaf2f501bbf307c70c6d9b2d90ded27c Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Fri, 28 Nov 2014 10:15:26 +0000 Subject: More complete help documentation --- virtualbox-ssh.write.help | 87 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 84 insertions(+), 3 deletions(-) diff --git a/virtualbox-ssh.write.help b/virtualbox-ssh.write.help index 8b5053a5..95b2e247 100644 --- a/virtualbox-ssh.write.help +++ b/virtualbox-ssh.write.help @@ -1,4 +1,85 @@ help: | - The INITRAMFS_PATH option can be used to specify the location of an - initramfs for syslinux to tell Linux to use, rather than booting - the rootfs directly. + + Deploy a Baserock system as a *new* VirtualBox virtual machine. + (Use the `ssh-rsync` write extension to deploy upgrades to an *existing* + VM) + + Connects to HOST via ssh to run VirtualBox's command line management tools. + + Parameters: + + * location: a custom URL scheme of the form `vbox+ssh://HOST/GUEST/PATH`, + where: + * HOST is the name of the host on which VirtualBox is running + * GUEST is the name of the guest VM on that host + * PATH is the path to the disk image that should be created, + on that host. For example, + `vbox+ssh://alice@192.168.122.1/testsys/home/alice/testys.img` where + * `alice@192.168.122.1` is the target host as given to ssh, + **from within the development host** (which may be + different from the target host's normal address); + * `testsys` is the name of the new guest VM'; + * `/home/alice/testys.img` is the pathname of the disk image files + on the target host. + + * HOSTNAME=name: the hostname of the **guest** VM within the network into + which it is being deployed. + + * DISK_SIZE=X: **(MANDATORY)** the size of the VM's primary virtual hard + disk. `X` should use a suffix of `K`, `M`, or `G` (in upper or lower case) + to indicate kilo-, mega-, or gigabytes. For example, `DISK_SIZE=100G` + would create a 100 gigabyte virtual hard disk. + + * RAM_SIZE=X: The amount of RAM that the virtual machine should allocate for + itself from the host. `X` is interpreted in the same as for DISK_SIZE`, + and defaults to `1G`. + + * VCPUS=n: the number of virtual CPUs for the VM. Allowed values 1-32. Do + not use more CPU cores than you have available physically (real cores, no + hyperthreads). 
+ + * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to + tell Linux to use, rather than booting the rootfs directly. + + * AUTOSTART= - allowed values are `yes` and `no` (default). If the + value is 'yes', the VM will be started when it has been deployed. + + * VAGRANT= - allowed values are `yes` and `no` (default). If the + value is 'yes', then networking is configured so that the VM will work + with Vagrant. Otherwise networking is configured to run directly in + VirtualBox. + + * HOST_IPADDR= - the IP address of the VM host. + + * NETMASK= - the netmask of the VM host. + + * NETWORK_CONFIG= - `net_config` is used to set up the VM's + network interfaces. It is a string containing semi-colon separated + 'stanzas' where each stanza provides information about a network + interface. Each stanza is of the form name:type[,arg=value] e.g. + + lo:loopback + eth0:dhcp + eth1:static,address=10.0.0.1,netmask=255.255.0.0 + + An example of the NETWORK_CONFIG parameter + + `"lo:loopback;eth0:static,address=192.168.100.2,netmask=255.255.255.0;eth1:dhcp,hostname=$(hostname)"` + + It is useful to configure one interface to use NAT to give the VM access + to the outside world and another interface to use the Virtual Box host + adapter to allow you to access the Trove from the host machine. + + The NAT interface eth1 is set up to use dhcp, the host-only adapter + interface is configured statically. + + Note: you must give the host-only adapter interface an address that lies + **on the same network** as the host adapter. So if the host adapter has an + IP of 192.168.100.1 eth0 should have an address such as 192.168.100.42. + + The settings of the host adapter, including its IP can be changed either + in the VirtualBox manager UI (https://www.virtualbox.org/manual/ch03.html#settings-network) + or via the VBoxManage command line (https://www.virtualbox.org/manual/ch08.html#idp57572192) + + See Chapter 6 of the VirtualBox User Manual for more information about + virtual networking (https://www.virtualbox.org/manual/ch06.html) -- cgit v1.2.1 From e1d759e58c9feb4069cf18c430f5e410733d1d9f Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Mon, 1 Dec 2014 13:43:44 +0000 Subject: Action review comments - Document different ways of calling parameters - Allowed values for boolean parameters --- virtualbox-ssh.write.help | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/virtualbox-ssh.write.help b/virtualbox-ssh.write.help index 95b2e247..a19d8fb2 100644 --- a/virtualbox-ssh.write.help +++ b/virtualbox-ssh.write.help @@ -41,13 +41,12 @@ help: | * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to tell Linux to use, rather than booting the rootfs directly. - * AUTOSTART= - allowed values are `yes` and `no` (default). If the - value is 'yes', the VM will be started when it has been deployed. + * AUTOSTART= - boolean. If it is set, the VM will be started when + it has been deployed. - * VAGRANT= - allowed values are `yes` and `no` (default). If the - value is 'yes', then networking is configured so that the VM will work - with Vagrant. Otherwise networking is configured to run directly in - VirtualBox. + * VAGRANT= - boolean. If it is set, then networking is configured + so that the VM will work with Vagrant. Otherwise networking is configured + to run directly in VirtualBox. * HOST_IPADDR= - the IP address of the VM host. 
-- cgit v1.2.1
From 5d19be62bfd39f834ce6b2af020b41c6365e73af Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Mon, 1 Dec 2014 13:52:10 +0000 Subject: Merge branch 'pf-document-extensions' Reviewed-by: Richard Maw
--- kvm.write.help | 11 ++++++++--- virtualbox-ssh.write.help | 2 ++ 2 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/kvm.write.help b/kvm.write.help index a9b63303..db2c0d97 100644 --- a/kvm.write.help +++ b/kvm.write.help @@ -1,6 +1,8 @@ help: | - Create a KVM/LibVirt virtual machine during Morph's deployment. + Deploy a Baserock system as a *new* KVM/LibVirt virtual machine. + + Use the `ssh-rsync` write extension to deploy upgrades to an *existing* VM Parameters: @@ -37,5 +39,8 @@ help: | * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to tell Linux to use, rather than booting the rootfs directly. - * AUTOSTART=` - allowed values are `yes` and `no` (default). If the - value is 'yes', the VM will be started when it has been deployed + * AUTOSTART=` - boolean. If it is set, the VM will be started when + it has been deployed. + + (See `morph help deploy` for details of how to pass parameters to write extensions) +
diff --git a/virtualbox-ssh.write.help b/virtualbox-ssh.write.help index a19d8fb2..ad2a83eb 100644 --- a/virtualbox-ssh.write.help +++ b/virtualbox-ssh.write.help @@ -82,3 +82,5 @@ help: | See Chapter 6 of the VirtualBox User Manual for more information about virtual networking (https://www.virtualbox.org/manual/ch06.html) + + (See `morph help deploy` for details of how to pass parameters to write extensions)
-- cgit v1.2.1
From 79817575561496fdc91787d93cfeef2934290ec5 Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Wed, 19 Nov 2014 16:21:47 +0000 Subject: Modify rawdisk.write to allow deployment to devices
This patch also modifies rawdisk.write to use the UPGRADE environment variable to figure out whether it is doing an upgrade or a fresh deployment. This change is important because os.path.isfile doesn't work with devices.
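(The is_device() helper that the patch below calls comes from morphlib's writeexts module, whose implementation is not part of this series. A minimal sketch of what such a check might look like, offered as an illustrative assumption rather than the committed morphlib code:

    import os
    import stat

    def is_device(location):
        # os.path.isfile() returns False for block devices such as
        # /dev/sda, so it cannot distinguish "upgrade an existing image
        # file" from "deploy to a device"; inspecting the inode type
        # directly handles both cases.
        try:
            st = os.stat(location)
        except OSError:
            return False  # location does not exist yet
        return stat.S_ISBLK(st.st_mode)
)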
--- rawdisk.write | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/rawdisk.write b/rawdisk.write index 12db4398..e1a75fe0 100755 --- a/rawdisk.write +++ b/rawdisk.write @@ -43,17 +43,25 @@ class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): raise cliapp.AppException('Wrong number of command line args') temp_root, location = args - if os.path.isfile(location): + upgrade = self.get_environment_boolean('UPGRADE') + + if upgrade: self.upgrade_local_system(location, temp_root) else: try: - self.create_local_system(temp_root, location) - self.status(msg='Disk image has been created at %s' % location) + if not self.is_device(location): + with self.created_disk_image(location): + self.format_btrfs(location) + self.create_system(temp_root, location) + self.status(msg='Disk image has been created at %s' % + location) + else: + self.format_btrfs(location) + self.create_system(temp_root, location) + self.status(msg='System deployed to %s' % location) except Exception: - self.status(msg='Failure to create disk image at %s' % + self.status(msg='Failure to deploy system to %s' % location) - if os.path.exists(location): - os.remove(location) raise def upgrade_local_system(self, raw_disk, temp_root): -- cgit v1.2.1 From b80fb87d383b74cf2827443ae24a3a304bdb0fc8 Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Wed, 19 Nov 2014 17:19:40 +0000 Subject: Update rawdisk.check to support device deployments --- rawdisk.check | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/rawdisk.check b/rawdisk.check index acdc4de1..094adb72 100755 --- a/rawdisk.check +++ b/rawdisk.check @@ -33,10 +33,11 @@ class RawdiskCheckExtension(morphlib.writeexts.WriteExtension): location = args[0] upgrade = self.get_environment_boolean('UPGRADE') if upgrade: - if not os.path.isfile(location): - raise cliapp.AppException( - 'Cannot upgrade %s: it is not an existing disk image' % - location) + if not self.is_device(location): + if not os.path.isfile(location): + raise cliapp.AppException( + 'Cannot upgrade %s: it is not an existing disk image' % + location) version_label = os.environ.get('VERSION_LABEL') if version_label is None: @@ -44,9 +45,10 @@ class RawdiskCheckExtension(morphlib.writeexts.WriteExtension): 'VERSION_LABEL was not given. It is required when ' 'upgrading an existing system.') else: - if os.path.exists(location): - raise cliapp.AppException( - 'Target %s already exists. Use `morph upgrade` if you ' - 'want to update an existing image.' % location) + if not self.is_device(location): + if os.path.exists(location): + raise cliapp.AppException( + 'Target %s already exists. Use `morph upgrade` if you ' + 'want to update an existing image.' 
% location) RawdiskCheckExtension().run() -- cgit v1.2.1 From beb86b3ad69f0712d432bdef1dad609171bfeb7a Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Mon, 1 Dec 2014 15:25:21 +0000 Subject: writeexts.py: convert 'mount' to context manager --- openstack.write | 6 +----- rawdisk.write | 65 ++++++++++++++++++++++++++------------------------------- 2 files changed, 31 insertions(+), 40 deletions(-) diff --git a/openstack.write b/openstack.write index 516fe367..b1941d3c 100755 --- a/openstack.write +++ b/openstack.write @@ -79,8 +79,7 @@ class OpenStackWriteExtension(morphlib.writeexts.WriteExtension): def set_extlinux_root_to_virtio(self, raw_disk): '''Re-configures extlinux to use virtio disks''' self.status(msg='Updating extlinux.conf') - mp = self.mount(raw_disk) - try: + with self.mount(raw_disk) as mp: path = os.path.join(mp, 'extlinux.conf') with open(path) as f: @@ -91,9 +90,6 @@ class OpenStackWriteExtension(morphlib.writeexts.WriteExtension): with open(path, "w") as f: f.write(extlinux_conf) - finally: - self.unmount(mp) - def get_openstack_parameters(self): '''Get the environment variables needed. diff --git a/rawdisk.write b/rawdisk.write index e1a75fe0..b17f8aa7 100755 --- a/rawdisk.write +++ b/rawdisk.write @@ -67,51 +67,46 @@ class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): def upgrade_local_system(self, raw_disk, temp_root): self.complete_fstab_for_btrfs_layout(temp_root) - mp = self.mount(raw_disk) + with self.mount(raw_disk) as mp: + version_label = self.get_version_label(mp) + self.status(msg='Updating image to a new version with label %s' % + version_label) + + version_root = os.path.join(mp, 'systems', version_label) + os.mkdir(version_root) + + old_orig = os.path.join(mp, 'systems', 'default', 'orig') + new_orig = os.path.join(version_root, 'orig') + cliapp.runcmd( + ['btrfs', 'subvolume', 'snapshot', old_orig, new_orig]) + + cliapp.runcmd( + ['rsync', '-a', '--checksum', '--numeric-ids', '--delete', + temp_root + os.path.sep, new_orig]) + + self.create_run(version_root) + + default_path = os.path.join(mp, 'systems', 'default') + if os.path.exists(default_path): + os.remove(default_path) + else: + # we are upgrading and old system that does + # not have an updated extlinux config file + if self.bootloader_config_is_wanted(): + self.generate_bootloader_config(mp) + self.install_bootloader(mp) + os.symlink(version_label, default_path) - version_label = self.get_version_label(mp) - self.status(msg='Updating image to a new version with label %s' % - version_label) - - version_root = os.path.join(mp, 'systems', version_label) - os.mkdir(version_root) - - old_orig = os.path.join(mp, 'systems', 'default', 'orig') - new_orig = os.path.join(version_root, 'orig') - cliapp.runcmd( - ['btrfs', 'subvolume', 'snapshot', old_orig, new_orig]) - - cliapp.runcmd( - ['rsync', '-a', '--checksum', '--numeric-ids', '--delete', - temp_root + os.path.sep, new_orig]) - - self.create_run(version_root) - - default_path = os.path.join(mp, 'systems', 'default') - if os.path.exists(default_path): - os.remove(default_path) - else: - # we are upgrading and old system that does - # not have an updated extlinux config file if self.bootloader_config_is_wanted(): - self.generate_bootloader_config(mp) - self.install_bootloader(mp) - os.symlink(version_label, default_path) - - if self.bootloader_config_is_wanted(): - self.install_kernel(version_root, temp_root) - - self.unmount(mp) + self.install_kernel(version_root, temp_root) def get_version_label(self, mp): version_label = 
os.environ.get('VERSION_LABEL') if version_label is None: - self.unmount(mp) raise cliapp.AppException('VERSION_LABEL was not given') if os.path.exists(os.path.join(mp, 'systems', version_label)): - self.unmount(mp) raise cliapp.AppException('VERSION_LABEL %s already exists' % version_label) -- cgit v1.2.1 From 5a070828829e4c28d6e0a44c047a116ee1bbd1ae Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Mon, 1 Dec 2014 17:43:20 +0000 Subject: Fix line lengths to be shorter than 80 columns --- kvm.write.help | 7 ++++--- virtualbox-ssh.write.help | 39 ++++++++++++++++++++++----------------- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/kvm.write.help b/kvm.write.help index db2c0d97..26a54d9c 100644 --- a/kvm.write.help +++ b/kvm.write.help @@ -28,8 +28,8 @@ help: | kilo-, mega-, or gigabytes. For example, `DISK_SIZE=100G` would create a 100 gigabyte disk image. **This parameter is mandatory**. - * RAM_SIZE=X: The amount of RAM that the virtual machine should allocate for - itself from the host. `X` is interpreted in the same was as for + * RAM_SIZE=X: The amount of RAM that the virtual machine should allocate + for itself from the host. `X` is interpreted in the same was as for DISK_SIZE`, and defaults to `1G` * VCPUS=n: the number of virtual CPUs for the VM. Allowed values 1-32. Do @@ -42,5 +42,6 @@ help: | * AUTOSTART=` - boolean. If it is set, the VM will be started when it has been deployed. - (See `morph help deploy` for details of how to pass parameters to write extensions) + (See `morph help deploy` for details of how to pass parameters to write + extensions) diff --git a/virtualbox-ssh.write.help b/virtualbox-ssh.write.help index ad2a83eb..b4c59553 100644 --- a/virtualbox-ssh.write.help +++ b/virtualbox-ssh.write.help @@ -26,17 +26,17 @@ help: | which it is being deployed. * DISK_SIZE=X: **(MANDATORY)** the size of the VM's primary virtual hard - disk. `X` should use a suffix of `K`, `M`, or `G` (in upper or lower case) - to indicate kilo-, mega-, or gigabytes. For example, `DISK_SIZE=100G` - would create a 100 gigabyte virtual hard disk. + disk. `X` should use a suffix of `K`, `M`, or `G` (in upper or lower + case) to indicate kilo-, mega-, or gigabytes. For example, + `DISK_SIZE=100G` would create a 100 gigabyte virtual hard disk. - * RAM_SIZE=X: The amount of RAM that the virtual machine should allocate for - itself from the host. `X` is interpreted in the same as for DISK_SIZE`, - and defaults to `1G`. + * RAM_SIZE=X: The amount of RAM that the virtual machine should allocate + for itself from the host. `X` is interpreted in the same as for + DISK_SIZE, and defaults to `1G`. * VCPUS=n: the number of virtual CPUs for the VM. Allowed values 1-32. Do - not use more CPU cores than you have available physically (real cores, no - hyperthreads). + not use more CPU cores than you have available physically (real cores, + no hyperthreads). * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to tell Linux to use, rather than booting the rootfs directly. @@ -45,8 +45,8 @@ help: | it has been deployed. * VAGRANT= - boolean. If it is set, then networking is configured - so that the VM will work with Vagrant. Otherwise networking is configured - to run directly in VirtualBox. + so that the VM will work with Vagrant. Otherwise networking is + configured to run directly in VirtualBox. * HOST_IPADDR= - the IP address of the VM host. 
@@ -61,9 +61,10 @@ help: | eth0:dhcp eth1:static,address=10.0.0.1,netmask=255.255.0.0 - An example of the NETWORK_CONFIG parameter + An example of the NETWORK_CONFIG parameter (It should be in one line) - `"lo:loopback;eth0:static,address=192.168.100.2,netmask=255.255.255.0;eth1:dhcp,hostname=$(hostname)"` + `"lo:loopback;eth0:static,address=192.168.100.2,netmask=255.255.255.0; + eth1:dhcp,hostname=$(hostname)"` It is useful to configure one interface to use NAT to give the VM access to the outside world and another interface to use the Virtual Box host @@ -73,14 +74,18 @@ help: | interface is configured statically. Note: you must give the host-only adapter interface an address that lies - **on the same network** as the host adapter. So if the host adapter has an - IP of 192.168.100.1 eth0 should have an address such as 192.168.100.42. + **on the same network** as the host adapter. So if the host adapter has + an IP of 192.168.100.1 eth0 should have an address such as + 192.168.100.42. The settings of the host adapter, including its IP can be changed either - in the VirtualBox manager UI (https://www.virtualbox.org/manual/ch03.html#settings-network) - or via the VBoxManage command line (https://www.virtualbox.org/manual/ch08.html#idp57572192) + in the VirtualBox manager UI + (https://www.virtualbox.org/manual/ch03.html#settings-network) + or via the VBoxManage command line + (https://www.virtualbox.org/manual/ch08.html#idp57572192) See Chapter 6 of the VirtualBox User Manual for more information about virtual networking (https://www.virtualbox.org/manual/ch06.html) - (See `morph help deploy` for details of how to pass parameters to write extensions) + (See `morph help deploy` for details of how to pass parameters to write + extensions) -- cgit v1.2.1 From bf19f21a4820f2cd0a08a8a5fb397932fc851cb6 Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Tue, 2 Dec 2014 13:58:13 +0000 Subject: OpenStack write extension documentation --- openstack.write | 34 ++-------------------------------- openstack.write.help | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 32 deletions(-) create mode 100644 openstack.write.help diff --git a/openstack.write b/openstack.write index b1941d3c..faf47f54 100755 --- a/openstack.write +++ b/openstack.write @@ -28,40 +28,11 @@ import morphlib.writeexts class OpenStackWriteExtension(morphlib.writeexts.WriteExtension): - '''Configure a raw disk image into an OpenStack host. - - The raw disk image is created during Morph's deployment and the - image is deployed in OpenStack using python-glanceclient. - - The location command line argument is the authentification url - of the OpenStack server using the following syntax: - - http://HOST:PORT/VERSION - - where - - * HOST is the host running OpenStack - * PORT is the port which is using OpenStack for authentifications. - * VERSION is the authentification version of OpenStack (Only v2.0 - supported) - - This extension needs in the environment the following variables: - - * OPENSTACK_USER is the username to use in the deployment. - * OPENSTACK_TENANT is the project name to use in the deployment. - * OPENSTACK_IMAGENAME is the name of the image to create. - * OPENSTACK_PASSWORD is the password of the user. - - - The extension will connect to OpenStack using python-glanceclient - to configure a raw image. 
- - ''' def process_args(self, args): if len(args) != 2: raise cliapp.AppException('Wrong number of command line args') - + temp_root, location = args os_params = self.get_openstack_parameters() @@ -69,7 +40,7 @@ class OpenStackWriteExtension(morphlib.writeexts.WriteExtension): fd, raw_disk = tempfile.mkstemp() os.close(fd) self.create_local_system(temp_root, raw_disk) - self.status(msg='Temporary disk image has been created at %s' + self.status(msg='Temporary disk image has been created at %s' % raw_disk) self.set_extlinux_root_to_virtio(raw_disk) @@ -120,4 +91,3 @@ class OpenStackWriteExtension(morphlib.writeexts.WriteExtension): self.status(msg='Image configured.') OpenStackWriteExtension().run() - diff --git a/openstack.write.help b/openstack.write.help new file mode 100644 index 00000000..579512f5 --- /dev/null +++ b/openstack.write.help @@ -0,0 +1,37 @@ +help: | + + Deploy a Baserock system as a *new* OpenStack virtual machine. + (Use the `ssh-rsync` write extension to deploy upgrades to an *existing* + VM) + + Deploys the system to the OpenStack host using python-glanceclient. + + Parameters: + + * location: the authentication url of the OpenStack server using the + following syntax: + + http://HOST:PORT/VERSION + + where + + * HOST is the host running OpenStack + * PORT is the port which is using OpenStack for authentifications. + * VERSION is the authentification version of OpenStack (Only v2.0 + supported) + + * OPENSTACK_USER=username: the username to use in the `--os-username` + argument to `glance`. + + * OPENSTACK_TENANT=tenant: the project name to use in the `--os-tenant-name` + argument to `glance`. + + * OPENSTACK_IMAGENAME=imagename: the name of the image to use in the + `--name` argument to `glance`. + + * OPENSTACK_PASSWORD=password: the password of the OpenStack user. (We + recommend passing this on the command-line, rather than setting an + environment variable or storing it in a cluster cluster definition file.) + + (See `morph help deploy` for details of how to pass parameters to write + extensions) -- cgit v1.2.1 From f33d468ea48fba8b75aa3aa1492983bbccae46aa Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Thu, 4 Dec 2014 13:49:59 +0000 Subject: ssh-rsync write extension documentation --- ssh-rsync.write.help | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 ssh-rsync.write.help diff --git a/ssh-rsync.write.help b/ssh-rsync.write.help new file mode 100644 index 00000000..4ef666e8 --- /dev/null +++ b/ssh-rsync.write.help @@ -0,0 +1,36 @@ +help: | + + Upgrade a Baserock system which is already deployed: + - as a KVM/LibVirt, OpenStack or vbox-ssh virtual machine; + - on a Jetson board. + + Copies a binary delta over to the target system and arranges for it + to be bootable. + + The recommended way to use this extension is by calling `morph upgrade`. + Using `morph deploy --upgrade` is deprecated. + + The upgrade will fail if: + - no VM is deployed and running at `location`; + - the target system is not a Baserock system; + - the target's filesystem and its layout are not compatible with that + created by `morph deploy`." + + See also the 'Upgrading a Baserock installation' section of the 'Using + Baserock` page at wiki.baserock.org + http://wiki.baserock.org/devel-with/#index8h2 + + Parameters: + + * location: the 'user@hostname' string that will be used by ssh and rsync. + 'user' will always be `root` and `hostname` the hostname or address of the + system being upgraded. 
+ + * VERSION_LABEL=label - **(MANDATORY)** should contain only alpha-numeric + characters and the '-' (hyphen) character. + + * AUTOSTART=` - boolean. If it is set, the VM will be started when + it has been deployed. + + (See `morph help deploy` for details of how to pass parameters to write + extensions) -- cgit v1.2.1 From 3599f68c40dc74f5634801b9c3ced856cf91155d Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Fri, 5 Dec 2014 09:02:34 +0000 Subject: rawdisk write extension documentation --- rawdisk.write.help | 42 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 34 insertions(+), 8 deletions(-) diff --git a/rawdisk.write.help b/rawdisk.write.help index 298d441c..f225276d 100644 --- a/rawdisk.write.help +++ b/rawdisk.write.help @@ -1,11 +1,37 @@ help: | - Create a raw disk image during Morph's deployment. - - If the image already exists, it is upgraded. - The `location` argument is a pathname to the image to be - created or upgraded. + Write a system produced by Morph to a physical disk, or to a file that can + be used as a virtual disk. The target will be formatted as a single Btrfs + partition, with the system image written to a subvolume in /systems, and + other subvolumes created for /home, /opt, /root, /srv and /var. - The INITRAMFS_PATH option can be used to specify the location of an - initramfs for syslinux to tell Linux to use, rather than booting - the rootfs directly. + When written to a physical drive, the drive can be used as the boot device + for a 'real' machine. + + When written to a file, the file can be used independently of `morph` to + create virtual machines with KVM / libvirt, OpenStack or, after converting + it to VDI format, VirtualBox. + + `morph deploy` will fail if the file specified by `location` already exists. + + If used in `morph upgrade`, the rootfs produced by 'morph build' is added to + the existing raw disk image or device as an additional btrfs sub-volume. + `morph upgrade` will fail if the file specified by `location` does not + exist, or is not a Baserock raw disk image. (Most users are unlikely to need + or use this functionality: it is useful mainly for developers working on the + Baserock tools.) + + Parameters: + + * location: the pathname of the disk image to be created/upgraded, or the + path to the physical device. + + * VERSION_LABEL=label - should contain only alpha-numeric + characters and the '-' (hyphen) character. Mandatory if being used with + `morph update` + + * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to + tell Linux to use, rather than booting the rootfs directly. + + (See `morph help deploy` for details of how to pass parameters to write + extensions) -- cgit v1.2.1 From a4534ef98b71b6d7bc57ae863fcdce353e174afb Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Mon, 8 Dec 2014 08:32:22 +0000 Subject: initramfs write extension documentation --- initramfs.write.help | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/initramfs.write.help b/initramfs.write.help index 29a9d266..a4a89f9d 100644 --- a/initramfs.write.help +++ b/initramfs.write.help @@ -1,4 +1,5 @@ help: | + Create an initramfs for a system by taking an existing system and converting it to the appropriate format. @@ -33,3 +34,8 @@ help: | initramfs: type: initramfs location: boot/initramfs.gz + + Parameters: + + * location: the path where the initramfs will be installed (e.g. 
`boot/initramfs.gz`) in the above example
-- cgit v1.2.1
From bfc74c3a2a1c5058e18653f4fb28e0390b66d520 Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Fri, 5 Dec 2014 15:34:07 +0000 Subject: Add a reference to write.help file
--- openstack.write | 1 + rawdisk.write | 12 ++---------- ssh-rsync.write | 10 ++-------- 3 files changed, 5 insertions(+), 18 deletions(-)
diff --git a/openstack.write b/openstack.write index faf47f54..d29d2661 100755 --- a/openstack.write +++ b/openstack.write @@ -28,6 +28,7 @@ import morphlib.writeexts class OpenStackWriteExtension(morphlib.writeexts.WriteExtension): + '''See openstack.write.help for documentation''' def process_args(self, args): if len(args) != 2:
diff --git a/rawdisk.write b/rawdisk.write index b17f8aa7..d91a4d5f 100755 --- a/rawdisk.write +++ b/rawdisk.write @@ -29,19 +29,12 @@ import morphlib.writeexts class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): - '''Create a raw disk image during Morph's deployment. - - If the image already exists, it is upgraded. - - The location command line argument is the pathname of the disk image - to be created/upgraded. - - ''' + '''See rawdisk.write.help for documentation''' def process_args(self, args): if len(args) != 2: raise cliapp.AppException('Wrong number of command line args') - + temp_root, location = args upgrade = self.get_environment_boolean('UPGRADE') @@ -114,4 +107,3 @@ class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): RawDiskWriteExtension().run() -
diff --git a/ssh-rsync.write b/ssh-rsync.write index 2d7258ba..c4577026 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -37,14 +37,8 @@ def ssh_runcmd_ignore_failure(location, command, **kwargs): class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): - '''Upgrade a running baserock system with ssh and rsync. - - It assumes the system is baserock-based and has a btrfs partition. - - The location command line argument is the 'user@hostname' string - that will be passed to ssh and rsync - - ''' + '''See ssh-rsync.write.help for documentation''' + def find_root_disk(self, location): '''Read /proc/mounts on location to find which device contains "/"'''
-- cgit v1.2.1
From de88869eff96b3f9c09f61f4c9f67c0a710e1974 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Tue, 9 Dec 2014 16:19:11 +0000 Subject: openstack.check: Be more careful when claiming credentials are invalid
In order to check the user's credentials at the start of deployment, we try to run `glance image-list`. I found a situation where this command failed despite my credentials being correct. Morph output a misleading error message that said 'Wrong OpenStack credentials'. The code now checks that the error returned by 'glance' does indeed look like a credentials error. If it doesn't, the full error output is displayed. The error I encountered now gets a message like this: ERROR: openstack.check failed with code 1: ERROR: Failed to connect to OpenStack instance at https://example.com:5000/v2.0: [('SSL routines', 'SSL3_GET_SERVER_CERTIFICATE', 'certificate verify failed')] (If you are curious, I fixed this by running `update-ca-certificates`.)
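(The fix relies on the difference between cliapp's two command runners: cliapp.runcmd() raises cliapp.AppException on a non-zero exit status, while cliapp.runcmd_unchecked() returns the exit status together with the captured stdout and stderr, leaving the caller to decide what the failure means. A minimal sketch of the calling pattern, with the error test simplified for illustration:

    import cliapp

    # The exit status and both output streams come back to the caller,
    # so stderr can be inspected instead of being collapsed into a
    # generic "command failed" exception.
    exit_status, out, err = cliapp.runcmd_unchecked(['glance', 'image-list'])
    if exit_status != 0:
        if 'HTTP 401' in err:
            raise cliapp.AppException('Invalid OpenStack credentials.')
        raise cliapp.AppException('glance failed: %s' % err)
)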
--- openstack.check | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/openstack.check b/openstack.check index edc37cc1..a6856c31 100755 --- a/openstack.check +++ b/openstack.check @@ -77,9 +77,16 @@ class OpenStackCheckExtension(morphlib.writeexts.WriteExtension): '--os-password', password, '--os-auth-url', auth_url, 'image-list'] - try: - cliapp.runcmd(cmdline) - except cliapp.AppException: - raise cliapp.AppException('Wrong OpenStack credentals.') + + exit, out, err = cliapp.runcmd_unchecked(cmdline) + + if exit != 0: + if err.startswith('The request you have made requires ' \ + 'authentication. (HTTP 401)'): + raise cliapp.AppException('Invalid OpenStack credentials.') + else: + raise cliapp.AppException( + 'Failed to connect to OpenStack instance at %s: %s' % + (auth_url, err)) OpenStackCheckExtension().run() -- cgit v1.2.1 From 6c1cc0f3706d1019158b6a31735f416b974a1226 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Tue, 9 Dec 2014 17:49:36 +0000 Subject: deploy: Note that some of the extensions listed live in definitions.git The user should be aware of this because if they aren't building baserock:baserock/definitions or a repo forked from it, those extensions won't be available. Also fix some long lines that I seem to have failed to commit already. --- openstack.write.help | 8 ++++---- rawdisk.write.help | 13 +++++++------ ssh-rsync.write.help | 4 ++-- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/openstack.write.help b/openstack.write.help index 579512f5..75ad9f0c 100644 --- a/openstack.write.help +++ b/openstack.write.help @@ -16,15 +16,15 @@ help: | where * HOST is the host running OpenStack - * PORT is the port which is using OpenStack for authentifications. - * VERSION is the authentification version of OpenStack (Only v2.0 + * PORT is the port which is using OpenStack for authentications. + * VERSION is the authentication version of OpenStack (Only v2.0 supported) * OPENSTACK_USER=username: the username to use in the `--os-username` argument to `glance`. - * OPENSTACK_TENANT=tenant: the project name to use in the `--os-tenant-name` - argument to `glance`. + * OPENSTACK_TENANT=tenant: the project name to use in the + `--os-tenant-name` argument to `glance`. * OPENSTACK_IMAGENAME=imagename: the name of the image to use in the `--name` argument to `glance`. diff --git a/rawdisk.write.help b/rawdisk.write.help index f225276d..81f35024 100644 --- a/rawdisk.write.help +++ b/rawdisk.write.help @@ -12,14 +12,15 @@ help: | create virtual machines with KVM / libvirt, OpenStack or, after converting it to VDI format, VirtualBox. - `morph deploy` will fail if the file specified by `location` already exists. + `morph deploy` will fail if the file specified by `location` already + exists. - If used in `morph upgrade`, the rootfs produced by 'morph build' is added to - the existing raw disk image or device as an additional btrfs sub-volume. + If used in `morph upgrade`, the rootfs produced by 'morph build' is added + to the existing raw disk image or device as an additional btrfs sub-volume. `morph upgrade` will fail if the file specified by `location` does not - exist, or is not a Baserock raw disk image. (Most users are unlikely to need - or use this functionality: it is useful mainly for developers working on the - Baserock tools.) + exist, or is not a Baserock raw disk image. (Most users are unlikely to + need or use this functionality: it is useful mainly for developers working + on the Baserock tools.) 
Parameters: diff --git a/ssh-rsync.write.help b/ssh-rsync.write.help index 4ef666e8..d03508c0 100644 --- a/ssh-rsync.write.help +++ b/ssh-rsync.write.help @@ -23,8 +23,8 @@ help: | Parameters: * location: the 'user@hostname' string that will be used by ssh and rsync. - 'user' will always be `root` and `hostname` the hostname or address of the - system being upgraded. + 'user' will always be `root` and `hostname` the hostname or address of + the system being upgraded. * VERSION_LABEL=label - **(MANDATORY)** should contain only alpha-numeric characters and the '-' (hyphen) character. -- cgit v1.2.1 From 37ca9dae1b891d73f891448d32fbbd8d7120850c Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Fri, 12 Dec 2014 11:26:58 +0000 Subject: Document DTB_PATH write extension parameter --- kvm.write.help | 6 +++++- rawdisk.write.help | 5 +++++ virtualbox-ssh.write.help | 5 +++++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/kvm.write.help b/kvm.write.help index 26a54d9c..6df6c53c 100644 --- a/kvm.write.help +++ b/kvm.write.help @@ -42,6 +42,10 @@ help: | * AUTOSTART=` - boolean. If it is set, the VM will be started when it has been deployed. + * DTB_PATH=path: **(MANDATORY)** for systems that require a device tree + binary - Give the full path (without a leading /) to the location of the + DTB in the built system image . The deployment will fail if `path` does + not exist. + (See `morph help deploy` for details of how to pass parameters to write extensions) - diff --git a/rawdisk.write.help b/rawdisk.write.help index 81f35024..fe47c890 100644 --- a/rawdisk.write.help +++ b/rawdisk.write.help @@ -34,5 +34,10 @@ help: | * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to tell Linux to use, rather than booting the rootfs directly. + * DTB_PATH=path: **(MANDATORY)** for systems that require a device tree + binary - Give the full path (without a leading /) to the location of the + DTB in the built system image . The deployment will fail if `path` does + not exist. + (See `morph help deploy` for details of how to pass parameters to write extensions) diff --git a/virtualbox-ssh.write.help b/virtualbox-ssh.write.help index b4c59553..7131f8b8 100644 --- a/virtualbox-ssh.write.help +++ b/virtualbox-ssh.write.help @@ -41,6 +41,11 @@ help: | * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to tell Linux to use, rather than booting the rootfs directly. + * DTB_PATH=path: **(MANDATORY)** for systems that require a device tree + binary - Give the full path (without a leading /) to the location of the + DTB in the built system image . The deployment will fail if `path` does + not exist. + * AUTOSTART= - boolean. If it is set, the VM will be started when it has been deployed. -- cgit v1.2.1 From 859568b79d22299c082a4108dd057b9145a13eda Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Fri, 12 Dec 2014 11:30:13 +0000 Subject: Document BOOTLOADER_INSTALL and BOOTLOADER_CONFIG_FORMAT write extension parameters --- kvm.write.help | 16 ++++++++++++++++ rawdisk.write.help | 16 ++++++++++++++++ virtualbox-ssh.write.help | 16 ++++++++++++++++ 3 files changed, 48 insertions(+) diff --git a/kvm.write.help b/kvm.write.help index 6df6c53c..8ddcb89c 100644 --- a/kvm.write.help +++ b/kvm.write.help @@ -47,5 +47,21 @@ help: | DTB in the built system image . The deployment will fail if `path` does not exist. 
+ * BOOTLOADER_INSTALL=value: the bootloader to be installed + **(MANDATORY)** for non-x86 systems + + allowed values = + - 'extlinux' (default) - the extlinux bootloader will + be installed + - 'none' - no bootloader will be installed by `morph deploy`. A + bootloader must be installed manually. This value must be used when + deploying non-x86 systems such as ARM. + + * BOOTLOADER_CONFIG_FORMAT=value: the bootloader format to be used. + If not specified for x86-32 and x86-64 systems, 'extlinux' will be used + + allowed values = + - 'extlinux' + (See `morph help deploy` for details of how to pass parameters to write extensions) diff --git a/rawdisk.write.help b/rawdisk.write.help index fe47c890..d6c78573 100644 --- a/rawdisk.write.help +++ b/rawdisk.write.help @@ -39,5 +39,21 @@ help: | DTB in the built system image . The deployment will fail if `path` does not exist. + * BOOTLOADER_INSTALL=value: the bootloader to be installed + **(MANDATORY)** for non-x86 systems + + allowed values = + - 'extlinux' (default) - the extlinux bootloader will + be installed + - 'none' - no bootloader will be installed by `morph deploy`. A + bootloader must be installed manually. This value must be used when + deploying non-x86 systems such as ARM. + + * BOOTLOADER_CONFIG_FORMAT=value: the bootloader format to be used. + If not specified for x86-32 and x86-64 systems, 'extlinux' will be used + + allowed values = + - 'extlinux' + (See `morph help deploy` for details of how to pass parameters to write extensions) diff --git a/virtualbox-ssh.write.help b/virtualbox-ssh.write.help index 7131f8b8..4dddd987 100644 --- a/virtualbox-ssh.write.help +++ b/virtualbox-ssh.write.help @@ -46,6 +46,22 @@ help: | DTB in the built system image . The deployment will fail if `path` does not exist. + * BOOTLOADER_INSTALL=value: the bootloader to be installed + **(MANDATORY)** for non-x86 systems + + allowed values = + - 'extlinux' (default) - the extlinux bootloader will + be installed + - 'none' - no bootloader will be installed by `morph deploy`. A + bootloader must be installed manually. This value must be used when + deploying non-x86 systems such as ARM. + + * BOOTLOADER_CONFIG_FORMAT=value: the bootloader format to be used. + If not specified for x86-32 and x86-64 systems, 'extlinux' will be used + + allowed values = + - 'extlinux' + * AUTOSTART= - boolean. If it is set, the VM will be started when it has been deployed. -- cgit v1.2.1 From cb8811daf402a8485f0ca9fd678843f11aaaf5ea Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Fri, 12 Dec 2014 11:36:04 +0000 Subject: Document KERNEL_ARGS write extension parameter --- kvm.write.help | 9 +++++++++ rawdisk.write.help | 9 +++++++++ virtualbox-ssh.write.help | 9 +++++++++ 3 files changed, 27 insertions(+) diff --git a/kvm.write.help b/kvm.write.help index 8ddcb89c..04393b8a 100644 --- a/kvm.write.help +++ b/kvm.write.help @@ -63,5 +63,14 @@ help: | allowed values = - 'extlinux' + * KERNEL_ARGS=args: optional additional kernel command-line parameters to + be appended to the default set. 
The default set is: + + 'rw init=/sbin/init rootfstype=btrfs \ + rootflags=subvol=systems/default/run \ + root=[name or UUID of root filesystem]' + + (See https://www.kernel.org/doc/Documentation/kernel-parameters.txt) + (See `morph help deploy` for details of how to pass parameters to write extensions)
diff --git a/rawdisk.write.help b/rawdisk.write.help index d6c78573..54af81c4 100644 --- a/rawdisk.write.help +++ b/rawdisk.write.help @@ -55,5 +55,14 @@ help: | allowed values = - 'extlinux' + * KERNEL_ARGS=args: optional additional kernel command-line parameters to + be appended to the default set. The default set is: + + 'rw init=/sbin/init rootfstype=btrfs \ + rootflags=subvol=systems/default/run \ + root=[name or UUID of root filesystem]' + + (See https://www.kernel.org/doc/Documentation/kernel-parameters.txt) + (See `morph help deploy` for details of how to pass parameters to write extensions)
diff --git a/virtualbox-ssh.write.help b/virtualbox-ssh.write.help index 4dddd987..cb50acc0 100644 --- a/virtualbox-ssh.write.help +++ b/virtualbox-ssh.write.help @@ -62,6 +62,15 @@ help: | allowed values = - 'extlinux' + * KERNEL_ARGS=args: optional additional kernel command-line parameters to + be appended to the default set. The default set is: + + 'rw init=/sbin/init rootfstype=btrfs \ + rootflags=subvol=systems/default/run \ + root=[name or UUID of root filesystem]' + + (See https://www.kernel.org/doc/Documentation/kernel-parameters.txt) + * AUTOSTART= - boolean. If it is set, the VM will be started when it has been deployed.
-- cgit v1.2.1
From 701ebe1464a97dfd40336007c8da89d9c9685ad8 Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Fri, 12 Dec 2014 11:56:39 +0000 Subject: Add 'do not use' warnings to nfsboot write extension
--- nfsboot.write | 13 +++++++++++-- nfsboot.write.help | 9 ++++++++- 2 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/nfsboot.write b/nfsboot.write index 8d3d6df7..9fa6fc84 100755 --- a/nfsboot.write +++ b/nfsboot.write @@ -17,6 +17,16 @@ '''A Morph deployment write extension for deploying to an nfsboot server +*** DO NOT USE *** +- This was written before 'proper' deployment mechanisms were in place. +It is unlikely to work at all, and will not work correctly. + +Use the pxeboot write extension instead + +*** + + + An nfsboot server is defined as a baserock system that has tftp and nfs servers running, the tftp server is exporting the contents of /srv/nfsboot/tftp/ and the user has sufficient permissions to create nfs roots @@ -125,7 +135,7 @@ class NFSBootWriteExtension(morphlib.writeexts.WriteExtension): self.status(msg='Creating destination directories') try: - cliapp.ssh_runcmd('root@%s' % location, + cliapp.ssh_runcmd('root@%s' % location, ['mkdir', '-p', orig_path, run_path]) except cliapp.AppException: raise cliapp.AppException('Could not create dirs %s and %s on %s' @@ -191,4 +201,3 @@ mv "$temp" "$target" NFSBootWriteExtension().run() -
diff --git a/nfsboot.write.help b/nfsboot.write.help index 598b1b23..310fd7a4 100644 --- a/nfsboot.write.help +++ b/nfsboot.write.help @@ -1,6 +1,13 @@ help: | + *** DO NOT USE *** + - This was written before 'proper' deployment mechanisms were in place. + It is unlikely to work at all, and will not work correctly. + + Use the pxeboot write extension instead + + *** Deploy a system image and kernel to an nfsboot server.
- + An nfsboot server is defined as a baserock system that has tftp and nfs servers running, the tftp server is exporting the contents of /srv/nfsboot/tftp/ and the user has sufficient -- cgit v1.2.1 From 28d69d11444fcd5a6531f145ce521cc4d4346dde Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Thu, 18 Dec 2014 10:12:41 +0000 Subject: Remove trailing \ I meant to do this as part of the previous merge. --- openstack.check | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openstack.check b/openstack.check index a6856c31..3850d481 100755 --- a/openstack.check +++ b/openstack.check @@ -81,7 +81,7 @@ class OpenStackCheckExtension(morphlib.writeexts.WriteExtension): exit, out, err = cliapp.runcmd_unchecked(cmdline) if exit != 0: - if err.startswith('The request you have made requires ' \ + if err.startswith('The request you have made requires ' 'authentication. (HTTP 401)'): raise cliapp.AppException('Invalid OpenStack credentials.') else: -- cgit v1.2.1 From 6a49bc67b3fc5073612195510d556129bfdd28bb Mon Sep 17 00:00:00 2001 From: Tiago Gomes Date: Thu, 1 Jan 2015 00:25:54 +0000 Subject: Add some checks to the sysroot deployment extension Ensure that a) the deployment directory must not exist b) the extension can not be used to upgrade a system --- sysroot.check | 30 ++++++++++++++++++++++++++++++ sysroot.write | 4 +--- 2 files changed, 31 insertions(+), 3 deletions(-) create mode 100755 sysroot.check diff --git a/sysroot.check b/sysroot.check new file mode 100755 index 00000000..bfacd3fc --- /dev/null +++ b/sysroot.check @@ -0,0 +1,30 @@ +#!/bin/sh +# Copyright (C) 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +# Preparatory checks for Morph 'sysroot' write extension + +set -eu + +location="$1" +if [ -d "$location" ]; then + echo >&2 "ERROR: Deployment directory already exists: $location" + exit 1 +fi + +if [ "$UPGRADE" == "yes" ]; then + echo >&2 "ERROR: Cannot upgrade a sysroot deployment" + exit 1 +fi diff --git a/sysroot.write b/sysroot.write index 1ae4864f..be315365 100755 --- a/sysroot.write +++ b/sysroot.write @@ -1,5 +1,5 @@ #!/bin/sh -# Copyright (C) 2014 Codethink Limited +# Copyright (C) 2014,2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -18,9 +18,7 @@ set -eu -# Ensure the target is an empty directory mkdir -p "$2" -find "$2" -mindepth 1 -delete # Move the contents of our source directory to our target # Previously we would (cd "$1" && find -print0 | cpio -0pumd "$absolute_path") -- cgit v1.2.1 From 544ac8182471a0552b8a22fd574029e74a30d8ed Mon Sep 17 00:00:00 2001 From: Jim MacArthur Date: Wed, 21 Jan 2015 18:08:01 +0000 Subject: Remove checks for NETWORK_CONFIG and eth0 and eth1 in it network_config isn't used anywhere in this function. 
The purpose of this function (getting the name of an appropriate host-only network interface) doesn't seem to depend on it either. eth0 and eth1 won't always be present (several Baserock systems will have enp0s3, etc). So I think these checks should be removed. --- virtualbox-ssh.write | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index 1b4de89c..fa54c296 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -169,20 +169,6 @@ class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): def get_host_interface(self, ssh_host): host_ipaddr = os.environ.get('HOST_IPADDR') netmask = os.environ.get('NETMASK') - network_config = os.environ.get("NETWORK_CONFIG") - - if network_config is None: - raise cliapp.AppException('NETWORK_CONFIG was not given') - - if "eth0:" not in network_config: - raise cliapp.AppException( - 'NETWORK_CONFIG does not contain ' - 'the eth0 configuration') - - if "eth1:" not in network_config: - raise cliapp.AppException( - 'NETWORK_CONFIG does not contain ' - 'the eth1 configuration') if host_ipaddr is None: raise cliapp.AppException('HOST_IPADDR was not given') -- cgit v1.2.1 From 0e6d9749b509382f65177337ec6465cdbea44b7b Mon Sep 17 00:00:00 2001 From: Richard Ipsum Date: Wed, 28 Jan 2015 11:32:07 +0000 Subject: Check file can be created at location --- kvm.check | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/kvm.check b/kvm.check index 1bb4007a..3c6accbf 100755 --- a/kvm.check +++ b/kvm.check @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2014 Codethink Limited +# Copyright (C) 2014-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -43,6 +43,7 @@ class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension): ssh_host, vm_name, vm_path = self.check_and_parse_location(location) self.check_ssh_connectivity(ssh_host) + self.check_can_create_file_at_given_path(ssh_host, vm_path) self.check_no_existing_libvirt_vm(ssh_host, vm_name) self.check_extra_disks_exist(ssh_host, self.parse_attach_disks()) @@ -73,6 +74,26 @@ class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension): 'write extension to deploy upgrades to existing machines.' 
% (ssh_host, vm_name)) + def check_can_create_file_at_given_path(self, ssh_host, vm_path): + + def check_can_write_to_given_path(): + try: + cliapp.ssh_runcmd(ssh_host, ['touch', vm_path]) + except cliapp.AppException as e: + raise cliapp.AppException("Can't write to location %s on %s" + % (vm_path, ssh_host)) + else: + cliapp.ssh_runcmd(ssh_host, ['rm', vm_path]) + + try: + cliapp.ssh_runcmd(ssh_host, ['test', '-e', vm_path]) + except cliapp.AppException as e: + # vm_path doesn't already exist, so let's test we can write + check_can_write_to_given_path() + else: + raise cliapp.AppException('%s already exists on %s' + % (vm_path, ssh_host)) + def check_extra_disks_exist(self, ssh_host, filename_list): for filename in filename_list: try: -- cgit v1.2.1 From ca9e43bf901d10bd1c0ec580255f6096bb53c65f Mon Sep 17 00:00:00 2001 From: Richard Ipsum Date: Wed, 28 Jan 2015 17:28:00 +0000 Subject: Add check for virtual networks An exception will be raised if any needed networks are not started --- kvm.check | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/kvm.check b/kvm.check index 3c6accbf..b8877a89 100755 --- a/kvm.check +++ b/kvm.check @@ -17,6 +17,7 @@ '''Preparatory checks for Morph 'kvm' write extension''' import cliapp +import os import re import urlparse @@ -46,6 +47,7 @@ class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension): self.check_can_create_file_at_given_path(ssh_host, vm_path) self.check_no_existing_libvirt_vm(ssh_host, vm_name) self.check_extra_disks_exist(ssh_host, self.parse_attach_disks()) + self.check_virtual_networks_are_started(ssh_host) def check_and_parse_location(self, location): '''Check and parse the location argument to get relevant data.''' @@ -102,4 +104,50 @@ class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension): raise cliapp.AppException('Did not find file %s on host %s' % (filename, ssh_host)) + def check_virtual_networks_are_started(self, ssh_host): + + def check_virtual_network_is_started(network_name): + cmd = ['virsh', '-c', 'qemu:///system', 'net-info', network_name] + net_info = cliapp.ssh_runcmd(ssh_host, cmd).split('\n') + + def pretty_concat(lines): + return '\n'.join(['\t%s' % line for line in lines]) + + for line in net_info: + m = re.match('^Active:\W*(\w+)\W*', line) + if m: + break + else: + raise cliapp.AppException( + "Got unexpected output parsing output of `%s':\n%s" + % (' '.join(cmd), pretty_concat(net_info))) + + network_active = m.group(1) == 'yes' + + if not network_active: + raise cliapp.AppException("Network '%s' is not started" + % network_name) + + def name(nic_entry): + if ',' in nic_entry: + # NETWORK_NAME,mac=12:34,model=e1000... 
+                return nic_entry[:nic_entry.find(',')]
+            else:
+                return nic_entry # NETWORK_NAME
+
+        if 'NIC_CONFIG' in os.environ:
+            nics = os.environ['NIC_CONFIG'].split()
+
+            # --network bridge= is used to specify a bridge
+            # --network user is used to specify a form of NAT
+            # (see the virt-install(1) man page)
+            networks = [name(n) for n in nics if not n.startswith('bridge=')
+                        and not n.startswith('user')]
+        else:
+            networks = ['default']
+
+        for network in networks:
+            check_virtual_network_is_started(network)
+
+
 KvmPlusSshCheckExtension().run()
-- cgit v1.2.1


From ee1ccdbeda5cc559194db64c6ecf38f28cc391f0 Mon Sep 17 00:00:00 2001
From: Sam Thursfield
Date: Tue, 3 Feb 2015 17:35:58 +0000
Subject: Update copyright years so ./check passes

---
 virtualbox-ssh.write | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write
index fa54c296..7eafcff3 100755
--- a/virtualbox-ssh.write
+++ b/virtualbox-ssh.write
@@ -1,5 +1,5 @@
 #!/usr/bin/python
-# Copyright (C) 2012-2014 Codethink Limited
+# Copyright (C) 2012-2015 Codethink Limited
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
-- cgit v1.2.1
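As an aside to the kvm.check changes above: the NIC_CONFIG parsing is easier to
see in isolation. Each whitespace-separated entry names a libvirt network up to
its first comma, and 'bridge=' and 'user' entries are skipped because they do
not name a virtual network. A minimal standalone sketch of that rule, using
hypothetical example values not taken from the patches:

    # Sketch of kvm.check's NIC_CONFIG parsing rule; the entries in the
    # assertion below are illustrative only.
    def network_names(nic_config):
        def name(nic_entry):
            # 'NETWORK_NAME,mac=12:34,model=e1000' -> 'NETWORK_NAME'
            return nic_entry.split(',', 1)[0]
        return [name(n) for n in nic_config.split()
                if not n.startswith('bridge=') and not n.startswith('user')]

    assert network_names('default,mac=52:54:00:12:34:56 bridge=br0 user') == ['default']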
% (ssh_host, vm_name)) + def check_can_create_file_at_given_path(self, ssh_host, vm_path): + + def check_can_write_to_given_path(): + try: + cliapp.ssh_runcmd(ssh_host, ['touch', vm_path]) + except cliapp.AppException as e: + raise cliapp.AppException("Can't write to location %s on %s" + % (vm_path, ssh_host)) + else: + cliapp.ssh_runcmd(ssh_host, ['rm', vm_path]) + + try: + cliapp.ssh_runcmd(ssh_host, ['test', '-e', vm_path]) + except cliapp.AppException as e: + # vm_path doesn't already exist, so let's test we can write + check_can_write_to_given_path() + else: + raise cliapp.AppException('%s already exists on %s' + % (vm_path, ssh_host)) + def check_extra_disks_exist(self, ssh_host, filename_list): for filename in filename_list: try: -- cgit v1.2.1 From b3736e8ff76418364dc1f9e0ce41cc0a89f309b7 Mon Sep 17 00:00:00 2001 From: Richard Ipsum Date: Wed, 28 Jan 2015 17:28:00 +0000 Subject: Add check for virtual networks An exception will be raised if any needed networks are not started --- kvm.check | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/kvm.check b/kvm.check index 3c6accbf..b8877a89 100755 --- a/kvm.check +++ b/kvm.check @@ -17,6 +17,7 @@ '''Preparatory checks for Morph 'kvm' write extension''' import cliapp +import os import re import urlparse @@ -46,6 +47,7 @@ class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension): self.check_can_create_file_at_given_path(ssh_host, vm_path) self.check_no_existing_libvirt_vm(ssh_host, vm_name) self.check_extra_disks_exist(ssh_host, self.parse_attach_disks()) + self.check_virtual_networks_are_started(ssh_host) def check_and_parse_location(self, location): '''Check and parse the location argument to get relevant data.''' @@ -102,4 +104,50 @@ class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension): raise cliapp.AppException('Did not find file %s on host %s' % (filename, ssh_host)) + def check_virtual_networks_are_started(self, ssh_host): + + def check_virtual_network_is_started(network_name): + cmd = ['virsh', '-c', 'qemu:///system', 'net-info', network_name] + net_info = cliapp.ssh_runcmd(ssh_host, cmd).split('\n') + + def pretty_concat(lines): + return '\n'.join(['\t%s' % line for line in lines]) + + for line in net_info: + m = re.match('^Active:\W*(\w+)\W*', line) + if m: + break + else: + raise cliapp.AppException( + "Got unexpected output parsing output of `%s':\n%s" + % (' '.join(cmd), pretty_concat(net_info))) + + network_active = m.group(1) == 'yes' + + if not network_active: + raise cliapp.AppException("Network '%s' is not started" + % network_name) + + def name(nic_entry): + if ',' in nic_entry: + # NETWORK_NAME,mac=12:34,model=e1000... 
+ return nic_entry[:nic_entry.find(',')] + else: + return nic_entry # NETWORK_NAME + + if 'NIC_CONFIG' in os.environ: + nics = os.environ['NIC_CONFIG'].split() + + # --network bridge= is used to specify a bridge + # --network user is used to specify a form of NAT + # (see the virt-install(1) man page) + networks = [name(n) for n in nics if not n.startswith('bridge=') + and not n.startswith('user')] + else: + networks = ['default'] + + for network in networks: + check_virtual_network_is_started(network) + + KvmPlusSshCheckExtension().run() -- cgit v1.2.1 From 8ca2082cdb2cf65b8ec9e1c392349208c0a00373 Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Wed, 4 Mar 2015 13:28:57 +0000 Subject: Add copyright headers to write.help and configure.help files --- initramfs.write.help | 14 ++++++++++++++ install-files.configure.help | 14 ++++++++++++++ kvm.write.help | 14 ++++++++++++++ nfsboot.write.help | 14 ++++++++++++++ openstack.write.help | 14 ++++++++++++++ rawdisk.write.help | 14 ++++++++++++++ ssh-rsync.write.help | 14 ++++++++++++++ tar.write.help | 16 +++++++++++++++- virtualbox-ssh.write.help | 14 ++++++++++++++ 9 files changed, 127 insertions(+), 1 deletion(-) diff --git a/initramfs.write.help b/initramfs.write.help index a4a89f9d..54d3ae8c 100644 --- a/initramfs.write.help +++ b/initramfs.write.help @@ -1,3 +1,17 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + help: | Create an initramfs for a system by taking an existing system and diff --git a/install-files.configure.help b/install-files.configure.help index eb3aab0c..991c26c8 100644 --- a/install-files.configure.help +++ b/install-files.configure.help @@ -1,3 +1,17 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + help: | Install a set of files onto a system diff --git a/kvm.write.help b/kvm.write.help index 04393b8a..812a5309 100644 --- a/kvm.write.help +++ b/kvm.write.help @@ -1,3 +1,17 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + help: | Deploy a Baserock system as a *new* KVM/LibVirt virtual machine. diff --git a/nfsboot.write.help b/nfsboot.write.help index 310fd7a4..186c479a 100644 --- a/nfsboot.write.help +++ b/nfsboot.write.help @@ -1,3 +1,17 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + help: | *** DO NOT USE *** - This was written before 'proper' deployment mechanisms were in place. diff --git a/openstack.write.help b/openstack.write.help index 75ad9f0c..26983060 100644 --- a/openstack.write.help +++ b/openstack.write.help @@ -1,3 +1,17 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + help: | Deploy a Baserock system as a *new* OpenStack virtual machine. diff --git a/rawdisk.write.help b/rawdisk.write.help index 54af81c4..52ed73fb 100644 --- a/rawdisk.write.help +++ b/rawdisk.write.help @@ -1,3 +1,17 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + help: | Write a system produced by Morph to a physical disk, or to a file that can diff --git a/ssh-rsync.write.help b/ssh-rsync.write.help index d03508c0..f3f79ed5 100644 --- a/ssh-rsync.write.help +++ b/ssh-rsync.write.help @@ -1,3 +1,17 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . 
+ help: | Upgrade a Baserock system which is already deployed: diff --git a/tar.write.help b/tar.write.help index f052ac03..b45c61fa 100644 --- a/tar.write.help +++ b/tar.write.help @@ -1,5 +1,19 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + help: | Create a .tar file of the deployed system. - + The `location` argument is a pathname to the .tar file to be created. diff --git a/virtualbox-ssh.write.help b/virtualbox-ssh.write.help index cb50acc0..2dbf988c 100644 --- a/virtualbox-ssh.write.help +++ b/virtualbox-ssh.write.help @@ -1,3 +1,17 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + help: | Deploy a Baserock system as a *new* VirtualBox virtual machine. -- cgit v1.2.1 From ed741d8d090086e2380f7b9d68ddc3bd122acb9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Javier=20Jard=C3=B3n?= Date: Fri, 13 Mar 2015 18:18:55 +0000 Subject: Use the modern way of the GPL copyright header: URL instead real address Change-Id: I992dc0c1d40f563ade56a833162d409b02be90a0 --- add-config-files.configure | 5 ++--- fstab.configure | 5 ++--- initramfs.write | 5 ++--- install-files.configure | 5 ++--- kvm.check | 3 +-- kvm.write | 5 ++--- nfsboot.check | 5 ++--- nfsboot.configure | 5 ++--- nfsboot.write | 5 ++--- openstack.check | 5 ++--- openstack.write | 5 ++--- rawdisk.check | 5 ++--- rawdisk.write | 5 ++--- set-hostname.configure | 5 ++--- simple-network.configure | 5 ++--- ssh-rsync.check | 5 ++--- ssh-rsync.write | 5 ++--- sysroot.check | 3 +-- sysroot.write | 3 +-- tar.check | 5 ++--- tar.write | 5 ++--- vdaboot.configure | 5 ++--- virtualbox-ssh.check | 5 ++--- virtualbox-ssh.write | 3 +-- 24 files changed, 44 insertions(+), 68 deletions(-) diff --git a/add-config-files.configure b/add-config-files.configure index 0094cf6b..2cf96fd1 100755 --- a/add-config-files.configure +++ b/add-config-files.configure @@ -1,5 +1,5 @@ #!/bin/sh -# Copyright (C) 2013 Codethink Limited +# Copyright (C) 2013,2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . 
# Copy all files located in $SRC_CONFIG_DIR to the image /etc. diff --git a/fstab.configure b/fstab.configure index a1287ea4..3bbc9102 100755 --- a/fstab.configure +++ b/fstab.configure @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2013 Codethink Limited +# Copyright (C) 2013,2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . # # =*= License: GPL-2 =*= diff --git a/initramfs.write b/initramfs.write index f8af6d84..1059defa 100755 --- a/initramfs.write +++ b/initramfs.write @@ -1,5 +1,5 @@ #!/bin/sh -# Copyright (C) 2014 Codethink Limited +# Copyright (C) 2014-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . # # =*= License: GPL-2 =*= diff --git a/install-files.configure b/install-files.configure index 04dc5f18..58cf373a 100755 --- a/install-files.configure +++ b/install-files.configure @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2013-2014 Codethink Limited +# Copyright (C) 2013-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . ''' A Morph configuration extension for adding arbitrary files to a system diff --git a/kvm.check b/kvm.check index b8877a89..62d76453 100755 --- a/kvm.check +++ b/kvm.check @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . '''Preparatory checks for Morph 'kvm' write extension''' diff --git a/kvm.write b/kvm.write index 30b43d6c..0d0c095b 100755 --- a/kvm.write +++ b/kvm.write @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2012-2014 Codethink Limited +# Copyright (C) 2012-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . '''A Morph deployment write extension for deploying to KVM+libvirt. 
diff --git a/nfsboot.check b/nfsboot.check index 806e560a..e273f61c 100755 --- a/nfsboot.check +++ b/nfsboot.check @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2014 Codethink Limited +# Copyright (C) 2014-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . '''Preparatory checks for Morph 'nfsboot' write extension''' diff --git a/nfsboot.configure b/nfsboot.configure index 660d9c39..6a68dc48 100755 --- a/nfsboot.configure +++ b/nfsboot.configure @@ -1,5 +1,5 @@ #!/bin/sh -# Copyright (C) 2013-2014 Codethink Limited +# Copyright (C) 2013-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . # Remove all networking interfaces. On nfsboot systems, eth0 is set up diff --git a/nfsboot.write b/nfsboot.write index 49d71174..d928775e 100755 --- a/nfsboot.write +++ b/nfsboot.write @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2013-2014 Codethink Limited +# Copyright (C) 2013-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . '''A Morph deployment write extension for deploying to an nfsboot server diff --git a/openstack.check b/openstack.check index 3850d481..4c21b604 100755 --- a/openstack.check +++ b/openstack.check @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2014 Codethink Limited +# Copyright (C) 2014-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . '''Preparatory checks for Morph 'openstack' write extension''' diff --git a/openstack.write b/openstack.write index d29d2661..67e07c18 100755 --- a/openstack.write +++ b/openstack.write @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2013 - 2014 Codethink Limited +# Copyright (C) 2013-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . '''A Morph deployment write extension for deploying to OpenStack.''' diff --git a/rawdisk.check b/rawdisk.check index 094adb72..9be0ce91 100755 --- a/rawdisk.check +++ b/rawdisk.check @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2014 Codethink Limited +# Copyright (C) 2014-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . '''Preparatory checks for Morph 'rawdisk' write extension''' diff --git a/rawdisk.write b/rawdisk.write index d91a4d5f..6f2d45ba 100755 --- a/rawdisk.write +++ b/rawdisk.write @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2012-2014 Codethink Limited +# Copyright (C) 2012-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . '''A Morph deployment write extension for raw disk images.''' diff --git a/set-hostname.configure b/set-hostname.configure index e44c5d56..4b2424d8 100755 --- a/set-hostname.configure +++ b/set-hostname.configure @@ -1,5 +1,5 @@ #!/bin/sh -# Copyright (C) 2013 Codethink Limited +# Copyright (C) 2013,2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . # Set hostname on system from HOSTNAME. diff --git a/simple-network.configure b/simple-network.configure index b98b202c..13884e9d 100755 --- a/simple-network.configure +++ b/simple-network.configure @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2013 Codethink Limited +# Copyright (C) 2013,2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . 
'''A Morph deployment configuration extension to handle /etc/network/interfaces diff --git a/ssh-rsync.check b/ssh-rsync.check index 11446c28..c3bdfd29 100755 --- a/ssh-rsync.check +++ b/ssh-rsync.check @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2014 Codethink Limited +# Copyright (C) 2014-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . '''Preparatory checks for Morph 'ssh-rsync' write extension''' diff --git a/ssh-rsync.write b/ssh-rsync.write index c4577026..6d596500 100755 --- a/ssh-rsync.write +++ b/ssh-rsync.write @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2013-2014 Codethink Limited +# Copyright (C) 2013-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . '''A Morph deployment write extension for upgrading systems over ssh.''' diff --git a/sysroot.check b/sysroot.check index bfacd3fc..8ed965bd 100755 --- a/sysroot.check +++ b/sysroot.check @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . # Preparatory checks for Morph 'sysroot' write extension diff --git a/sysroot.write b/sysroot.write index be315365..0ad8d630 100755 --- a/sysroot.write +++ b/sysroot.write @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . # A Morph write extension to deploy to another directory diff --git a/tar.check b/tar.check index cbeaf163..f2304d46 100755 --- a/tar.check +++ b/tar.check @@ -1,5 +1,5 @@ #!/bin/sh -# Copyright (C) 2014 Codethink Limited +# Copyright (C) 2014-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . 
# Preparatory checks for Morph 'tar' write extension diff --git a/tar.write b/tar.write index 333626b5..01b545b4 100755 --- a/tar.write +++ b/tar.write @@ -1,5 +1,5 @@ #!/bin/sh -# Copyright (C) 2013 Codethink Limited +# Copyright (C) 2013,2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . # A Morph write extension to deploy to a .tar file diff --git a/vdaboot.configure b/vdaboot.configure index b88eb3a8..60de925b 100755 --- a/vdaboot.configure +++ b/vdaboot.configure @@ -1,5 +1,5 @@ #!/bin/sh -# Copyright (C) 2013 Codethink Limited +# Copyright (C) 2013,2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . # Change the "/" mount point to /dev/vda to use virtio disks. diff --git a/virtualbox-ssh.check b/virtualbox-ssh.check index 57d54db1..a97f3294 100755 --- a/virtualbox-ssh.check +++ b/virtualbox-ssh.check @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (C) 2014 Codethink Limited +# Copyright (C) 2014-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . '''Preparatory checks for Morph 'virtualbox-ssh' write extension''' diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write index 7eafcff3..774f2b4f 100755 --- a/virtualbox-ssh.write +++ b/virtualbox-ssh.write @@ -11,8 +11,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program. If not, see . '''A Morph deployment write extension for deploying to VirtualBox via ssh. 
-- cgit v1.2.1 From f1dce77123a814527f96d415b69ee10a06b8d36c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Javier=20Jard=C3=B3n?= Date: Fri, 6 Mar 2015 12:21:05 +0000 Subject: simple-network.configure: Move the generation of /etc/network/interfaces to a function --- simple-network.configure | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/simple-network.configure b/simple-network.configure index 13884e9d..c533ff22 100755 --- a/simple-network.configure +++ b/simple-network.configure @@ -48,8 +48,13 @@ class SimpleNetworkConfigurationExtension(cliapp.Application): self.status(msg="Processing NETWORK_CONFIG=%(nc)s", nc=network_config) stanzas = self.parse_network_stanzas(network_config) - iface_file = self.generate_iface_file(stanzas) + self.generate_interfaces_file(args, stanzas) + + def generate_interfaces_file(self, args, stanzas): + """Generate /etc/network/interfaces file""" + + iface_file = self.generate_iface_file(stanzas) with open(os.path.join(args[0], "etc/network/interfaces"), "w") as f: f.write(iface_file) -- cgit v1.2.1 From e36de9eb1129db24b47dfa84cd96c5e2e2e6a327 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Javier=20Jard=C3=B3n?= Date: Fri, 6 Mar 2015 12:39:58 +0000 Subject: simple-network.configure: Generate networkd .network files as well --- simple-network.configure | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/simple-network.configure b/simple-network.configure index c533ff22..5b02142c 100755 --- a/simple-network.configure +++ b/simple-network.configure @@ -50,6 +50,7 @@ class SimpleNetworkConfigurationExtension(cliapp.Application): stanzas = self.parse_network_stanzas(network_config) self.generate_interfaces_file(args, stanzas) + self.generate_networkd_files(args, stanzas) def generate_interfaces_file(self, args, stanzas): """Generate /etc/network/interfaces file""" @@ -87,6 +88,38 @@ class SimpleNetworkConfigurationExtension(cliapp.Application): lines += [""] return "\n".join(lines) + def generate_networkd_files(self, args, stanzas): + """Generate .network files""" + + for i, stanza in enumerate(stanzas, 50): + iface_file = self.generate_networkd_file(stanza) + + if iface_file is None: + continue + + path = os.path.join(args[0], "etc", "systemd", "network", + "%s-%s.network" % (i, stanza['name'])) + + with open(path, "w") as f: + f.write(iface_file) + + def generate_networkd_file(self, stanza): + """Generate an .network file from the provided data.""" + + name = stanza['name'] + itype = stanza['type'] + pairs = stanza['args'].items() + + if itype == "loopback": + return + + lines = ["[Match]"] + lines += ["Name=%s\n" % name] + lines += ["[Network]"] + if itype == "dhcp": + lines += ["DHCP=yes"] + + return "\n".join(lines) def parse_network_stanzas(self, config): """Parse a network config environment variable into stanzas. 
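For orientation, a hedged sketch of what the patch above produces (assuming a NETWORK_CONFIG stanza of "eth0:dhcp", so the enumeration yields i=50 and stanza['name'] == 'eth0'): generate_networkd_files() would write /etc/systemd/network/50-eth0.network containing

    [Match]
    Name=eth0

    [Network]
    DHCP=yes

(the blank line after Name= comes from the "Name=%s\n" entry in generate_networkd_file).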
-- cgit v1.2.1


From 8d36b6f71a2858843d9a7c6a878ebb2c85fda2a5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Javier=20Jard=C3=B3n?=
Date: Fri, 6 Mar 2015 12:56:19 +0000
Subject: simple-network.configure: Add function to convert mask to cidr
 suffix

255.255.255.0 -> 24
---
 simple-network.configure | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/simple-network.configure b/simple-network.configure
index 5b02142c..0e514e0b 100755
--- a/simple-network.configure
+++ b/simple-network.configure
@@ -121,6 +121,13 @@ class SimpleNetworkConfigurationExtension(cliapp.Application):

         return "\n".join(lines)

+    def convert_net_mask_to_cidr_suffix(self, mask):
+        """Convert dotted decimal form of a subnet mask to CIDR suffix notation
+
+        For example: 255.255.255.0 -> 24
+        """
+        return sum(bin(int(x)).count('1') for x in mask.split('.'))
+
     def parse_network_stanzas(self, config):
         """Parse a network config environment variable into stanzas.

-- cgit v1.2.1


From c33a852ba7b72258165d33f4e6810455483abc3d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Javier=20Jard=C3=B3n?=
Date: Fri, 6 Mar 2015 12:55:01 +0000
Subject: simple-network.configure: process pairs of parameters in a function

---
 simple-network.configure | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/simple-network.configure b/simple-network.configure
index 0e514e0b..a058cba7 100755
--- a/simple-network.configure
+++ b/simple-network.configure
@@ -118,9 +118,38 @@ class SimpleNetworkConfigurationExtension(cliapp.Application):
         lines += ["[Network]"]
         if itype == "dhcp":
             lines += ["DHCP=yes"]
+        else:
+            lines += self.generate_networkd_entries(pairs)

         return "\n".join(lines)

+    def generate_networkd_entries(self, pairs):
+        """Generate networkd configuration entries with the other parameters"""
+
+        address = None
+        netmask = None
+        gateway = None
+        lines = []
+        for pair in pairs:
+            if pair[0] == 'address':
+                address = pair[1]
+            elif pair[0] == 'netmask':
+                netmask = pair[1]
+            elif pair[0] == 'gateway':
+                gateway = pair[1]
+
+        if address and netmask:
+            network_suffix = self.convert_net_mask_to_cidr_suffix (netmask);
+            address_line = address + '/' + str(network_suffix)
+            lines += ["Address=%s" % address_line]
+        elif address or netmask:
+            raise Exception('address and netmask must be specified together')
+
+        if gateway is not None:
+            lines += ["Gateway=%s" % gateway]
+
+        return lines
+
     def convert_net_mask_to_cidr_suffix(self, mask):
         """Convert dotted decimal form of a subnet mask to CIDR suffix notation

-- cgit v1.2.1
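The bit-counting conversion above can be sanity-checked interactively; a minimal illustration, not part of the patch:

    >>> sum(bin(int(x)).count('1') for x in '255.255.255.0'.split('.'))
    24
    >>> sum(bin(int(x)).count('1') for x in '255.255.240.0'.split('.'))
    20

So generate_networkd_entries() turns a stanza carrying address=192.168.0.10 and netmask=255.255.255.0 into the single entry Address=192.168.0.10/24 (the address and mask here are hypothetical).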
From fcc6c405d5efd0043419bf7696a875fb18b7930a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Javier=20Jard=C3=B3n?=
Date: Fri, 6 Mar 2015 13:18:08 +0000
Subject: simple-network.configure: Generate default network config files in a
 function

Use DHCP by default in the default interfaces:
- for /etc/network/interfaces: "lo:loopback;eth0:dhcp,hostname=$(hostname)"
- for networkd: "e*:dhcp"
---
 simple-network.configure | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/simple-network.configure b/simple-network.configure
index a058cba7..fbbe6c4e 100755
--- a/simple-network.configure
+++ b/simple-network.configure
@@ -42,15 +42,29 @@ class SimpleNetworkConfigurationExtension(cliapp.Application):
     '''

     def process_args(self, args):
-        network_config = os.environ.get(
-            "NETWORK_CONFIG", "lo:loopback;eth0:dhcp,hostname=$(hostname)")
+        network_config = os.environ.get("NETWORK_CONFIG")

-        self.status(msg="Processing NETWORK_CONFIG=%(nc)s", nc=network_config)
+        if network_config is None:
+            self.generate_default_network_config(args)
+        else:
+            self.status(msg="Processing NETWORK_CONFIG=%(nc)s", nc=network_config)
+
+            stanzas = self.parse_network_stanzas(network_config)
+
+            self.generate_interfaces_file(args, stanzas)
+            self.generate_networkd_files(args, stanzas)
+
+    def generate_default_network_config(self, args):
+        """Generate default network configuration: DHCP in all the interfaces"""
+
+        default_network_config_interfaces = "lo:loopback;eth0:dhcp,hostname=$(hostname)"
+        default_network_config_networkd = "e*:dhcp"

-        stanzas = self.parse_network_stanzas(network_config)
+        stanzas_interfaces = self.parse_network_stanzas(default_network_config_interfaces)
+        stanzas_networkd = self.parse_network_stanzas(default_network_config_networkd)

-        self.generate_interfaces_file(args, stanzas)
-        self.generate_networkd_files(args, stanzas)
+        self.generate_interfaces_file(args, stanzas_interfaces)
+        self.generate_networkd_files(args, stanzas_networkd)

     def generate_interfaces_file(self, args, stanzas):
         """Generate /etc/network/interfaces file"""

-- cgit v1.2.1


From fe364c83daf5f66c685a00e3b368322d53139401 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Javier=20Jard=C3=B3n?=
Date: Wed, 11 Mar 2015 19:48:02 +0000
Subject: simple-network.configure: Update documentation

---
 simple-network.configure | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/simple-network.configure b/simple-network.configure
index fbbe6c4e..7c078c93 100755
--- a/simple-network.configure
+++ b/simple-network.configure
@@ -13,13 +13,15 @@
 # You should have received a copy of the GNU General Public License along
 # with this program. If not, see <http://www.gnu.org/licenses/>.

-'''A Morph deployment configuration extension to handle /etc/network/interfaces
+'''A Morph deployment configuration extension to handle network configuration

-This extension prepares /etc/network/interfaces with the interfaces specified
-during deployment.
+This extension prepares /etc/network/interfaces and networkd .network files
+in /etc/systemd/network/ with the interfaces specified during deployment.

 If no network configuration is provided, eth0 will be configured for DHCP
-with the hostname of the system.
+with the hostname of the system in the case of /etc/network/interfaces.
+In the case of networkd, any interface starting with e* will be configured
+for DHCP.

 '''

@@ -36,9 +38,10 @@

 class SimpleNetworkConfigurationExtension(cliapp.Application):

-    '''Configure /etc/network/interfaces
+    '''Configure /etc/network/interfaces and generate networkd .network files

-    Reading NETWORK_CONFIG, this extension sets up /etc/network/interfaces.
+    Reading NETWORK_CONFIG, this extension sets up /etc/network/interfaces
+    and .network files in /etc/systemd/network/.

     '''

     def process_args(self, args):

-- cgit v1.2.1
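With no NETWORK_CONFIG set, the networkd default above ("e*:dhcp") plausibly ends up as a single unit file, 50-e*.network — systemd-networkd accepts shell-style globs in Name=, so one file covers all 'e...'-named interfaces. A hedged sketch of its contents, assuming the stanza parser yields name 'e*' and type 'dhcp':

    [Match]
    Name=e*

    [Network]
    DHCP=yes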
From 2021f504c66ab367e4039ff91b49f97fd926779a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Javier=20Jard=C3=B3n?=
Date: Wed, 11 Mar 2015 19:48:17 +0000
Subject: simple-network.configure: Rename networkd file generated by systemd
 chunk

Rename instead of removing, in case the user is already using a
10-dhcp.network file
---
 simple-network.configure | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/simple-network.configure b/simple-network.configure
index 7c078c93..a347ebf9 100755
--- a/simple-network.configure
+++ b/simple-network.configure
@@ -47,6 +47,8 @@ class SimpleNetworkConfigurationExtension(cliapp.Application):
     def process_args(self, args):
         network_config = os.environ.get("NETWORK_CONFIG")

+        self.rename_networkd_chunk_file(args)
+
         if network_config is None:
             self.generate_default_network_config(args)
         else:
@@ -57,6 +59,33 @@ class SimpleNetworkConfigurationExtension(cliapp.Application):
         self.generate_interfaces_file(args, stanzas)
         self.generate_networkd_files(args, stanzas)

+    def rename_networkd_chunk_file(self, args):
+        """Rename the 10-dhcp.network file generated in the systemd chunk
+
+        The systemd chunk will place something in 10-dhcp.network, which will
+        have higher precedence than anything added in this extension (we
+        start at 50-*).
+
+        We should check for that file and rename it instead of removing it,
+        in case the file is being used by the user.
+
+        Until both of the following happen, we should continue to rename that
+        default config file:
+
+        1. simple-network.configure is always run when systemd is included
+        2. We've been building systems without systemd including that default
+           networkd config for long enough that nobody should be including
+           that config file.
+        """
+        file_path = os.path.join(args[0], "etc", "systemd", "network",
+                                 "10-dhcp.network")
+        try:
+            os.rename(file_path, file_path + ".morph")
+            self.status(msg="Renaming networkd file from systemd chunk: %(f)s \
+                to %(f)s.morph", f=file_path)
+        except OSError:
+            pass
+
     def generate_default_network_config(self, args):
         """Generate default network config: DHCP in all the interfaces"""

-- cgit v1.2.1
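For context, a hedged illustration (not part of the patch): systemd-networkd only loads files whose names end in .network, processing them in lexical order, so after the rename the layout looks like

    /etc/systemd/network/10-dhcp.network.morph    (ignored: wrong suffix)
    /etc/systemd/network/50-eth0.network          (now the effective config)

and the systemd chunk's old catch-all no longer shadows this extension's 50-* files.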
From 76bc6121a0cee508cc3feb1cac85b6a3d093b5cc Mon Sep 17 00:00:00 2001
From: Adam Coldrick
Date: Tue, 17 Mar 2015 14:03:29 +0000
Subject: Fix line lengths in morphlib/exts/simple-network.configure

Some lines were more than 79 characters long. This was causing
./check --full to fail on master of morph. This commit fixes the
lines in question.

Change-Id: I80969d2d89d3922e021b716c250796188d7a7f4c
---
 simple-network.configure | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/simple-network.configure b/simple-network.configure
index a347ebf9..61113325 100755
--- a/simple-network.configure
+++ b/simple-network.configure
@@ -52,7 +52,8 @@ class SimpleNetworkConfigurationExtension(cliapp.Application):
         if network_config is None:
             self.generate_default_network_config(args)
         else:
-            self.status(msg="Processing NETWORK_CONFIG=%(nc)s", nc=network_config)
+            self.status(msg="Processing NETWORK_CONFIG=%(nc)s",
+                        nc=network_config)

             stanzas = self.parse_network_stanzas(network_config)

@@ -87,13 +88,16 @@ class SimpleNetworkConfigurationExtension(cliapp.Application):
             pass

     def generate_default_network_config(self, args):
-        """Generate default network configuration: DHCP in all the interfaces"""
+        """Generate default network config: DHCP in all the interfaces"""

-        default_network_config_interfaces = "lo:loopback;eth0:dhcp,hostname=$(hostname)"
+        default_network_config_interfaces = "lo:loopback;" \
+                                            "eth0:dhcp,hostname=$(hostname)"
         default_network_config_networkd = "e*:dhcp"

-        stanzas_interfaces = self.parse_network_stanzas(default_network_config_interfaces)
-        stanzas_networkd = self.parse_network_stanzas(default_network_config_networkd)
+        stanzas_interfaces = self.parse_network_stanzas(
+            default_network_config_interfaces)
+        stanzas_networkd = self.parse_network_stanzas(
+            default_network_config_networkd)

         self.generate_interfaces_file(args, stanzas_interfaces)
         self.generate_networkd_files(args, stanzas_networkd)

-- cgit v1.2.1


From ed6de447693fcf4a0b9240cc9943f065e9585fee Mon Sep 17 00:00:00 2001
From: Richard Ipsum
Date: Sat, 14 Mar 2015 17:30:43 +0000
Subject: Make fstab.configure use write_from_dict

---
 fstab.configure | 25 +++++++------------------
 1 file changed, 7 insertions(+), 18 deletions(-)

diff --git a/fstab.configure b/fstab.configure
index 3bbc9102..b9154eee 100755
--- a/fstab.configure
+++ b/fstab.configure
@@ -1,5 +1,6 @@
-#!/usr/bin/python
-# Copyright (C) 2013,2015 Codethink Limited
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright © 2013-2015 Codethink Limited
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -19,21 +20,9 @@
 import os
 import sys

+import morphlib

-def asciibetical(strings):
+envvars = {k: v for (k, v) in os.environ.iteritems() if k.startswith('FSTAB_')}

-    def key(s):
-        return [ord(c) for c in s]
-
-    return sorted(strings, key=key)
-
-
-fstab_filename = os.path.join(sys.argv[1], 'etc', 'fstab')
-
-fstab_vars = asciibetical(x for x in os.environ if x.startswith('FSTAB_'))
-with open(fstab_filename, 'a') as f:
-    for var in fstab_vars:
-        f.write('%s\n' % os.environ[var])
-
-os.chown(fstab_filename, 0, 0)
-os.chmod(fstab_filename, 0644)
+conf_file = os.path.join(sys.argv[1], 'etc/fstab')
+morphlib.util.write_from_dict(conf_file, envvars)

-- cgit v1.2.1
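As an illustration of the new mechanism (variable name and values hypothetical), a cluster morphology might set

    FSTAB_SRV: /dev/vda3 /srv btrfs defaults 0 2

and morphlib.util.write_from_dict() then presumably writes the collected FSTAB_* values into the target's /etc/fstab, one line per variable, much as the removed asciibetical() loop did by sorting the variable names and appending their values.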
From 813573c351e9f790992e66ee225965340b55aceb Mon Sep 17 00:00:00 2001
From: Richard Ipsum
Date: Sat, 14 Mar 2015 17:31:12 +0000
Subject: Add hosts.configure

This adds a new config extension to allow deployments to write to
/etc/hosts by adding HOSTS_x: to a cluster morph in a similar manner
to the fstab.configure extension.
---
 hosts.configure | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)
 create mode 100755 hosts.configure

diff --git a/hosts.configure b/hosts.configure
new file mode 100755
index 00000000..6b068d04
--- /dev/null
+++ b/hosts.configure
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright © 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# =*= License: GPL-2 =*=
+
+
+import os
+import sys
+import socket
+
+import morphlib
+
+def validate(var, line):
+    xs = line.split()
+    if len(xs) == 0:
+        raise morphlib.Error("`%s: %s': line is empty" % (var, line))
+
+    ip = xs[0]
+    hostnames = xs[1:]
+
+    if len(hostnames) == 0:
+        raise morphlib.Error("`%s: %s': missing hostname" % (var, line))
+
+    family = socket.AF_INET6 if ':' in ip else socket.AF_INET
+
+    try:
+        socket.inet_pton(family, ip)
+    except socket.error:
+        raise morphlib.Error("`%s: %s' invalid ip" % (var, ip))
+
+envvars = {k: v for (k, v) in os.environ.iteritems() if k.startswith('HOSTS_')}
+
+conf_file = os.path.join(sys.argv[1], 'etc/hosts')
+morphlib.util.write_from_dict(conf_file, envvars, validate)

-- cgit v1.2.1
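For illustration (hostnames and address hypothetical), a deployment might set

    HOSTS_TROVE: 10.24.1.10 trove trove.example.com

validate() above then checks that the first field parses with socket.inet_pton() (treated as IPv6 when it contains ':') and that at least one hostname follows, before write_from_dict() lands the line in the target's /etc/hosts.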
From 8e691df0c964bbb8b20e8e66e37402a5fc854b37 Mon Sep 17 00:00:00 2001
From: Javier Jardon
Date: Mon, 23 Mar 2015 23:14:05 +0000
Subject: simple-network: separate creation of directory and file path

Change-Id: Ic715815bbad3ef1ee9ab457b62a194eaef45744c
---
 simple-network.configure | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/simple-network.configure b/simple-network.configure
index 61113325..42bf1b82 100755
--- a/simple-network.configure
+++ b/simple-network.configure
@@ -106,7 +106,10 @@ class SimpleNetworkConfigurationExtension(cliapp.Application):
         """Generate /etc/network/interfaces file"""

         iface_file = self.generate_iface_file(stanzas)
-        with open(os.path.join(args[0], "etc/network/interfaces"), "w") as f:
+
+        directory_path = os.path.join(args[0], "etc", "network")
+        file_path = os.path.join(directory_path, "interfaces")
+        with open(file_path, "w") as f:
             f.write(iface_file)

     def generate_iface_file(self, stanzas):
@@ -147,10 +150,11 @@ class SimpleNetworkConfigurationExtension(cliapp.Application):
             if iface_file is None:
                 continue

-            path = os.path.join(args[0], "etc", "systemd", "network",
-                                "%s-%s.network" % (i, stanza['name']))
+            directory_path = os.path.join(args[0], "etc", "systemd", "network")
+            file_path = os.path.join(directory_path,
+                                     "%s-%s.network" % (i, stanza['name']))

-            with open(path, "w") as f:
+            with open(file_path, "w") as f:
                 f.write(iface_file)

     def generate_networkd_file(self, stanza):

-- cgit v1.2.1


From da48a28de7461bd46ef777a2ab2f5ca0e5c1185c Mon Sep 17 00:00:00 2001
From: Javier Jardon
Date: Mon, 23 Mar 2015 23:02:57 +0000
Subject: simple-network: Add function to create a path if it doesn't exist

Change-Id: If011a5518fd30914c89b00099b9d05cff5cd3959
---
 simple-network.configure | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/simple-network.configure b/simple-network.configure
index 42bf1b82..b4988125 100755
--- a/simple-network.configure
+++ b/simple-network.configure
@@ -27,6 +27,7 @@ for DHCP

 import os
 import sys
+import errno

 import cliapp
 import morphlib

@@ -256,6 +257,16 @@ class SimpleNetworkConfigurationExtension(cliapp.Application):

         return output_stanza

+    def make_sure_path_exists(self, path):
+        try:
+            os.makedirs(path)
+        except OSError as e:
+            if e.errno == errno.EEXIST and os.path.isdir(path):
+                pass
+            else:
+                raise SimpleNetworkError("Unable to create directory '%s'"
+                                         % path)
+
     def status(self, **kwargs):
         '''Provide status output.

-- cgit v1.2.1


From 500943a125a1b21c08adcc400a86569693aebfa4 Mon Sep 17 00:00:00 2001
From: Javier Jardon
Date: Mon, 23 Mar 2015 23:19:33 +0000
Subject: simple-network: Use function to ensure directory path will exist

Change-Id: I44693d15aa5e92d5f09720065788adff34f8685c
---
 simple-network.configure | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/simple-network.configure b/simple-network.configure
index b4988125..130b96c9 100755
--- a/simple-network.configure
+++ b/simple-network.configure
@@ -109,6 +109,7 @@ class SimpleNetworkConfigurationExtension(cliapp.Application):
         iface_file = self.generate_iface_file(stanzas)

         directory_path = os.path.join(args[0], "etc", "network")
+        self.make_sure_path_exists(directory_path)
         file_path = os.path.join(directory_path, "interfaces")
         with open(file_path, "w") as f:
             f.write(iface_file)
@@ -152,6 +153,7 @@ class SimpleNetworkConfigurationExtension(cliapp.Application):
                 continue

             directory_path = os.path.join(args[0], "etc", "systemd", "network")
+            self.make_sure_path_exists(directory_path)
             file_path = os.path.join(directory_path,
                                      "%s-%s.network" % (i, stanza['name']))

-- cgit v1.2.1


From bb3fa0e026b1bab96b4bbbcb1920efcf5f995f37 Mon Sep 17 00:00:00 2001
From: Javier Jardon
Date: Mon, 23 Mar 2015 23:27:11 +0000
Subject: simple-network: only try to rename "10-dhcp.network" if it actually
 exists

Change-Id: I1521c0bdec4d7a6812f8988a2349e66b08161de8
---
 simple-network.configure | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/simple-network.configure b/simple-network.configure
index 130b96c9..1ba94e86 100755
--- a/simple-network.configure
+++ b/simple-network.configure
@@ -81,12 +81,14 @@ class SimpleNetworkConfigurationExtension(cliapp.Application):
         """
         file_path = os.path.join(args[0], "etc", "systemd", "network",
                                  "10-dhcp.network")
-        try:
-            os.rename(file_path, file_path + ".morph")
-            self.status(msg="Renaming networkd file from systemd chunk: %(f)s \
-                to %(f)s.morph", f=file_path)
-        except OSError:
-            pass
+
+        if os.path.isfile(file_path):
+            try:
+                os.rename(file_path, file_path + ".morph")
+                self.status(msg="Renaming networkd file from systemd chunk: \
+                    %(f)s to %(f)s.morph", f=file_path)
+            except OSError:
+                pass

     def generate_default_network_config(self, args):

-- cgit v1.2.1
From a69b342f542f4895ef2bfd9f18a0e56d38b4bc7d Mon Sep 17 00:00:00 2001
From: Richard Ipsum
Date: Mon, 30 Mar 2015 13:34:40 +0000
Subject: Fix: strip 'network=' from NIC_CONFIG

Also ensure NIC_CONFIG begins with 'network=', 'bridge=' or is 'user'

Change-Id: I3bcbd25eb2c9a05b7fa276697f97a1080cb0316e
---
 kvm.check | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/kvm.check b/kvm.check
index 62d76453..83562e44 100755
--- a/kvm.check
+++ b/kvm.check
@@ -129,14 +129,22 @@ class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension):

         def name(nic_entry):
             if ',' in nic_entry:
-                # NETWORK_NAME,mac=12:34,model=e1000...
-                return nic_entry[:nic_entry.find(',')]
+                # network=NETWORK_NAME,mac=12:34,model=e1000...
+                return nic_entry[:nic_entry.find(',')].lstrip('network=')
             else:
-                return nic_entry # NETWORK_NAME
+                return nic_entry.lstrip('network=') # NETWORK_NAME

         if 'NIC_CONFIG' in os.environ:
             nics = os.environ['NIC_CONFIG'].split()

+            for n in nics:
+                if not (n.startswith('network=')
+                        or n.startswith('bridge=')
+                        or n == 'user'):
+                    raise cliapp.AppException('malformed NIC_CONFIG: %s\n'
+                                              " (expected 'bridge=BRIDGE' 'network=NAME'"
+                                              " or 'user')" % n)
+
             # --network bridge= is used to specify a bridge
             # --network user is used to specify a form of NAT
             # (see the virt-install(1) man page)

-- cgit v1.2.1
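One caution on the patch above, worth noting here: str.lstrip() takes a set of characters rather than a prefix, so nic_entry.lstrip('network=') also eats any leading letters of the network name itself that happen to be in {n, e, t, w, o, r, k, =} — 'network=testnet', for instance, comes back as 'stnet'. A minimal prefix-safe sketch (an illustration, not the committed code):

    def name(nic_entry):
        # Keep only the part before any ',mac=...' style options.
        head = nic_entry.split(',', 1)[0]
        # Remove the literal 'network=' prefix, and nothing else.
        prefix = 'network='
        if head.startswith(prefix):
            head = head[len(prefix):]
        return head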
From 2b35eb5ebef0de0ad78466f41ccfd7792fbf2e40 Mon Sep 17 00:00:00 2001
From: Richard Ipsum
Date: Fri, 3 Apr 2015 20:05:09 +0100
Subject: Make kvm deploy check that host has virt-install

This allows us to catch a case where virt-install hasn't been installed
on the host we're deploying to, much earlier in the deployment process.

Change-Id: I413ad804a7e8bef4fc2d1231411e01d30d0cb9e8
---
 kvm.check | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/kvm.check b/kvm.check
index 83562e44..67cb3d38 100755
--- a/kvm.check
+++ b/kvm.check
@@ -47,6 +47,7 @@ class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension):
         self.check_no_existing_libvirt_vm(ssh_host, vm_name)
         self.check_extra_disks_exist(ssh_host, self.parse_attach_disks())
         self.check_virtual_networks_are_started(ssh_host)
+        self.check_host_has_virtinstall(ssh_host)

     def check_and_parse_location(self, location):
         '''Check and parse the location argument to get relevant data.'''
@@ -156,5 +157,13 @@ class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension):
         for network in networks:
             check_virtual_network_is_started(network)

+    def check_host_has_virtinstall(self, ssh_host):
+        try:
+            cliapp.ssh_runcmd(ssh_host, ['which', 'virt-install'])
+        except cliapp.AppException:
+            raise cliapp.AppException(
+                'virt-install does not seem to be installed on host %s'
+                % ssh_host)
+

 KvmPlusSshCheckExtension().run()

-- cgit v1.2.1


From ef08b64d0c81515a360ceea1d0068a7a9d4233d9 Mon Sep 17 00:00:00 2001
From: Richard Ipsum
Date: Sat, 11 Apr 2015 14:23:48 +0100
Subject: Add template option to install-files conf ext

This adds an optional 'template' option to the install-files manifest
format. A file declared as a template will be rendered using jinja2 with
variables substituted in from the environment.

Change-Id: I2ed6fe58f5fff315b42b7e4ec478ada851e0a70d
---
 install-files.configure | 42 ++++++++++++++++++++++++++++++++----------
 1 file changed, 32 insertions(+), 10 deletions(-)

diff --git a/install-files.configure b/install-files.configure
index 58cf373a..c2970243 100755
--- a/install-files.configure
+++ b/install-files.configure
@@ -30,6 +30,12 @@ import shlex
 import shutil
 import stat

+try:
+    import jinja2
+    jinja_available = True
+except ImportError:
+    jinja_available = False
+

 class InstallFilesConfigureExtension(cliapp.Application):

     def process_args(self, args):
@@ -48,18 +54,20 @@ class InstallFilesConfigureExtension(cliapp.Application):
             self.install_entry(entry, manifest_dir, target_root)

     def install_entry(self, entry, manifest_root, target_root):
-        m = re.match('(overwrite )?([0-7]+) ([0-9]+) ([0-9]+) (\S+)', entry)
+        m = re.match('(template )?(overwrite )?'
+                     '([0-7]+) ([0-9]+) ([0-9]+) (\S+)', entry)

         if m:
-            overwrite = m.group(1)
-            mode = int(m.group(2), 8)   # mode is octal
-            uid = int(m.group(3))
-            gid = int(m.group(4))
-            path = m.group(5)
+            template = m.group(1)
+            overwrite = m.group(2)
+            mode = int(m.group(3), 8)   # mode is octal
+            uid = int(m.group(4))
+            gid = int(m.group(5))
+            path = m.group(6)
         else:
             raise cliapp.AppException('Invalid manifest entry, '
-                'format: [overwrite] '
-                '<mode> <uid> <gid> <path>')
+                'format: [template] [overwrite] '
+                '<mode> <uid> <gid> <path>')

         dest_path = os.path.join(target_root, './' + path)
         if stat.S_ISDIR(mode):
@@ -91,8 +99,22 @@ class InstallFilesConfigureExtension(cliapp.Application):
                 raise cliapp.AppException('File already exists at %s'
                                           % dest_path)
         else:
-            shutil.copyfile(os.path.join(manifest_root, './' + path),
-                            dest_path)
+            if template:
+                if not jinja_available:
+                    raise cliapp.AppException(
+                        "Failed to install template file `%s': "
+                        'install-files templates require jinja2'
+                        % path)
+
+                loader = jinja2.FileSystemLoader(manifest_root)
+                env = jinja2.Environment(loader=loader,
+                                         keep_trailing_newline=True)
+
+                env.get_template(path).stream(os.environ).dump(dest_path)
+            else:
+                shutil.copyfile(os.path.join(manifest_root, './' + path),
+                                dest_path)
+
             os.chown(dest_path, uid, gid)
             os.chmod(dest_path, mode)

-- cgit v1.2.1


From 64465445f2a95d74cb4a5bae3ab0d1783d6de68e Mon Sep 17 00:00:00 2001
From: Richard Ipsum
Date: Wed, 15 Apr 2015 15:58:34 +0100
Subject: Add dns option to simple-network conf ext

Change-Id: I0f4490d76caca802536b21085ea0d770fb8c0798
---
 simple-network.configure | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/simple-network.configure b/simple-network.configure
index 1ba94e86..4a70f311 100755
--- a/simple-network.configure
+++ b/simple-network.configure
@@ -188,7 +188,9 @@ class SimpleNetworkConfigurationExtension(cliapp.Application):
         address = None
         netmask = None
         gateway = None
+        dns = None
         lines = []
+
         for pair in pairs:
             if pair[0] == 'address':
                 address = pair[1]
@@ -196,6 +198,8 @@ class SimpleNetworkConfigurationExtension(cliapp.Application):
                 netmask = pair[1]
             elif pair[0] == 'gateway':
                 gateway = pair[1]
+            elif pair[0] == 'dns':
+                dns = pair[1]

         if address and netmask:
             network_suffix = self.convert_net_mask_to_cidr_suffix (netmask);
@@ -204,9 +208,12 @@ class SimpleNetworkConfigurationExtension(cliapp.Application):
         elif address or netmask:
             raise Exception('address and netmask must be specified together')

-        if gateway is not None:
+        if gateway:
             lines += ["Gateway=%s" % gateway]

+        if dns:
+            lines += ["DNS=%s" % dns]
+
         return lines

     def convert_net_mask_to_cidr_suffix(self, mask):

-- cgit v1.2.1
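Two hedged illustrations of the patches above, with all names and values hypothetical and the stanza syntax assumed from the surrounding code. With the 'dns' option, a static stanza such as eth0:static,address=192.168.0.10,netmask=255.255.255.0,gateway=192.168.0.1,dns=192.168.0.1 would plausibly yield a unit like

    [Match]
    Name=eth0

    [Network]
    Address=192.168.0.10/24
    Gateway=192.168.0.1
    DNS=192.168.0.1

And for the install-files 'template' option, a manifest entry like 'template 0100644 0 0 /etc/motd' (the 010 prefix marks a regular file for the stat.S_ISREG() check) would render the manifest's copy of the file through jinja2 with os.environ as the context, so a template body of 'Welcome to {{ HOSTNAME }}' picks up the HOSTNAME variable at configure time.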
From 985d512ad9969b9216720a7dc9274b41bb2802eb Mon Sep 17 00:00:00 2001
From: Sam Thursfield
Date: Mon, 13 Apr 2015 12:31:46 +0000
Subject: Add distbuild-trove-nfsboot.write

The nfsboot.write deployment extension has been deprecated for a while
because it's not generally useful. It's only used for deploying distbuild
nodes to a Trove, as far as I know.

We still need to support setting up a bunch of machines that boot over
NFS from a Trove. But we can do this in a special-purpose .write
extension. The new distbuild-trove-nfsboot.write is much more efficient
than the more generic nfsboot.write: instead of treating each system
individually (thus copying an almost identical ~2GB rootfs to the Trove
once per node) it copies the system image to the Trove once, and /then/
sets up a rootfs per node.

Upgrades are now supported, although the code assumes distbuild nodes
are stateless (as they should be) so nothing special is done for
upgrades, other than checking that there is already a version of the
given system in existence.

The new extension does not create an orig/ and run/ version of each
system, because there is no need when the deployed system is stateless.

There could be further gains in efficiency, but I don't have time to do
them right now.

This write extension is full of compromises; its goal is to better
support the existing users who have a Trove and a distbuild network
deployed via NFS. It is specifically not intended to be useful for
other purposes.

Change-Id: I9a50c58b714ed272212d1d6c55b289aaa96051b1
---
 distbuild-trove-nfsboot.check | 150 ++++++++++++++++++++++
 distbuild-trove-nfsboot.help  |  49 ++++++++
 distbuild-trove-nfsboot.write | 283 ++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 482 insertions(+)
 create mode 100755 distbuild-trove-nfsboot.check
 create mode 100644 distbuild-trove-nfsboot.help
 create mode 100755 distbuild-trove-nfsboot.write

diff --git a/distbuild-trove-nfsboot.check b/distbuild-trove-nfsboot.check
new file mode 100755
index 00000000..38c491e5
--- /dev/null
+++ b/distbuild-trove-nfsboot.check
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+ +'''Preparatory checks for Morph 'distbuild-trove-nfsboot' write extension''' + +import cliapp +import logging +import os + +import morphlib.writeexts + + +class DistbuildTroveNFSBootCheckExtension(morphlib.writeexts.WriteExtension): + + nfsboot_root = '/srv/nfsboot' + remote_user = 'root' + + required_vars = [ + 'DISTBUILD_CONTROLLER', + 'DISTBUILD_GIT_SERVER', + 'DISTBUILD_SHARED_ARTIFACT_CACHE', + 'DISTBUILD_TROVE_ID', + 'DISTBUILD_WORKERS', + 'DISTBUILD_WORKER_SSH_KEY', + ] + + def system_path(self, system_name, version_label=None): + if version_label: + return os.path.join(self.nfsboot_root, system_name, 'systems', + version_label, 'run') + else: + return os.path.join(self.nfsboot_root, system_name) + + def process_args(self, args): + if len(args) != 1: + raise cliapp.AppException('Wrong number of command line args') + + nfs_host = args[0] + nfs_netloc = '%s@%s' % (self.remote_user, nfs_host) + + version_label = os.getenv('VERSION_LABEL', 'factory') + + missing_vars = [var for var in self.required_vars + if not var in os.environ] + if missing_vars: + raise cliapp.AppException( + 'Please set: %s' % ', '.join(missing_vars)) + + controllers = os.getenv('DISTBUILD_CONTROLLER').split() + workers = os.getenv('DISTBUILD_WORKERS').split() + + if len(controllers) != 1: + raise cliapp.AppException('Please specify exactly one controller.') + + if len(workers) == 0: + raise cliapp.AppException('Please specify at least one worker.') + + upgrade = self.get_environment_boolean('UPGRADE') + + self.check_good_server(nfs_netloc) + + system_names = set(controllers + workers) + for system_name in system_names: + if upgrade: + self.check_upgradeable(nfs_netloc, system_name, version_label) + else: + system_path = self.system_path(system_name) + + if self.remote_directory_exists(nfs_netloc, system_path): + if self.get_environment_boolean('OVERWRITE') == False: + raise cliapp.AppException( + 'System %s already exists at %s:%s. Try `morph ' + 'upgrade` instead of `morph deploy`.' % ( + system_name, nfs_netloc, system_path)) + + def check_good_server(self, netloc): + # FIXME: assumes root + self.check_ssh_connectivity(netloc.split('@')[-1]) + + # Is an NFS server + try: + cliapp.ssh_runcmd( + netloc, ['test', '-e', '/etc/exports']) + except cliapp.AppException: + raise cliapp.AppException('server %s is not an nfs server' + % netloc) + try: + cliapp.ssh_runcmd( + netloc, ['systemctl', 'is-enabled', 'nfs-server.service']) + + except cliapp.AppException: + raise cliapp.AppException('server %s does not control its ' + 'nfs server by systemd' % netloc) + + # TFTP server exports /srv/nfsboot/tftp + tftp_root = os.path.join(self.nfsboot_root, 'tftp') + try: + cliapp.ssh_runcmd( + netloc, ['test' , '-d', tftp_root]) + except cliapp.AppException: + raise cliapp.AppException('server %s does not export %s' % + (netloc, tftp_root)) + + def check_upgradeable(self, nfs_netloc, system_name, version_label): + '''Check that there is already a version of the system present. + + Distbuild nodes are stateless, so an upgrade is actually pretty much + the same as an initial deployment. This test is just a sanity check. + + ''' + system_path = self.system_path(system_name) + system_version_path = self.system_path(system_name, version_label) + + if not self.remote_directory_exists(nfs_netloc, system_path): + raise cliapp.AppException( + 'System %s not found at %s:%s, cannot deploy an upgrade.' 
% ( + system_name, nfs_netloc, system_path)) + + if self.remote_directory_exists(nfs_netloc, system_version_path): + if self.get_environment_boolean('OVERWRITE'): + pass + else: + raise cliapp.AppException( + 'System %s version %s already exists at %s:%s.' % ( + system_name, version_label, nfs_netloc, + system_version_path)) + + def remote_directory_exists(self, nfs_netloc, path): + try: + cliapp.ssh_runcmd(nfs_netloc, ['test', '-d', path]) + except cliapp.AppException as e: + logging.debug('SSH exception: %s', e) + return False + + return True + + +DistbuildTroveNFSBootCheckExtension().run() diff --git a/distbuild-trove-nfsboot.help b/distbuild-trove-nfsboot.help new file mode 100644 index 00000000..62f1455c --- /dev/null +++ b/distbuild-trove-nfsboot.help @@ -0,0 +1,49 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + +help: | + Deploy a distbuild network, using a Trove to serve the kernel and rootfs. + + The `location` argument is the hostname of the Trove system. + + The following configuration values must be specified: + + - DISTBUILD_CONTROLLER: hostname of controller system + - DISTBUILD_WORKERS: hostnames of each worker system + - DISTBUILD_GIT_SERVER: Trove hostname + - DISTBUILD_SHARED_ARTIFACT_CACHE: Trove hostname + - DISTBUILD_TROVE_ID: Trove ID + - DISTBUILD_WORKER_SSH_KEY: SSH key to be used for ssh:// repos + + A note on TROVE_ID: the current distbuild-setup service requires that + a single 'Trove ID' is specified. This is used in Morph for expanding + keyed URLs. If you set TROVE_ID=foo for example, foo:bar will be expanded + to git://$GIT_SERVER/foo, in addition to the standard baserock: and + upstream: prefixes that you can use. + + The WORKER_SSH_KEY must be provided, even if you don't need it. The + distbuild-setup service could be changed to make it optional. + + The following configuration values are optional: + + - HOST_MAP: a list of key=value pairs mapping hostnames to IP addresses, + or fully-qualified domain names. Useful if you + cannot rely on hostname resolution working for your deploment. + + The extension will connect to root@location via ssh to copy the kernel and + rootfs, and configure the nfs server. It will duplicate the kernel and + rootfs once for each node in the distbuild network. + + The deployment mechanism makes assumptions about the bootloader + configuration of the target machines. diff --git a/distbuild-trove-nfsboot.write b/distbuild-trove-nfsboot.write new file mode 100755 index 00000000..a5a5b094 --- /dev/null +++ b/distbuild-trove-nfsboot.write @@ -0,0 +1,283 @@ +#!/usr/bin/python +# Copyright (C) 2013-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + + +'''Morph .write extension for a distbuild network booting off a Trove with NFS. + +''' + + +import os +import sys +import tempfile + +import cliapp +import morphlib.writeexts + + +class DistbuildTroveNFSBootWriteExtension(morphlib.writeexts.WriteExtension): + + '''Create an NFS root and kernel on TFTP during Morph's deployment. + + See distbuild-trove-nfsboot.help for documentation. + + ''' + + nfsboot_root = '/srv/nfsboot' + remote_user = 'root' + + def system_path(self, system_name, version_label=None): + if version_label: + # The 'run' directory is kind of a historical artifact. Baserock + # systems that have Btrfs root disks maintain an orig/ and a run/ + # subvolume, so that one can find changes that have been made at + # runtime. For distbuild systems, this isn't necessary because the + # root filesystems of the nodes are effectively stateless. However, + # existing systems have bootloaders configured to look for the + # 'run' directory, so we need to keep creating it. + return os.path.join(self.nfsboot_root, system_name, 'systems', + version_label, 'run') + else: + return os.path.join(self.nfsboot_root, system_name) + + def process_args(self, args): + if len(args) != 2: + raise cliapp.AppException('Wrong number of command line args') + + local_system_path, nfs_host = args + + nfs_netloc = '%s@%s' % (self.remote_user, nfs_host) + + version_label = os.getenv('VERSION_LABEL', 'factory') + + controller_name = os.getenv('DISTBUILD_CONTROLLER') + worker_names = os.getenv('DISTBUILD_WORKERS').split() + system_names = set([controller_name] + worker_names) + + git_server = os.getenv('DISTBUILD_GIT_SERVER') + shared_artifact_cache = os.getenv('DISTBUILD_SHARED_ARTIFACT_CACHE') + trove_id = os.getenv('DISTBUILD_TROVE_ID') + worker_ssh_key_path = os.getenv('DISTBUILD_WORKER_SSH_KEY') + + host_map = self.parse_host_map_string(os.getenv('HOST_MAP', '')) + + kernel_relpath = self.find_kernel(local_system_path) + + copied_rootfs = None + for system_name in system_names: + remote_system_path = self.system_path(system_name, version_label) + if copied_rootfs is None: + self.transfer_system( + nfs_netloc, local_system_path, remote_system_path) + copied_rootfs = remote_system_path + else: + self.duplicate_remote_system( + nfs_netloc, copied_rootfs, remote_system_path) + + for system_name in system_names: + remote_system_path = self.system_path(system_name, version_label) + self.link_kernel_to_tftpboot_path( + nfs_netloc, system_name, version_label, kernel_relpath) + self.set_hostname( + nfs_netloc, system_name, remote_system_path) + self.write_distbuild_config( + nfs_netloc, system_name, remote_system_path, git_server, + shared_artifact_cache, trove_id, worker_ssh_key_path, + controller_name, worker_names, host_map=host_map) + + self.configure_nfs_exports(nfs_netloc, system_names) + + for system_name in system_names: + self.update_default_version(nfs_netloc, system_name, version_label) + + def parse_host_map_string(self, host_map_string): + '''Parse the HOST_MAP variable + + Returns a dict mapping hostname to value (where value is an IP + address, a fully-qualified domain name, an alternate hostname, or + whatever). 
+ + ''' + pairs = host_map_string.split(' ') + return morphlib.util.parse_environment_pairs({}, pairs) + + def transfer_system(self, nfs_netloc, local_system_path, + remote_system_path): + self.status(msg='Copying rootfs to %(nfs_netloc)s', + nfs_netloc=nfs_netloc) + cliapp.ssh_runcmd( + nfs_netloc, ['mkdir', '-p', remote_system_path]) + # The deployed rootfs may have been created by OSTree, so definitely + # don't pass --hard-links to `rsync`. + cliapp.runcmd( + ['rsync', '--archive', '--delete', '--info=progress2', + '--protect-args', '--partial', '--sparse', '--xattrs', + local_system_path + '/', + '%s:%s' % (nfs_netloc, remote_system_path)], stdout=sys.stdout) + + def duplicate_remote_system(self, nfs_netloc, source_system_path, + target_system_path): + self.status(msg='Duplicating rootfs to %(target_system_path)s', + target_system_path=target_system_path) + cliapp.ssh_runcmd(nfs_netloc, + ['mkdir', '-p', target_system_path]) + # We can't pass --info=progress2 here, because it may not be available + # in the remote 'rsync'. The --info setting was added in RSync 3.1.0, + # old versions of Baserock have RSync 3.0.9. So the user doesn't get + # any progress info on stdout for the 'duplicate' stage. + cliapp.ssh_runcmd(nfs_netloc, + ['rsync', '--archive', '--delete', '--protect-args', '--partial', + '--sparse', '--xattrs', source_system_path + '/', + target_system_path], stdout=sys.stdout) + + def find_kernel(self, local_system_path): + bootdir = os.path.join(local_system_path, 'boot') + image_names = ['vmlinuz', 'zImage', 'uImage'] + + for name in image_names: + try_path = os.path.join(bootdir, name) + if os.path.exists(try_path): + kernel_path = os.path.relpath(try_path, local_system_path) + break + else: + raise cliapp.AppException( + 'Could not find a kernel in the system: none of ' + '%s found' % ', '.join(image_names)) + return kernel_path + + def link_kernel_to_tftpboot_path(self, nfs_netloc, system_name, + version_label, kernel_relpath): + '''Create links for TFTP server for a system's kernel.''' + + remote_system_path = self.system_path(system_name, version_label) + kernel_dest = os.path.join(remote_system_path, kernel_relpath) + + self.status(msg='Creating links to %(name)s kernel in tftp directory', + name=system_name) + tftp_dir = os.path.join(self.nfsboot_root , 'tftp') + + versioned_kernel_name = "%s-%s" % (system_name, version_label) + kernel_name = system_name + + cliapp.ssh_runcmd(nfs_netloc, + ['ln', '-f', kernel_dest, + os.path.join(tftp_dir, versioned_kernel_name)]) + + cliapp.ssh_runcmd(nfs_netloc, + ['ln', '-sf', versioned_kernel_name, + os.path.join(tftp_dir, kernel_name)]) + + def set_remote_file_contents(self, nfs_netloc, path, text): + with tempfile.NamedTemporaryFile() as f: + f.write(text) + f.flush() + cliapp.runcmd( + ['scp', f.name, '%s:%s' % (nfs_netloc, path)]) + + def set_hostname(self, nfs_netloc, system_name, system_path): + hostname_path = os.path.join(system_path, 'etc', 'hostname') + self.set_remote_file_contents( + nfs_netloc, hostname_path, system_name + '\n') + + def write_distbuild_config(self, nfs_netloc, system_name, system_path, + git_server, shared_artifact_cache, trove_id, + worker_ssh_key_path, controller_name, + worker_names, host_map = {}): + '''Write /etc/distbuild/distbuild.conf on the node. + + This .write extension takes advantage of the 'generic' mode of + distbuild.configure. 
Each node is not configured until first-boot, + when distbuild-setup.service runs and configures the node based on the + contents of /etc/distbuild/distbuild.conf. + + ''' + def host(hostname): + return host_map.get(hostname, hostname) + + config = { + 'ARTIFACT_CACHE_SERVER': host(shared_artifact_cache), + 'CONTROLLERHOST': host(controller_name), + 'TROVE_HOST': host(git_server), + 'TROVE_ID': trove_id, + 'DISTBUILD_CONTROLLER': system_name == controller_name, + 'DISTBUILD_WORKER': system_name in worker_names, + 'WORKERS': ', '.join(map(host, worker_names)), + 'WORKER_SSH_KEY': '/etc/distbuild/worker.key', + } + + config_text = '\n'.join( + '%s: %s' % (key, value) for key, value in config.iteritems()) + config_text = \ + '# Generated by distbuild-trove-nfsboot.write\n' + \ + config_text + '\n' + path = os.path.join(system_path, 'etc', 'distbuild') + cliapp.ssh_runcmd( + nfs_netloc, ['mkdir', '-p', path]) + cliapp.runcmd( + ['scp', worker_ssh_key_path, '%s:%s' % (nfs_netloc, path)]) + self.set_remote_file_contents( + nfs_netloc, os.path.join(path, 'distbuild.conf'), config_text) + + def configure_nfs_exports(self, nfs_netloc, system_names): + '''Ensure the Trove is set up to export the NFS roots we need. + + This doesn't handle setting up the TFTP daemon. We assume that is + already running. + + ''' + for system_name in system_names: + exported_path = self.system_path(system_name) + exports_path = '/etc/exports' + + # Rather ugly SSH hackery follows to ensure each system path is + # listed in /etc/exports. + try: + cliapp.ssh_runcmd( + nfs_netloc, ['grep', '-q', exported_path, exports_path]) + except cliapp.AppException: + ip_mask = '*' + options = 'rw,no_subtree_check,no_root_squash,async' + exports_string = '%s %s(%s)\n' % (exported_path, ip_mask, + options) + exports_append_sh = '''\ + set -eu + target="$1" + temp=$(mktemp) + cat "$target" > "$temp" + cat >> "$temp" + mv "$temp" "$target" + ''' + cliapp.ssh_runcmd( + nfs_netloc, + ['sh', '-c', exports_append_sh, '--', exports_path], + feed_stdin=exports_string) + + cliapp.ssh_runcmd(nfs_netloc, + ['systemctl', 'restart', 'nfs-server.service']) + + def update_default_version(self, remote_netloc, system_name, + version_label): + self.status(msg='Linking \'default\' to %(version)s for %(system)s', + version=version_label, system=system_name) + system_path = self.system_path(system_name) + system_version_path = os.path.join(system_path, 'systems', + version_label) + default_path = os.path.join(system_path, 'systems', 'default') + + cliapp.ssh_runcmd(remote_netloc, + ['ln', '-sfn', system_version_path, default_path]) + + +DistbuildTroveNFSBootWriteExtension().run() -- cgit v1.2.1 From cea5244d54568d4ac0e0182c754c623a0fdae79e Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Thu, 23 Apr 2015 13:52:16 +0000 Subject: Rename help file to work with 'help-extensions' subcommand Change-Id: Ibf7cf1f81998678f9354d77f52e54344294e89f7 --- distbuild-trove-nfsboot.help | 49 -------------------------------------- distbuild-trove-nfsboot.write.help | 49 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 49 deletions(-) delete mode 100644 distbuild-trove-nfsboot.help create mode 100644 distbuild-trove-nfsboot.write.help diff --git a/distbuild-trove-nfsboot.help b/distbuild-trove-nfsboot.help deleted file mode 100644 index 62f1455c..00000000 --- a/distbuild-trove-nfsboot.help +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2014, 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it 
under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, see . - -help: | - Deploy a distbuild network, using a Trove to serve the kernel and rootfs. - - The `location` argument is the hostname of the Trove system. - - The following configuration values must be specified: - - - DISTBUILD_CONTROLLER: hostname of controller system - - DISTBUILD_WORKERS: hostnames of each worker system - - DISTBUILD_GIT_SERVER: Trove hostname - - DISTBUILD_SHARED_ARTIFACT_CACHE: Trove hostname - - DISTBUILD_TROVE_ID: Trove ID - - DISTBUILD_WORKER_SSH_KEY: SSH key to be used for ssh:// repos - - A note on TROVE_ID: the current distbuild-setup service requires that - a single 'Trove ID' is specified. This is used in Morph for expanding - keyed URLs. If you set TROVE_ID=foo for example, foo:bar will be expanded - to git://$GIT_SERVER/foo, in addition to the standard baserock: and - upstream: prefixes that you can use. - - The WORKER_SSH_KEY must be provided, even if you don't need it. The - distbuild-setup service could be changed to make it optional. - - The following configuration values are optional: - - - HOST_MAP: a list of key=value pairs mapping hostnames to IP addresses, - or fully-qualified domain names. Useful if you - cannot rely on hostname resolution working for your deploment. - - The extension will connect to root@location via ssh to copy the kernel and - rootfs, and configure the nfs server. It will duplicate the kernel and - rootfs once for each node in the distbuild network. - - The deployment mechanism makes assumptions about the bootloader - configuration of the target machines. diff --git a/distbuild-trove-nfsboot.write.help b/distbuild-trove-nfsboot.write.help new file mode 100644 index 00000000..62f1455c --- /dev/null +++ b/distbuild-trove-nfsboot.write.help @@ -0,0 +1,49 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + +help: | + Deploy a distbuild network, using a Trove to serve the kernel and rootfs. + + The `location` argument is the hostname of the Trove system. + + The following configuration values must be specified: + + - DISTBUILD_CONTROLLER: hostname of controller system + - DISTBUILD_WORKERS: hostnames of each worker system + - DISTBUILD_GIT_SERVER: Trove hostname + - DISTBUILD_SHARED_ARTIFACT_CACHE: Trove hostname + - DISTBUILD_TROVE_ID: Trove ID + - DISTBUILD_WORKER_SSH_KEY: SSH key to be used for ssh:// repos + + A note on TROVE_ID: the current distbuild-setup service requires that + a single 'Trove ID' is specified. This is used in Morph for expanding + keyed URLs. 
If you set TROVE_ID=foo for example, foo:bar will be expanded + to git://$GIT_SERVER/foo, in addition to the standard baserock: and + upstream: prefixes that you can use. + + The WORKER_SSH_KEY must be provided, even if you don't need it. The + distbuild-setup service could be changed to make it optional. + + The following configuration values are optional: + + - HOST_MAP: a list of key=value pairs mapping hostnames to IP addresses, + or fully-qualified domain names. Useful if you + cannot rely on hostname resolution working for your deploment. + + The extension will connect to root@location via ssh to copy the kernel and + rootfs, and configure the nfs server. It will duplicate the kernel and + rootfs once for each node in the distbuild network. + + The deployment mechanism makes assumptions about the bootloader + configuration of the target machines. -- cgit v1.2.1 From 8a56b40116e96905c1cb8d84c231f858a3ff8388 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Javier=20Jard=C3=B3n?= Date: Mon, 13 Apr 2015 18:29:36 +0100 Subject: Add install-essential-files configuration extension This is meant to add essential system files like /etc/profile, /etc/os-release ... Change-Id: I3d67b3a452b32205c5d3c7303d128bda80ce75de --- install-essential-files.configure | 42 ++++++++++++++++++++++++++++++++++ install-essential-files.configure.help | 20 ++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100755 install-essential-files.configure create mode 100644 install-essential-files.configure.help diff --git a/install-essential-files.configure b/install-essential-files.configure new file mode 100755 index 00000000..2779b0d4 --- /dev/null +++ b/install-essential-files.configure @@ -0,0 +1,42 @@ +#!/usr/bin/env python2 +# Copyright (C) 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + + +''' A Morph configuration extension for adding essential files to a system + +It will read the manifest files located in essential-files/manifest, +then use the contens of those files to determine which files +to install into the target system. 
+
+'''
+
+import subprocess
+import os
+
+import cliapp
+
+class InstallEssentialFilesConfigureExtension(cliapp.Application):
+
+    def process_args(self, args):
+        target_root = args[0]
+        os.environ["INSTALL_FILES"] = "essential-files/manifest"
+        self.install_essential_files(target_root)
+
+    def install_essential_files(self, target_root):
+        command = os.path.join(os.path.dirname(__file__),
+                               "install-files.configure")
+        subprocess.check_call([command, target_root])
+
+InstallEssentialFilesConfigureExtension().run()
diff --git a/install-essential-files.configure.help b/install-essential-files.configure.help
new file mode 100644
index 00000000..1d123839
--- /dev/null
+++ b/install-essential-files.configure.help
@@ -0,0 +1,20 @@
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+    This installs files from the essential-files/ folder in your definitions.git
+    repo, according to essential-files/manifest.
+
+    It wraps the install-files.configure extension. Take a look at that
+    extension's help to learn more about the format of the manifest file.

-- cgit v1.2.1


From 7c6ab30c70e141533de6af3257515002259757e1 Mon Sep 17 00:00:00 2001
From: Francisco Redondo Marchena
Date: Tue, 28 Apr 2015 18:37:53 +0000
Subject: Fix lines bigger than 79 characters in
 install-essential-files.configure.help

This fixes morph check.

Change-Id: I5f9c8f5e73c1976b623f9a75f97b5e9aa1887c44
---
 install-essential-files.configure.help | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/install-essential-files.configure.help b/install-essential-files.configure.help
index 1d123839..9148aeff 100644
--- a/install-essential-files.configure.help
+++ b/install-essential-files.configure.help
@@ -13,8 +13,8 @@
 # with this program; if not, see <http://www.gnu.org/licenses/>.

 help: |
-    This installs files from the essential-files/ folder in your definitions.git
-    repo, according to essential-files/manifest.
+    This installs files from the essential-files/ folder in your
+    definitions.git repo, according to essential-files/manifest.

     It wraps the install-files.configure extension. Take a look at that
     extension's help to learn more about the format of the manifest file.

-- cgit v1.2.1


From 7e475f6dc71ae5fb453d91db8cefbf731c9123d2 Mon Sep 17 00:00:00 2001
From: Richard Ipsum
Date: Wed, 28 May 2014 14:37:28 +0100
Subject: Add ssh keys conf ext

Change-Id: I4e7888cbff2e4708154538f8f0a48aeaa1a8a811
---
 sshkeys.configure | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)
 create mode 100755 sshkeys.configure

diff --git a/sshkeys.configure b/sshkeys.configure
new file mode 100755
index 00000000..7a5a8379
--- /dev/null
+++ b/sshkeys.configure
@@ -0,0 +1,25 @@
+#!/bin/sh
+#
+# Copyright 2014 Codethink Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+if [ "$SSHKEYS" ]
+then
+    install -d -m 700 "$1/root/.ssh"
+    echo Adding Key in "$SSHKEYS" to authorized_keys file
+    cat $SSHKEYS >> "$1/root/.ssh/authorized_keys"
+fi
-- cgit v1.2.1


From 703edae4dc4a284c5710ffa41ed79bd095af8b43 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Javier=20Jard=C3=B3n?=
Date: Wed, 29 Apr 2015 22:52:43 +0100
Subject: install-files.configure: make it possible to overwrite symlinks

os.symlink will fail if the destination file/link already exists

Change-Id: I8175c8dce699e55c3e39e35dfd45c0c19b8bd96d
---
 install-files.configure | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/install-files.configure b/install-files.configure
index c2970243..341cce61 100755
--- a/install-files.configure
+++ b/install-files.configure
@@ -24,6 +24,7 @@ to install into the target system.
 
 import cliapp
 import os
+import errno
 import re
 import sys
 import shlex
@@ -53,6 +54,14 @@ class InstallFilesConfigureExtension(cliapp.Application):
         for entry in entries:
             self.install_entry(entry, manifest_dir, target_root)
 
+    def force_symlink(self, source, link_name):
+        try:
+            os.symlink(source, link_name)
+        except OSError as e:
+            if e.errno == errno.EEXIST:
+                os.remove(link_name)
+                os.symlink(source, link_name)
+
     def install_entry(self, entry, manifest_root, target_root):
         m = re.match('(template )?(overwrite )?'
                      '([0-7]+) ([0-9]+) ([0-9]+) (\S+)', entry)
@@ -91,7 +100,7 @@ class InstallFilesConfigureExtension(cliapp.Application):
             else:
                 linkdest = os.readlink(os.path.join(manifest_root,
                                                     './' + path))
-            os.symlink(linkdest, dest_path)
+            self.force_symlink(linkdest, dest_path)
             os.lchown(dest_path, uid, gid)
 
         elif stat.S_ISREG(mode):
-- cgit v1.2.1


From 7c360ce448114dd626661e688e0aad3b3754f302 Mon Sep 17 00:00:00 2001
From: Sam Thursfield
Date: Wed, 29 Apr 2015 15:47:28 +0000
Subject: Fix sysroot.write trying to overwrite existing files

Commit 807e6a90876c5469d242 changed the behaviour of sysroot.write to
avoid deleting the contents of the sysroot. This was done so that, if
you accidentally set 'sysroot=/', it wouldn't delete your whole system.

It turns out that SDK deployments like clusters/sdk-example-cluster.morph
depended on the contents of the directory being deleted. The system
armv7lhf-cross-toolchain-system-x86_64.morph has a bunch of files
installed by the cross-toolchain in
/usr/armv7lhf-baserock-linux-gnueabi/sys-root. Previously sysroot.write
would delete these, but since commit 807e6a90876c5469d242 it would fail
with several errors like:

    mv: can't rename '/src/tmp/deployments/usr/armv7l.../sys-root/sbin'

If we use 'cp -a' instead of 'mv' then it is slower to deploy, but
there are no errors. I am still unsure why files from the
cross-toolchain system are installed and then deleted. Although this
patch fixes the immediate issue, I don't know if it's the right thing
to do. It seems better not to install those files in the first place,
if we do not need them.

This commit also removes the check for the sysroot target location
being empty. This doesn't work, because it runs /before/ the system
being deployed is unpacked.
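As an illustration of the change (the paths here are hypothetical, not
part of the patch itself), the two approaches compare like this:

    # old: move each top-level entry out of the disposable source tree
    find "$src" -maxdepth 1 -mindepth 1 -exec mv {} "$dst/." +
    # new: copy instead; slower, but merges into a non-empty target
    cp -a "$src"/* "$dst"

Note that the glob must sit outside the double quotes so the shell
expands it; the follow-up patch below makes exactly that fix.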
Change-Id: I10671c2f3b2060cfb36f880675b83351c6cdd807
---
 sysroot.check | 6 ------
 sysroot.write | 6 +-----
 2 files changed, 1 insertion(+), 11 deletions(-)

diff --git a/sysroot.check b/sysroot.check
index 8ed965bd..71b35175 100755
--- a/sysroot.check
+++ b/sysroot.check
@@ -17,12 +17,6 @@
 
 set -eu
 
-location="$1"
-if [ -d "$location" ]; then
-    echo >&2 "ERROR: Deployment directory already exists: $location"
-    exit 1
-fi
-
 if [ "$UPGRADE" == "yes" ]; then
     echo >&2 "ERROR: Cannot upgrade a sysroot deployment"
     exit 1
diff --git a/sysroot.write b/sysroot.write
index 0ad8d630..019edbe9 100755
--- a/sysroot.write
+++ b/sysroot.write
@@ -19,8 +19,4 @@
 
 set -eu
 
 mkdir -p "$2"
 
-# Move the contents of our source directory to our target
-# Previously we would (cd "$1" && find -print0 | cpio -0pumd "$absolute_path")
-# to do this, but the source directory is disposable anyway, so we can move
-# its contents to save time
-find "$1" -maxdepth 1 -mindepth 1 -exec mv {} "$2/." +
+cp -a "$1/*" "$2"
-- cgit v1.2.1


From 069bab3f4673b1aba33d6573576f72234a8209cd Mon Sep 17 00:00:00 2001
From: Sam Thursfield
Date: Fri, 8 May 2015 11:32:37 +0000
Subject: Fix mistake in sysroot.write

The * should not be in quotes.

Change-Id: Ieebdc7532ba1bff5ba9742f72440ed00b0c0de2a
---
 sysroot.write | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sysroot.write b/sysroot.write
index 019edbe9..46f1a780 100755
--- a/sysroot.write
+++ b/sysroot.write
@@ -19,4 +19,4 @@
 set -eu
 
 mkdir -p "$2"
 
-cp -a "$1/*" "$2"
+cp -a "$1"/* "$2"
-- cgit v1.2.1


From 30cba5d9a8757f6bafc8079377aa3d6705e8364c Mon Sep 17 00:00:00 2001
From: Richard Ipsum
Date: Thu, 21 May 2015 14:56:25 +0100
Subject: Use keystoneclient python api in openstack.check

Switching to the keystoneclient python api gives us a more reliable
means of detecting auth failure.

Change-Id: I5f734bbfe5568c855f524a3448357f7cf46ab254
---
 openstack.check | 57 ++++++++++++++++++++++++++++-----------------------------
 1 file changed, 28 insertions(+), 29 deletions(-)

diff --git a/openstack.check b/openstack.check
index 4c21b604..a3379763 100755
--- a/openstack.check
+++ b/openstack.check
@@ -18,11 +18,13 @@
 import cliapp
 import os
 import urlparse
+import keystoneclient
 
 import morphlib.writeexts
 
 
 class OpenStackCheckExtension(morphlib.writeexts.WriteExtension):
+
     def process_args(self, args):
         if len(args) != 1:
             raise cliapp.AppException('Wrong number of command line args')
@@ -38,23 +40,30 @@
         location = args[0]
         self.check_location(location)
 
-        os_params = self.get_openstack_parameters()
-
-        self.check_openstack_parameters(location, os_params)
+        self.check_imagename()
+        self.check_openstack_parameters(self._get_auth_parameters(location))
 
-    def get_openstack_parameters(self):
+    def _get_auth_parameters(self, location):
         '''Check the environment variables needed and return them all.
 
         The environment variables are described in the class documentation.
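 
         For illustration, a hypothetical deployment might export (the
         values are made up; only the variable names are meaningful):
 
             OPENSTACK_USER=demo
             OPENSTACK_TENANT=demo
             OPENSTACK_PASSWORD=secret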
''' - keys = ('OPENSTACK_USER', 'OPENSTACK_TENANT', - 'OPENSTACK_IMAGENAME', 'OPENSTACK_PASSWORD') - for key in keys: - if key not in os.environ: + auth_keys = {'OPENSTACK_USER': 'username', + 'OPENSTACK_TENANT': 'tenant_name', + 'OPENSTACK_PASSWORD': 'password'} + + for key in auth_keys: + if os.environ.get(key, '') == '': raise cliapp.AppException(key + ' was not given') - return (os.environ[key] for key in keys) + auth_params = {auth_keys[key]: os.environ[key] for key in auth_keys} + auth_params['auth_url'] = location + return auth_params + + def check_imagename(self): + if os.environ.get('OPENSTACK_IMAGENAME', '') == '': + raise cliapp.AppException('OPENSTACK_IMAGENAME was not given') def check_location(self, location): x = urlparse.urlparse(location) @@ -65,27 +74,17 @@ class OpenStackCheckExtension(morphlib.writeexts.WriteExtension): raise cliapp.AppException('API version must be v2.0 in %s'\ % location) - def check_openstack_parameters(self, auth_url, os_params): - '''Check OpenStack credentials using glance image-list''' + def check_openstack_parameters(self, auth_params): + ''' Check that we can connect to and authenticate with openstack ''' + self.status(msg='Checking OpenStack credentials...') - username, tenant_name, image_name, password = os_params - cmdline = ['glance', - '--os-username', username, - '--os-tenant-name', tenant_name, - '--os-password', password, - '--os-auth-url', auth_url, - 'image-list'] - - exit, out, err = cliapp.runcmd_unchecked(cmdline) - - if exit != 0: - if err.startswith('The request you have made requires ' - 'authentication. (HTTP 401)'): - raise cliapp.AppException('Invalid OpenStack credentials.') - else: - raise cliapp.AppException( - 'Failed to connect to OpenStack instance at %s: %s' % - (auth_url, err)) + try: + keystoneclient.v2_0.Client(**auth_params) + except keystoneclient.exceptions.Unauthorized: + errmsg = ('Failed to authenticate with OpenStack ' + '(are your credentials correct?)') + raise cliapp.AppException(errmsg) + OpenStackCheckExtension().run() -- cgit v1.2.1 From 6f49299467a09236d7c0c564fe55bc8eafa7defd Mon Sep 17 00:00:00 2001 From: Adam Coldrick Date: Tue, 2 Jun 2015 08:22:26 +0000 Subject: Move extensions into a subdirectory Change-Id: I12e7c03b30da78da1eb220d2826ce0003d6efe2e --- add-config-files.configure | 26 - busybox-init.configure | 145 ---- ceph.configure | 266 -------- chef-system-x86_64-container.morph | 12 +- cloud-init.configure | 63 -- clusters/cephclient.morph | 2 +- clusters/ci.morph | 16 +- clusters/example-distbuild-cluster.morph | 6 +- clusters/example-swift-storage-cluster.morph | 4 +- clusters/hardware-deployment.morph | 6 +- clusters/image-package-example.morph | 4 +- clusters/initramfs-test.morph | 4 +- clusters/installer-build-system-x86_64.morph | 8 +- clusters/jetson-upgrade.morph | 2 +- clusters/mason-openstack.morph | 2 +- clusters/mason.morph | 4 +- ...nimal-system-armv5l-openbmc-aspeed-deploy.morph | 2 +- clusters/minimal-system-deploy.morph | 2 +- clusters/moonshot-m2-armv8b64.morph | 4 +- clusters/moonshot-pxe-armv8b64.morph | 2 +- clusters/moonshot-pxe-armv8l64.morph | 2 +- clusters/openstack-one-node-swift.morph | 2 +- clusters/openstack-one-node.morph | 2 +- clusters/openstack-three-node-installer.morph | 6 +- clusters/openstack-two-node-installer.morph | 6 +- clusters/release.morph | 18 +- clusters/sdk-example-cluster.morph | 4 +- clusters/trove-example.morph | 4 +- clusters/trove.baserock.org-upgrade.morph | 2 +- clusters/upgrade-devel.morph | 2 +- 
clusters/weston-system-x86_64-generic-deploy.morph | 4 +- clusters/zookeeper.morph | 4 +- distbuild-trove-nfsboot.check | 150 ---- distbuild-trove-nfsboot.write | 283 -------- distbuild-trove-nfsboot.write.help | 49 -- distbuild.configure | 132 ---- extensions/add-config-files.configure | 26 + extensions/busybox-init.configure | 145 ++++ extensions/ceph.configure | 266 ++++++++ extensions/cloud-init.configure | 63 ++ extensions/distbuild-trove-nfsboot.check | 150 ++++ extensions/distbuild-trove-nfsboot.write | 283 ++++++++ extensions/distbuild-trove-nfsboot.write.help | 49 ++ extensions/distbuild.configure | 132 ++++ extensions/fstab.configure | 28 + extensions/hosts | 1 + extensions/hosts.configure | 48 ++ extensions/image-package-example/README | 9 + extensions/image-package-example/common.sh.in | 72 ++ .../image-package-example/disk-install.sh.in | 51 ++ .../image-package-example/make-disk-image.sh.in | 36 + extensions/image-package.write | 168 +++++ extensions/initramfs.write | 26 + extensions/initramfs.write.help | 55 ++ extensions/install-essential-files.configure | 42 ++ extensions/install-essential-files.configure.help | 20 + extensions/install-files.configure | 134 ++++ extensions/install-files.configure.help | 74 ++ extensions/installer.configure | 48 ++ extensions/jffs2.write | 64 ++ extensions/jffs2.write.help | 28 + extensions/kvm.check | 169 +++++ extensions/kvm.write | 120 ++++ extensions/kvm.write.help | 90 +++ extensions/mason.configure | 153 +++++ extensions/mason/ansible/hosts | 1 + extensions/mason/ansible/mason-setup.yml | 83 +++ extensions/mason/httpd.service | 10 + extensions/mason/mason-generator.sh | 101 +++ extensions/mason/mason-report.sh | 252 +++++++ extensions/mason/mason-setup.service | 16 + extensions/mason/mason.service | 12 + extensions/mason/mason.sh | 93 +++ extensions/mason/mason.timer | 10 + extensions/mason/os-init-script | 6 + extensions/mason/share/mason.conf | 14 + extensions/mason/share/os.conf | 30 + extensions/moonshot-kernel.configure | 33 + extensions/nfsboot-server.configure | 58 ++ extensions/nfsboot.check | 95 +++ extensions/nfsboot.configure | 30 + extensions/nfsboot.write | 202 ++++++ extensions/nfsboot.write.help | 33 + extensions/openstack-ceilometer.configure | 120 ++++ extensions/openstack-cinder.configure | 125 ++++ extensions/openstack-glance.configure | 101 +++ extensions/openstack-ironic.configure | 155 +++++ extensions/openstack-keystone.configure | 123 ++++ extensions/openstack-network.configure | 50 ++ extensions/openstack-neutron.configure | 138 ++++ extensions/openstack-nova.configure | 168 +++++ extensions/openstack-swift-controller.configure | 49 ++ extensions/openstack.check | 90 +++ extensions/openstack.write | 93 +++ extensions/openstack.write.help | 51 ++ extensions/pxeboot.check | 86 +++ extensions/pxeboot.write | 755 +++++++++++++++++++++ extensions/pxeboot.write.help | 166 +++++ extensions/rawdisk.check | 53 ++ extensions/rawdisk.write | 108 +++ extensions/rawdisk.write.help | 82 +++ extensions/sdk.write | 284 ++++++++ extensions/set-hostname.configure | 26 + extensions/simple-network.configure | 292 ++++++++ extensions/ssh-rsync.check | 64 ++ extensions/ssh-rsync.write | 172 +++++ extensions/ssh-rsync.write.help | 50 ++ extensions/sshkeys.configure | 25 + extensions/strip-gplv3.configure | 101 +++ extensions/swift-build-rings.yml | 34 + extensions/swift-storage-devices-validate.py | 60 ++ extensions/swift-storage.configure | 107 +++ extensions/sysroot.check | 23 + extensions/sysroot.write | 22 + extensions/tar.check 
| 23 + extensions/tar.write | 20 + extensions/tar.write.help | 19 + extensions/trove.configure | 148 ++++ extensions/trove.configure.help | 126 ++++ extensions/vagrant.configure | 55 ++ extensions/vdaboot.configure | 33 + extensions/virtualbox-ssh.check | 36 + extensions/virtualbox-ssh.write | 211 ++++++ extensions/virtualbox-ssh.write.help | 135 ++++ fstab.configure | 28 - hosts | 1 - hosts.configure | 48 -- image-package-example/README | 9 - image-package-example/common.sh.in | 72 -- image-package-example/disk-install.sh.in | 51 -- image-package-example/make-disk-image.sh.in | 36 - image-package.write | 168 ----- initramfs.write | 26 - initramfs.write.help | 55 -- install-essential-files.configure | 42 -- install-essential-files.configure.help | 20 - install-files.configure | 134 ---- install-files.configure.help | 74 -- installer.configure | 48 -- jffs2.write | 64 -- jffs2.write.help | 28 - kvm.check | 169 ----- kvm.write | 120 ---- kvm.write.help | 90 --- mason.configure | 153 ----- mason/ansible/hosts | 1 - mason/ansible/mason-setup.yml | 83 --- mason/httpd.service | 10 - mason/mason-generator.sh | 101 --- mason/mason-report.sh | 252 ------- mason/mason-setup.service | 16 - mason/mason.service | 12 - mason/mason.sh | 93 --- mason/mason.timer | 10 - mason/os-init-script | 6 - mason/share/mason.conf | 14 - mason/share/os.conf | 30 - moonshot-kernel.configure | 33 - nfsboot-server.configure | 58 -- nfsboot.check | 95 --- nfsboot.configure | 30 - nfsboot.write | 202 ------ nfsboot.write.help | 33 - openstack-ceilometer.configure | 120 ---- openstack-cinder.configure | 125 ---- openstack-glance.configure | 101 --- openstack-ironic.configure | 155 ----- openstack-keystone.configure | 123 ---- openstack-network.configure | 50 -- openstack-neutron.configure | 138 ---- openstack-nova.configure | 168 ----- openstack-swift-controller.configure | 49 -- openstack.check | 90 --- openstack.write | 93 --- openstack.write.help | 51 -- pxeboot.check | 86 --- pxeboot.write | 755 --------------------- pxeboot.write.help | 166 ----- rawdisk.check | 53 -- rawdisk.write | 108 --- rawdisk.write.help | 82 --- sdk.write | 284 -------- set-hostname.configure | 26 - simple-network.configure | 292 -------- ssh-rsync.check | 64 -- ssh-rsync.write | 172 ----- ssh-rsync.write.help | 50 -- sshkeys.configure | 25 - strip-gplv3.configure | 101 --- swift-build-rings.yml | 34 - swift-storage-devices-validate.py | 60 -- swift-storage.configure | 107 --- sysroot.check | 23 - sysroot.write | 22 - .../armv7lhf-cross-toolchain-system-x86_32.morph | 12 +- .../armv7lhf-cross-toolchain-system-x86_64.morph | 12 +- systems/base-system-armv7-highbank.morph | 10 +- systems/base-system-armv7-versatile.morph | 10 +- systems/base-system-armv7b-highbank.morph | 10 +- systems/base-system-armv7b-vexpress-tc2.morph | 10 +- systems/base-system-armv7lhf-highbank.morph | 10 +- systems/base-system-armv8b64.morph | 12 +- systems/base-system-armv8l64.morph | 12 +- systems/base-system-ppc64-generic.morph | 10 +- systems/base-system-x86_32-generic.morph | 10 +- systems/base-system-x86_64-generic.morph | 10 +- systems/build-system-armv5l-openbmc-aspeed.morph | 12 +- systems/build-system-armv7lhf-highbank.morph | 18 +- systems/build-system-armv7lhf-jetson.morph | 18 +- systems/build-system-armv8b64.morph | 20 +- systems/build-system-armv8l64.morph | 20 +- systems/build-system-ppc64.morph | 18 +- systems/build-system-x86_32-chroot.morph | 18 +- systems/build-system-x86_32.morph | 18 +- systems/build-system-x86_64-chroot.morph | 18 +- 
systems/build-system-x86_64.morph | 18 +- systems/ceph-service-x86_64-generic.morph | 14 +- systems/cxmanage-system-x86_64-generic.morph | 12 +- systems/devel-system-armv7-chroot.morph | 12 +- systems/devel-system-armv7-highbank.morph | 12 +- systems/devel-system-armv7-versatile.morph | 12 +- systems/devel-system-armv7-wandboard.morph | 12 +- systems/devel-system-armv7b-chroot.morph | 12 +- systems/devel-system-armv7b-highbank.morph | 12 +- systems/devel-system-armv7lhf-chroot.morph | 12 +- systems/devel-system-armv7lhf-highbank.morph | 12 +- systems/devel-system-armv7lhf-jetson.morph | 12 +- systems/devel-system-armv7lhf-wandboard.morph | 12 +- systems/devel-system-armv8b64.morph | 16 +- systems/devel-system-armv8l64.morph | 18 +- systems/devel-system-ppc64-chroot.morph | 12 +- systems/devel-system-ppc64-generic.morph | 12 +- systems/devel-system-x86_32-chroot.morph | 12 +- systems/devel-system-x86_32-generic.morph | 14 +- systems/devel-system-x86_64-chroot.morph | 12 +- systems/devel-system-x86_64-generic.morph | 16 +- systems/devel-system-x86_64-vagrant.morph | 14 +- .../genivi-baseline-system-armv7lhf-jetson.morph | 14 +- ...genivi-baseline-system-armv7lhf-versatile.morph | 14 +- .../genivi-baseline-system-x86_64-generic.morph | 12 +- systems/installer-system-armv8b64.morph | 12 +- systems/installer-system-x86_64.morph | 10 +- systems/minimal-system-armv5l-openbmc-aspeed.morph | 12 +- systems/minimal-system-x86_32-generic.morph | 12 +- systems/minimal-system-x86_64-generic.morph | 12 +- systems/nodejs-system-x86_64.morph | 10 +- systems/ocaml-system-x86_64.morph | 8 +- systems/openstack-system-x86_64.morph | 34 +- systems/qt4-devel-system-x86_64-generic.morph | 10 +- systems/qt5-devel-system-x86_64-generic.morph | 10 +- systems/swift-system-x86_64.morph | 16 +- systems/trove-system-x86_64.morph | 14 +- systems/web-system-x86_64-generic.morph | 10 +- systems/weston-system-armv7lhf-jetson.morph | 10 +- systems/weston-system-x86_64-generic.morph | 10 +- systems/xfce-system.morph | 10 +- systems/zookeeper-client-x86_64.morph | 12 +- systems/zookeeper-server-x86_64.morph | 12 +- tar.check | 23 - tar.write | 20 - tar.write.help | 19 - trove.configure | 148 ---- trove.configure.help | 126 ---- vagrant.configure | 55 -- vdaboot.configure | 33 - virtualbox-ssh.check | 36 - virtualbox-ssh.write | 211 ------ virtualbox-ssh.write.help | 135 ---- 268 files changed, 8697 insertions(+), 8697 deletions(-) delete mode 100755 add-config-files.configure delete mode 100644 busybox-init.configure delete mode 100644 ceph.configure delete mode 100755 cloud-init.configure delete mode 100755 distbuild-trove-nfsboot.check delete mode 100755 distbuild-trove-nfsboot.write delete mode 100644 distbuild-trove-nfsboot.write.help delete mode 100644 distbuild.configure create mode 100755 extensions/add-config-files.configure create mode 100644 extensions/busybox-init.configure create mode 100644 extensions/ceph.configure create mode 100755 extensions/cloud-init.configure create mode 100755 extensions/distbuild-trove-nfsboot.check create mode 100755 extensions/distbuild-trove-nfsboot.write create mode 100644 extensions/distbuild-trove-nfsboot.write.help create mode 100644 extensions/distbuild.configure create mode 100755 extensions/fstab.configure create mode 100644 extensions/hosts create mode 100755 extensions/hosts.configure create mode 100644 extensions/image-package-example/README create mode 100644 extensions/image-package-example/common.sh.in create mode 100644 extensions/image-package-example/disk-install.sh.in 
create mode 100644 extensions/image-package-example/make-disk-image.sh.in create mode 100755 extensions/image-package.write create mode 100755 extensions/initramfs.write create mode 100644 extensions/initramfs.write.help create mode 100755 extensions/install-essential-files.configure create mode 100644 extensions/install-essential-files.configure.help create mode 100755 extensions/install-files.configure create mode 100644 extensions/install-files.configure.help create mode 100755 extensions/installer.configure create mode 100644 extensions/jffs2.write create mode 100644 extensions/jffs2.write.help create mode 100755 extensions/kvm.check create mode 100755 extensions/kvm.write create mode 100644 extensions/kvm.write.help create mode 100644 extensions/mason.configure create mode 100644 extensions/mason/ansible/hosts create mode 100644 extensions/mason/ansible/mason-setup.yml create mode 100644 extensions/mason/httpd.service create mode 100755 extensions/mason/mason-generator.sh create mode 100755 extensions/mason/mason-report.sh create mode 100644 extensions/mason/mason-setup.service create mode 100644 extensions/mason/mason.service create mode 100755 extensions/mason/mason.sh create mode 100644 extensions/mason/mason.timer create mode 100644 extensions/mason/os-init-script create mode 100644 extensions/mason/share/mason.conf create mode 100644 extensions/mason/share/os.conf create mode 100644 extensions/moonshot-kernel.configure create mode 100755 extensions/nfsboot-server.configure create mode 100755 extensions/nfsboot.check create mode 100755 extensions/nfsboot.configure create mode 100755 extensions/nfsboot.write create mode 100644 extensions/nfsboot.write.help create mode 100644 extensions/openstack-ceilometer.configure create mode 100644 extensions/openstack-cinder.configure create mode 100644 extensions/openstack-glance.configure create mode 100644 extensions/openstack-ironic.configure create mode 100644 extensions/openstack-keystone.configure create mode 100644 extensions/openstack-network.configure create mode 100644 extensions/openstack-neutron.configure create mode 100644 extensions/openstack-nova.configure create mode 100644 extensions/openstack-swift-controller.configure create mode 100755 extensions/openstack.check create mode 100755 extensions/openstack.write create mode 100644 extensions/openstack.write.help create mode 100755 extensions/pxeboot.check create mode 100644 extensions/pxeboot.write create mode 100644 extensions/pxeboot.write.help create mode 100755 extensions/rawdisk.check create mode 100755 extensions/rawdisk.write create mode 100644 extensions/rawdisk.write.help create mode 100755 extensions/sdk.write create mode 100755 extensions/set-hostname.configure create mode 100755 extensions/simple-network.configure create mode 100755 extensions/ssh-rsync.check create mode 100755 extensions/ssh-rsync.write create mode 100644 extensions/ssh-rsync.write.help create mode 100755 extensions/sshkeys.configure create mode 100755 extensions/strip-gplv3.configure create mode 100644 extensions/swift-build-rings.yml create mode 100755 extensions/swift-storage-devices-validate.py create mode 100644 extensions/swift-storage.configure create mode 100755 extensions/sysroot.check create mode 100755 extensions/sysroot.write create mode 100755 extensions/tar.check create mode 100755 extensions/tar.write create mode 100644 extensions/tar.write.help create mode 100755 extensions/trove.configure create mode 100644 extensions/trove.configure.help create mode 100644 
extensions/vagrant.configure create mode 100755 extensions/vdaboot.configure create mode 100755 extensions/virtualbox-ssh.check create mode 100755 extensions/virtualbox-ssh.write create mode 100644 extensions/virtualbox-ssh.write.help delete mode 100755 fstab.configure delete mode 100644 hosts delete mode 100755 hosts.configure delete mode 100644 image-package-example/README delete mode 100644 image-package-example/common.sh.in delete mode 100644 image-package-example/disk-install.sh.in delete mode 100644 image-package-example/make-disk-image.sh.in delete mode 100755 image-package.write delete mode 100755 initramfs.write delete mode 100644 initramfs.write.help delete mode 100755 install-essential-files.configure delete mode 100644 install-essential-files.configure.help delete mode 100755 install-files.configure delete mode 100644 install-files.configure.help delete mode 100755 installer.configure delete mode 100644 jffs2.write delete mode 100644 jffs2.write.help delete mode 100755 kvm.check delete mode 100755 kvm.write delete mode 100644 kvm.write.help delete mode 100644 mason.configure delete mode 100644 mason/ansible/hosts delete mode 100644 mason/ansible/mason-setup.yml delete mode 100644 mason/httpd.service delete mode 100755 mason/mason-generator.sh delete mode 100755 mason/mason-report.sh delete mode 100644 mason/mason-setup.service delete mode 100644 mason/mason.service delete mode 100755 mason/mason.sh delete mode 100644 mason/mason.timer delete mode 100644 mason/os-init-script delete mode 100644 mason/share/mason.conf delete mode 100644 mason/share/os.conf delete mode 100644 moonshot-kernel.configure delete mode 100755 nfsboot-server.configure delete mode 100755 nfsboot.check delete mode 100755 nfsboot.configure delete mode 100755 nfsboot.write delete mode 100644 nfsboot.write.help delete mode 100644 openstack-ceilometer.configure delete mode 100644 openstack-cinder.configure delete mode 100644 openstack-glance.configure delete mode 100644 openstack-ironic.configure delete mode 100644 openstack-keystone.configure delete mode 100644 openstack-network.configure delete mode 100644 openstack-neutron.configure delete mode 100644 openstack-nova.configure delete mode 100644 openstack-swift-controller.configure delete mode 100755 openstack.check delete mode 100755 openstack.write delete mode 100644 openstack.write.help delete mode 100755 pxeboot.check delete mode 100644 pxeboot.write delete mode 100644 pxeboot.write.help delete mode 100755 rawdisk.check delete mode 100755 rawdisk.write delete mode 100644 rawdisk.write.help delete mode 100755 sdk.write delete mode 100755 set-hostname.configure delete mode 100755 simple-network.configure delete mode 100755 ssh-rsync.check delete mode 100755 ssh-rsync.write delete mode 100644 ssh-rsync.write.help delete mode 100755 sshkeys.configure delete mode 100755 strip-gplv3.configure delete mode 100644 swift-build-rings.yml delete mode 100755 swift-storage-devices-validate.py delete mode 100644 swift-storage.configure delete mode 100755 sysroot.check delete mode 100755 sysroot.write delete mode 100755 tar.check delete mode 100755 tar.write delete mode 100644 tar.write.help delete mode 100755 trove.configure delete mode 100644 trove.configure.help delete mode 100644 vagrant.configure delete mode 100755 vdaboot.configure delete mode 100755 virtualbox-ssh.check delete mode 100755 virtualbox-ssh.write delete mode 100644 virtualbox-ssh.write.help diff --git a/add-config-files.configure b/add-config-files.configure deleted file mode 100755 index 
2cf96fd1..00000000
--- a/add-config-files.configure
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/sh
-# Copyright (C) 2013,2015 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-# Copy all files located in $SRC_CONFIG_DIR to the image /etc.
-
-
-set -e
-
-if [ "x${SRC_CONFIG_DIR}" != x ]
-then
-    cp -r "$SRC_CONFIG_DIR"/* "$1/etc/"
-fi
-
diff --git a/busybox-init.configure b/busybox-init.configure
deleted file mode 100644
index c7dba3b9..00000000
--- a/busybox-init.configure
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/bin/sh
-#
-# Copyright (C) 2014 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# This is a "morph deploy" configuration extension to configure a system
-# to use busybox for its init, if INIT_SYSTEM=busybox is specified.
-#
-# As well as checking INIT_SYSTEM, the following variables are used.
-#
-# Getty configuration:
-# * CONSOLE_DEVICE: Which device to spawn a getty on (default: ttyS0)
-# * CONSOLE_BAUDRATE: Baud rate of the console (default: 115200)
-# * CONSOLE_MODE: What kind of terminal this console emulates
-#   (default: vt100)
-
-if [ "$INIT_SYSTEM" != busybox ]; then
-    echo Not configuring system to use busybox init.
-    exit 0
-fi
-
-set -e
-echo Configuring system to use busybox init
-
-RUN_SCRIPT=/etc/rcS
-INIT_SCRIPT=/sbin/init
-
-install_mdev_config(){
-    install -D -m644 /dev/stdin "$1" <<'EOF'
-# support module loading on hotplug
-$MODALIAS=.* root:root 660 @modprobe "$MODALIAS"
-
-# null may already exist; therefore ownership has to be changed with command
-null root:root 666 @chmod 666 $MDEV
-zero root:root 666
-full root:root 666
-random root:root 444
-urandom root:root 444
-hwrandom root:root 444
-grsec root:root 660
-
-kmem root:root 640
-mem root:root 640
-port root:root 640
-# console may already exist; therefore ownership has to be changed with command
-console root:root 600 @chmod 600 $MDEV
-ptmx root:root 666
-pty.* root:root 660
-
-# Typical devices
-
-tty root:root 666
-tty[0-9]* root:root 660
-vcsa*[0-9]* root:root 660
-ttyS[0-9]* root:root 660
-
-# block devices
-ram[0-9]* root:root 660
-loop[0-9]+ root:root 660
-sd[a-z].* root:root 660
-hd[a-z][0-9]* root:root 660
-md[0-9]* root:root 660
-sr[0-9]* root:root 660 @ln -sf $MDEV cdrom
-fd[0-9]* root:root 660
-
-# net devices
-SUBSYSTEM=net;.* root:root 600 @nameif
-tun[0-9]* root:root 600 =net/
-tap[0-9]* root:root 600 =net/
-EOF
-}
-
-install_start_script(){
-    install -D -m755 /dev/stdin "$1" <<'EOF'
-#!/bin/sh
-mount -t devtmpfs devtmpfs /dev
-mount -t proc proc /proc
-mount -t sysfs sysfs /sys
-mkdir -p /dev/pts
-mount -t devpts devpts /dev/pts
-
-echo /sbin/mdev >/proc/sys/kernel/hotplug
-mdev -s
-
-hostname -F /etc/hostname
-
-run-parts -a start /etc/init.d
-EOF
-}
-
-install_inittab(){
-    local inittab="$1"
-    local dev="$2"
-    local baud="$3"
-    local mode="$4"
-    install -D -m644 /dev/stdin "$1" <&2
-    exit 1
-}
-
-install_mdev_config "$1/etc/mdev.conf"
-
-install_start_script "$1$RUN_SCRIPT"
-
-install_inittab "$1/etc/inittab" "${CONSOLE_DEV-ttyS0}" \
-    "${CONSOLE_BAUD-115200}" "${CONSOLE_MODE-vt100}"
-
-install_init_symlink "$1$INIT_SCRIPT"
diff --git a/ceph.configure b/ceph.configure
deleted file mode 100644
index c3cd92d1..00000000
--- a/ceph.configure
+++ /dev/null
@@ -1,266 +0,0 @@
-#!/usr/bin/python
-# Copyright (C) 2013 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-import cliapp
-import sys
-import os
-import subprocess
-import shutil
-import re
-import stat
-
-systemd_monitor_template = """
-[Unit]
-Description=Ceph Monitor firstboot setup
-After=network-online.target
-
-[Service]
-ExecStart=/bin/bash -c "/root/setup-ceph-head | tee /root/monitor-setuplog"
-ExecStartPost=/bin/rm /etc/systemd/system/multi-user.target.wants/ceph-monitor-fboot.service
-
-[Install]
-Wanted-By=multi-user.target
-"""
-
-systemd_monitor_fname_template = "ceph-monitor-fboot.service"
-
-systemd_osd_template = """
-[Unit]
-Description=Ceph osd firstboot setup
-After=network-online.target
-
-[Service]
-ExecStart=/bin/bash -c "/root/setup-ceph-node | tee /root/storage-setuplog"
-ExecStartPost=/bin/rm /etc/systemd/system/multi-user.target.wants/ceph-storage-fboot.service
-
-[Install]
-Wanted-By=multi-user.target
-"""
-systemd_osd_fname_template = "ceph-storage-fboot.service"
-
-ceph_monitor_config_template = """#!/bin/bash
-ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
-ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'
-ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
-monmaptool --create --add 0 10.0.100.2 --fsid 9ceb9257-7541-4de4-b34b-586079986700 /tmp/monmap
-mkdir /var/lib/ceph/mon/ceph-0
-ceph-mon --mkfs -i 0 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
-/etc/init.d/ceph start mon.0
-touch ~/monitor-configured
-"""
-
-ceph_storage_config_template = """#!/bin/bash
-scp 10.0.100.2:/var/lib/ceph/bootstrap-osd/ceph.keyring /var/lib/ceph/bootstrap-osd/
-echo -e "n\np\n1\n\n\nw\n" | fdisk /dev/sdb
-ceph-disk prepare --cluster ceph --cluster-uuid 9ceb9257-7541-4de4-b34b-586079986700 --fs-type ext4 /dev/sdb1
-sudo ceph-disk activate /dev/sdb1
-/etc/init.d/ceph start osd.0
-touch ~/storage-configured
-"""
-
-executable_file_permissions = stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR | \
-                              stat.S_IXGRP | stat.S_IRGRP | \
-                              stat.S_IXOTH | stat.S_IROTH
-
-class CephConfigurationExtension(cliapp.Application):
-    """
-    Set up ceph server daemons.
-
-    Must include the following environment variables:
-
-    HOSTNAME - Must be defined; it is used as the ID for
-               the monitor and metadata daemons.
-    CEPH_CONF - Provide a ceph configuration file.
-
-    Optional environment variables:
-
-    CEPH_CLUSTER - Cluster name, if not provided defaults to 'ceph'.
-
-    CEPH_BOOTSTRAP_OSD - Registered key capable of generating OSD
-                         keys.
-    CEPH_BOOTSTRAP_MDS - Registered key capable of generating MDS
-                         keys.
-
-    Bootstrap keys are required for creating OSD daemons on servers
-    that do not have a running monitor daemon. They are gathered
-    by 'ceph-deploy gatherkeys' but can be generated and registered
-    separately.
-
-    CEPH_MON - (Blank) Create a ceph monitor daemon on the image.
-    CEPH_MON_KEYRING - Location of monitor keyring. Required by the
-                       monitor if using cephx authentication.
-
-    CEPH_OSD_X_DATA_DIR - Location of data directory for OSD.
-                          Create an OSD daemon on image. 'X' is an integer
-                          id, many osd daemons may be run on same server.
-
-    CEPH_MDS - (Blank) Create a metadata server daemon on server.
- """ - - def process_args(self, args): - - if "HOSTNAME" not in os.environ: - print "ERROR: Need a hostname defined by 'HOSTNAME'" - sys.exit(1) - if "CEPH_CLUSTER" not in os.environ: - print "ERROR: Need a cluster name defined by 'CEPH_CLUSTER'" - sys.exit(1) - if "CEPH_CONF" not in os.environ: - print "ERROR: Need a ceph conf file defined by 'CEPH_CONF'" - sys.exit(1) - - self.dest_dir = args[0] - - self.cluster_name = os.environ["CEPH_CLUSTER"] - self.hostname = os.environ["HOSTNAME"] - - self.conf_file = "/etc/ceph/{}.conf".format(self.cluster_name) - self.mon_dir = "/var/lib/ceph/mon/" - self.osd_dir = "/var/lib/ceph/osd/" - self.mds_dir = "/var/lib/ceph/mds/" - self.tmp_dir = "/var/lib/ceph/tmp/" - self.bootstrap_mds_dir = "/var/lib/ceph/bootstrap-mds/" - self.bootstrap_osd_dir = "/var/lib/ceph/bootstrap-osd/" - self.systemd_dir = "/etc/systemd/system/" - self.systemd_multiuser_dir = "/etc/systemd/system/multi-user.target.wants/" - - self.copy_to_img(os.environ["CEPH_CONF"], self.conf_file) - - # Copy over bootstrap keyrings - if "CEPH_BOOTSTRAP_OSD" in os.environ: - self.copy_bootstrap_osd(os.environ["CEPH_BOOTSTRAP_OSD"]); - if "CEPH_BOOTSTRAP_MDS" in os.environ: - self.copy_bootstrap_mds(os.environ["CEPH_BOOTSTRAP_MDS"]); - - # Configure any monitor daemons - if "CEPH_MON" in os.environ: - self.create_mon_data_dir(os.environ.get("CEPH_MON_KEYRING")) - else: - self.create_osd_startup_script("None", "None") - - # Configure any object storage daemons - osd_re = r"CEPH_OSD_(\d+)_DATA_DIR$" - - for env in os.environ.keys(): - match = re.match(osd_re, env) - if match: - osd_data_dir_env = match.group(0) - osd_id = match.group(1) - - self.create_osd_data_dir(osd_id, os.environ.get(osd_data_dir_env)) - - # Configure any mds daemons - if "CEPH_MDS" in os.environ: - self.create_mds_data_dir() - - # Create a fake 'partprobe' - fake_partprobe_filename = self.dest_dir + "/sbin/partprobe" - fake_partprobe = open(fake_partprobe_filename, 'w') - fake_partprobe.write("#!/bin/bash\nexit 0;\n") - fake_partprobe.close() - os.chmod(fake_partprobe_filename, executable_file_permissions) - self.create_startup_scripts() - - def copy_to_img(self, src_file, dest_file): - shutil.copy(src_file, self.dest_dir + dest_file) - - def copy_bootstrap_osd(self, src_file): - self.copy_to_img(src_file, - os.path.join(self.bootstrap_osd_dir, "{}.keyring".format(self.cluster_name))) - - def copy_bootstrap_mds(self, src_file): - self.copy_to_img(src_file, - os.path.join(self.bootstrap_mds_dir, "{}.keyring".format(self.cluster_name))) - - def symlink_to_multiuser(self, fname): - print >> sys.stderr, os.path.join("../", fname) - print >> sys.stderr, self.dest_dir + os.path.join(self.systemd_multiuser_dir, fname) - os.symlink(os.path.join("../", fname), - self.dest_dir + os.path.join(self.systemd_multiuser_dir, fname)) - - def create_mon_data_dir(self, src_keyring): - - #Create the monitor data directory - mon_data_dir = os.path.join(self.mon_dir, "{}-{}".format(self.cluster_name, self.hostname)) - os.makedirs(self.dest_dir + mon_data_dir) - - #Create sysvinit file to start via sysvinit - sysvinit_file = os.path.join(mon_data_dir, "sysvinit") - open(self.dest_dir + sysvinit_file, 'a').close() - - #Create systemd file to initialize the monitor data directory - keyring = "" - if src_keyring: - #Copy the keyring from local to the image - dest_keyring = os.path.join(self.tmp_dir, - "{}-{}.mon.keyring".format(self.cluster_name, self.hostname)) - self.copy_to_img(src_keyring, dest_keyring) - keyring = "--keyring " + dest_keyring 
-
-        mon_systemd_fname = systemd_monitor_fname_template
-        systemd_script_name = self.dest_dir + os.path.join(self.systemd_dir, mon_systemd_fname)
-        mon_systemd = open(systemd_script_name, 'w')
-        mon_systemd.write(systemd_monitor_template)
-        mon_systemd.close()
-        #Create a symlink to the multi user target
-        self.symlink_to_multiuser(mon_systemd_fname)
-
-    def create_osd_data_dir(self, osd_id, data_dir):
-        if not data_dir:
-            data_dir = '/srv/osd' + osd_id
-
-        #Create the osd data dir
-        os.makedirs(self.dest_dir + data_dir)
-
-    def create_osd_startup_script(self, osd_id, data_dir):
-        osd_systemd_fname = systemd_osd_fname_template
-        osd_full_name = self.dest_dir + os.path.join(self.systemd_dir, osd_systemd_fname)
-
-        osd_systemd = open(osd_full_name, 'w')
-
-        osd_systemd.write(systemd_osd_template)
-        osd_systemd.close()
-
-        #Create a symlink to the multi user target
-        self.symlink_to_multiuser(osd_systemd_fname)
-
-    def create_mds_data_dir(self):
-
-        #Create the mds data directory
-        mds_data_dir = os.path.join(self.mds_dir, "{}-{}".format(self.cluster_name, self.hostname))
-        os.makedirs(self.dest_dir + mds_data_dir)
-
-        #Create sysvinit file to start via sysvinit
-        sysvinit_file = os.path.join(mds_data_dir, "sysvinit")
-        open(self.dest_dir + sysvinit_file, 'a').close()
-
-
-    def create_startup_scripts(self):
-        head_setup_file = os.path.join(self.dest_dir, "root", "setup-ceph-head")
-
-        ceph_head_setup = open(head_setup_file, "w")
-        ceph_head_setup.write(ceph_monitor_config_template)
-        ceph_head_setup.close()
-        os.chmod(head_setup_file, executable_file_permissions)
-
-        osd_setup_file = os.path.join(self.dest_dir, "root", "setup-ceph-node")
-        ceph_node_setup = open(osd_setup_file, "w")
-        ceph_node_setup.write(ceph_storage_config_template)
-        ceph_node_setup.close()
-        os.chmod(osd_setup_file, executable_file_permissions)
-
-
-CephConfigurationExtension().run()
diff --git a/chef-system-x86_64-container.morph b/chef-system-x86_64-container.morph
index 3e81c73e..889eabea 100644
--- a/chef-system-x86_64-container.morph
+++ b/chef-system-x86_64-container.morph
@@ -3,12 +3,12 @@ kind: system
 arch: x86_64
 description: Minimal chef system suitable for running in a container
 configuration-extensions:
-- set-hostname
-- simple-network
-- nfsboot
-- install-files
-- busybox-init
-- remove-gcc
+- extensions/set-hostname
+- extensions/simple-network
+- extensions/nfsboot
+- extensions/install-files
+- extensions/busybox-init
+- extensions/remove-gcc
 strata:
 - name: build-essential
   morph: strata/build-essential.morph
diff --git a/cloud-init.configure b/cloud-init.configure
deleted file mode 100755
index aa83e0e2..00000000
--- a/cloud-init.configure
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/sh
-#
-# Copyright (C) 2014 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-#
-# This is a "morph deploy" configuration extension to enable the
-# cloud-init services.
-set -e - -ROOT="$1" - -########################################################################## - -set -e - -case "$CLOUD_INIT" in -''|False|no) - exit 0 - ;; -True|yes) - echo "Configuring cloud-init" - ;; -*) - echo Unrecognised option "$CLOUD_INIT" to CLOUD_INIT - exit 1 - ;; -esac - - -cloud_init_services="cloud-config.service - cloud-init-local.service - cloud-init.service - cloud-final.service" - -# Iterate over the cloud-init services and enable them creating a link -# into /etc/systemd/system/multi-user.target.wants. -# If the services to link are not present, fail. - -services_folder="lib/systemd/system" -for service_name in $cloud_init_services; do - if [ ! -f "$ROOT/$services_folder/$service_name" ]; then - echo "ERROR: Service $service_name is missing." >&2 - echo "Failed to configure cloud-init." - exit 1 - else - echo Enabling systemd service "$service_name" >"$MORPH_LOG_FD" - ln -sf "/$services_folder/$service_name" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/$service_name" - fi -done diff --git a/clusters/cephclient.morph b/clusters/cephclient.morph index b4db22e0..f9cc04a2 100644 --- a/clusters/cephclient.morph +++ b/clusters/cephclient.morph @@ -4,7 +4,7 @@ systems: - morph: systems/ceph-service-x86_64-generic.morph deploy: ceph-node-virtualbox-image: - type: virtualbox-ssh + type: extensions/virtualbox-ssh SYSTEM: systems/ceph-service-x86_64-generic.morph location: vbox+ssh://user@machine/ChefNode4/home/user/chefnode4.vdi # HOST_IPADDR and NETMASK should be set to the IP address and netmask of the virtualbox host on the host-only interface. diff --git a/clusters/ci.morph b/clusters/ci.morph index cb56328c..d37733ab 100644 --- a/clusters/ci.morph +++ b/clusters/ci.morph @@ -9,19 +9,19 @@ systems: - morph: systems/devel-system-x86_64-generic.morph deploy: devel-system-x86_64-generic: - type: rawdisk + type: extensions/rawdisk location: devel-system-x86_64-generic.img DISK_SIZE: 4G - morph: systems/devel-system-x86_32-generic.morph deploy: devel-system-x86_32-generic: - type: rawdisk + type: extensions/rawdisk location: devel-system-x86_32-generic.img DISK_SIZE: 4G - morph: systems/build-system-armv7lhf-jetson.morph deploy: build-system-armv7lhf-jetson: - type: rawdisk + type: extensions/rawdisk location: build-system-armv7lhf-jetson.img DISK_SIZE: 2G BOOT_DEVICE: "/dev/mmcblk0p1" @@ -33,14 +33,14 @@ systems: - morph: systems/weston-system-x86_64-generic.morph deploy: weston-system-x86_64-generic: - type: rawdisk + type: extensions/rawdisk location: weston-system-x86_64-generic.img DISK_SIZE: 4G KERNEL_ARGS: vga=788 - morph: systems/weston-system-armv7lhf-jetson.morph deploy: weston-system-armv7lhf-jetson: - type: rawdisk + type: extensions/rawdisk location: weston-system-armv7lhf-jetson.img DISK_SIZE: 4G BOOT_DEVICE: "/dev/mmcblk0p1" @@ -52,14 +52,14 @@ systems: - morph: systems/genivi-baseline-system-x86_64-generic.morph deploy: genivi-baseline-system-x86_64-generic: - type: rawdisk + type: extensions/rawdisk location: genivi-baseline-system-x86_64-generic.img DISK_SIZE: 4G KERNEL_ARGS: vga=788 - morph: systems/genivi-baseline-system-armv7lhf-jetson.morph deploy: genivi-baseline-system-armv7lhf-jetson: - type: rawdisk + type: extensions/rawdisk location: genivi-baseline-system-armv7lhf-jetson.img DISK_SIZE: 4G BOOT_DEVICE: "/dev/mmcblk0p1" @@ -71,7 +71,7 @@ systems: - morph: systems/openstack-system-x86_64.morph deploy: openstack-system-x86_64: - type: rawdisk + type: extensions/rawdisk location: baserock-openstack-system-x86_64.img DISK_SIZE: 5G 
INSTALL_FILES: openstack/manifest diff --git a/clusters/example-distbuild-cluster.morph b/clusters/example-distbuild-cluster.morph index 513c16c5..b5cd11ef 100644 --- a/clusters/example-distbuild-cluster.morph +++ b/clusters/example-distbuild-cluster.morph @@ -22,16 +22,16 @@ systems: WORKER_SSH_KEY: ssh-keys/worker.key deploy: build-controller: - type: nfsboot + type: extensions/nfsboot location: $MY_TROVE DISTBUILD_CONTROLLER: true HOSTNAME: build-controller WORKERS: build-node-1, build-node-2 build-node-1: - type: nfsboot + type: extensions/nfsboot location: $MY_TROVE HOSTNAME: build-node-1 build-node-2: - type: nfsboot + type: extensions/nfsboot location: $MY_TROVE HOSTNAME: build-node-2 diff --git a/clusters/example-swift-storage-cluster.morph b/clusters/example-swift-storage-cluster.morph index b1ea784f..2a512709 100644 --- a/clusters/example-swift-storage-cluster.morph +++ b/clusters/example-swift-storage-cluster.morph @@ -39,7 +39,7 @@ systems: deploy: node0: - type: kvm + type: extensions/kvm location: kvm+ssh://user@host/swift-storage-0/home/user/swift-storage-0.img DISK_SIZE: 10G RAM_SIZE: 1G @@ -50,7 +50,7 @@ systems: MANAGEMENT_INTERFACE_IP_ADDRESS: ATTACH_DISKS: /dev/node0_sdb:/dev/node0_sdc:/dev/node0_sdd node1: - type: kvm + type: extensions/kvm location: kvm+ssh://user@host/swift-storage-1/home/user/swift-storage-1.img DISK_SIZE: 10G RAM_SIZE: 1G diff --git a/clusters/hardware-deployment.morph b/clusters/hardware-deployment.morph index c6b7dce9..674d6587 100644 --- a/clusters/hardware-deployment.morph +++ b/clusters/hardware-deployment.morph @@ -8,7 +8,7 @@ systems: - morph: systems/installer-system-x86_64.morph deploy: installer: - type: pxeboot + type: extensions/pxeboot location: AB:CD:EF:12:34:56:78 #MAC address. PXEBOOT_MODE: spawn-novlan PXEBOOT_DEPLOYER_INTERFACE: ens6 @@ -23,7 +23,7 @@ systems: - morph: systems/build-system-x86_64.morph deploy: to-install: - type: sysroot + type: extensions/sysroot location: /rootfs INITRAMFS_PATH: boot/initramfs.gz KERNEL_ARGS: console=ttyS1,9600 console=tty0 @@ -31,5 +31,5 @@ systems: - morph: systems/initramfs-x86_64.morph deploy: initramfs: - type: initramfs + type: extensions/initramfs location: boot/initramfs.gz diff --git a/clusters/image-package-example.morph b/clusters/image-package-example.morph index fd8487e2..ca79ec97 100644 --- a/clusters/image-package-example.morph +++ b/clusters/image-package-example.morph @@ -6,7 +6,7 @@ systems: - morph: systems/base-system-x86_32-generic.morph deploy: imgpkg: - type: image-package + type: extensions/image-package location: image-package-example.tar BOOTLOADER_BLOBS: /usr/share/syslinux/mbr.bin - INCLUDE_SCRIPTS: image-package-example/make-disk-image.sh.in:image-package-example/disk-install.sh.in:image-package-example/common.sh.in + INCLUDE_SCRIPTS: extensions/image-package-example/make-disk-image.sh.in:extensions/image-package-example/disk-install.sh.in:extensions/image-package-example/common.sh.in diff --git a/clusters/initramfs-test.morph b/clusters/initramfs-test.morph index afc94961..dd7d91e1 100644 --- a/clusters/initramfs-test.morph +++ b/clusters/initramfs-test.morph @@ -4,7 +4,7 @@ systems: - morph: systems/base-system-x86_64-generic.morph deploy: system: - type: rawdisk + type: extensions/rawdisk location: initramfs-system-x86_64.img DISK_SIZE: 1G HOSTNAME: initramfs-system @@ -13,5 +13,5 @@ systems: - morph: systems/initramfs-x86_64.morph deploy: initramfs: - type: initramfs + type: extensions/initramfs location: boot/initramfs.gz diff --git 
a/clusters/installer-build-system-x86_64.morph b/clusters/installer-build-system-x86_64.morph
index a9ebcaca..d9a2a28b 100644
--- a/clusters/installer-build-system-x86_64.morph
+++ b/clusters/installer-build-system-x86_64.morph
@@ -24,7 +24,7 @@ systems:
 - morph: systems/installer-system-x86_64.morph
   deploy:
     installer:
-      type: rawdisk
+      type: extensions/rawdisk
       location: installer-build-system-x86_64.img
       KERNEL_ARGS: init=/usr/lib/baserock-installer/installer
       DISK_SIZE: 6G
@@ -36,17 +36,17 @@ systems:
   - morph: systems/initramfs-x86_64.morph
     deploy:
       installer-initramfs:
-        type: initramfs
+        type: extensions/initramfs
         location: boot/initramfs.gz
 - morph: systems/build-system-x86_64.morph
   deploy:
     to-install:
-      type: sysroot
+      type: extensions/sysroot
       location: /rootfs
       INITRAMFS_PATH: boot/initramfs.gz
   subsystems:
   - morph: systems/initramfs-x86_64.morph
     deploy:
       to-install-initramfs:
-        type: initramfs
+        type: extensions/initramfs
         location: boot/initramfs.gz
diff --git a/clusters/jetson-upgrade.morph b/clusters/jetson-upgrade.morph
index 9fd5155b..e6ec97e0 100644
--- a/clusters/jetson-upgrade.morph
+++ b/clusters/jetson-upgrade.morph
@@ -14,5 +14,5 @@ systems:
     FSTAB_SRC: LABEL=src /src auto defaults,rw,noatime,nofail 0 2
   deploy:
     self:
-      type: ssh-rsync
+      type: extensions/ssh-rsync
       location: root@127.0.0.1
diff --git a/clusters/mason-openstack.morph b/clusters/mason-openstack.morph
index 6ef14888..915e14e7 100644
--- a/clusters/mason-openstack.morph
+++ b/clusters/mason-openstack.morph
@@ -18,7 +18,7 @@ systems:
     WORKER_SSH_KEY: ssh-keys/worker.key
   deploy:
     mason-openstack:
-      type: openstack
+      type: extensions/openstack
       location: openstack-auth-url (eg example.com:5000/v2.0)
       DISK_SIZE: 6G
       DISTBUILD_CONTROLLER: true
diff --git a/clusters/mason.morph b/clusters/mason.morph
index 9717239d..21399ea0 100644
--- a/clusters/mason.morph
+++ b/clusters/mason.morph
@@ -11,7 +11,7 @@ systems:
 - morph: systems/trove-system-x86_64.morph
   deploy:
     red-box-v1-trove:
-      type: kvm
+      type: extensions/kvm
       location: kvm+ssh://vm-user@vm-host/red-box-v1-trove/vm-path/red-box-v1-trove.img
       AUTOSTART: true
       DISK_SIZE: 20G
@@ -44,7 +44,7 @@ systems:
     WORKER_SSH_KEY: ssh_keys/worker.key
   deploy:
     red-box-v1-controller:
-      type: kvm
+      type: extensions/kvm
       location: kvm+ssh://vm-user@vm-host/red-box-v1-controller/vm-path/red-box-v1-controller.img
       DISK_SIZE: 60G
       DISTBUILD_CONTROLLER: true
diff --git a/clusters/minimal-system-armv5l-openbmc-aspeed-deploy.morph b/clusters/minimal-system-armv5l-openbmc-aspeed-deploy.morph
index eea600cf..9647e7a7 100644
--- a/clusters/minimal-system-armv5l-openbmc-aspeed-deploy.morph
+++ b/clusters/minimal-system-armv5l-openbmc-aspeed-deploy.morph
@@ -4,7 +4,7 @@ systems:
 - morph: systems/minimal-system-armv5l-openbmc-aspeed.morph
   deploy:
     minimal-system-armv5l-openbmc-aspeed:
-      type: jffs2
+      type: extensions/jffs2
       location: minimal-system-armv5l-openbmc-aspeed.img
       ROOT_DEVICE: "/dev/mtdblock"
       BOOTLOADER_CONFIG_FORMAT: "extlinux"
diff --git a/clusters/minimal-system-deploy.morph b/clusters/minimal-system-deploy.morph
index 06629ffc..cf8de54f 100644
--- a/clusters/minimal-system-deploy.morph
+++ b/clusters/minimal-system-deploy.morph
@@ -6,7 +6,7 @@ systems:
 - morph: systems/minimal-system-x86_32-generic.morph
   deploy:
     vm:
-      type: kvm
+      type: extensions/kvm
       location: kvm+ssh://192.168.122.1/tiny-x86_32/srv/VMs/tiny-x86_32.img
       DISK_SIZE: 512M
       HOSTNAME: tiny-x86_32
diff --git a/clusters/moonshot-m2-armv8b64.morph b/clusters/moonshot-m2-armv8b64.morph
index c8e5bc81..c46b1d9e 100644
--- a/clusters/moonshot-m2-armv8b64.morph
+++ b/clusters/moonshot-m2-armv8b64.morph
@@ -8,7 +8,7 @@ systems:
 - morph: systems/installer-system-armv8b64.morph
   deploy:
     installer:
-      type: pxeboot
+      type: extensions/pxeboot
       location: 14:58:d0:57:7f:42
       PXEBOOT_MODE: existing-server
       PXEBOOT_CONFIG_TFTP_ADDRESS: sftp://192.168.0.1/srv/nfsboot/tftp/
@@ -44,7 +44,7 @@ systems:
 - morph: systems/devel-system-armv8b64.morph
   deploy:
     to-install:
-      type: sysroot
+      type: extensions/sysroot
       location: /rootfs
       HOSTNAME: baserock-c31n1
       DTB_PATH: boot/m400-1003.dtb
diff --git a/clusters/moonshot-pxe-armv8b64.morph b/clusters/moonshot-pxe-armv8b64.morph
index 2d32efb0..a16a3602 100644
--- a/clusters/moonshot-pxe-armv8b64.morph
+++ b/clusters/moonshot-pxe-armv8b64.morph
@@ -9,7 +9,7 @@ systems:
 - morph: systems/devel-system-armv8b64.morph
   deploy:
     netboot:
-      type: pxeboot
+      type: extensions/pxeboot
       location: 14:58:d0:57:7f:42
       PXEBOOT_MODE: existing-server
       PXEBOOT_CONFIG_TFTP_ADDRESS: sftp://192.168.0.1/srv/nfsboot/tftp/
diff --git a/clusters/moonshot-pxe-armv8l64.morph b/clusters/moonshot-pxe-armv8l64.morph
index 3286c72e..9fd7cee7 100644
--- a/clusters/moonshot-pxe-armv8l64.morph
+++ b/clusters/moonshot-pxe-armv8l64.morph
@@ -9,7 +9,7 @@ systems:
 - morph: systems/devel-system-armv8l64.morph
   deploy:
     netboot:
-      type: pxeboot
+      type: extensions/pxeboot
       location: 14:58:d0:57:7f:42
       PXEBOOT_MODE: existing-server
       PXEBOOT_CONFIG_TFTP_ADDRESS: sftp://192.168.0.1/srv/nfsboot/tftp/
diff --git a/clusters/openstack-one-node-swift.morph b/clusters/openstack-one-node-swift.morph
index 588b6e81..37429283 100644
--- a/clusters/openstack-one-node-swift.morph
+++ b/clusters/openstack-one-node-swift.morph
@@ -40,7 +40,7 @@ systems:
 - morph: systems/openstack-system-x86_64.morph
   deploy:
     release:
-      type: rawdisk
+      type: extensions/rawdisk
       location: baserock-openstack-system-x86_64.img
       DISK_SIZE: 10G
       INSTALL_FILES: openstack/manifest swift/manifest
diff --git a/clusters/openstack-one-node.morph b/clusters/openstack-one-node.morph
index 037cd23c..d6b4c582 100644
--- a/clusters/openstack-one-node.morph
+++ b/clusters/openstack-one-node.morph
@@ -40,7 +40,7 @@ systems:
 - morph: systems/openstack-system-x86_64.morph
   deploy:
     release:
-      type: rawdisk
+      type: extensions/rawdisk
       location: baserock-openstack-system-x86_64.img
       DISK_SIZE: 10G
       INSTALL_FILES: openstack/manifest
diff --git a/clusters/openstack-three-node-installer.morph b/clusters/openstack-three-node-installer.morph
index 6285217a..afad454a 100644
--- a/clusters/openstack-three-node-installer.morph
+++ b/clusters/openstack-three-node-installer.morph
@@ -59,7 +59,7 @@ systems:
 - morph: systems/installer-system-x86_64.morph
   deploy:
     network-installer: &installer
-      type: rawdisk
+      type: extensions/rawdisk
       location: installer-openstack-network-x86_64.img
       KERNEL_ARGS: init=/usr/lib/baserock-installer/installer
       DISK_SIZE: 6G
@@ -72,12 +72,12 @@ systems:
   - morph: systems/initramfs-x86_64.morph
     deploy:
       network-initramfs: &initramfs
-        type: initramfs
+        type: extensions/initramfs
         location: boot/initramfs.gz
 - morph: systems/openstack-system-x86_64.morph
   deploy:
     network-to-install: &stack-node
-      type: sysroot
+      type: extensions/sysroot
       location: rootfs
       INSTALL_FILES: openstack/manifest
       INITRAMFS_PATH: boot/initramfs.gz
diff --git a/clusters/openstack-two-node-installer.morph b/clusters/openstack-two-node-installer.morph
index f05b0e9b..53d0b7b1 100644
--- a/clusters/openstack-two-node-installer.morph
+++ b/clusters/openstack-two-node-installer.morph
@@ -58,7 +58,7 @@ systems:
 - morph: systems/installer-system-x86_64.morph
   deploy:
     controller-installer: &installer
-      type: rawdisk
+      type: extensions/rawdisk
       location: installer-openstack-controller-x86_64.img
       KERNEL_ARGS: init=/usr/lib/baserock-installer/installer
       DISK_SIZE: 6G
@@ -71,12 +71,12 @@ systems:
   - morph: systems/initramfs-x86_64.morph
     deploy:
       controller-initramfs: &initramfs
-        type: initramfs
+        type: extensions/initramfs
         location: boot/initramfs.gz
 - morph: systems/openstack-system-x86_64.morph
   deploy:
     controller-to-install: &stack-node
-      type: sysroot
+      type: extensions/sysroot
       location: rootfs
       INSTALL_FILES: openstack/manifest
       INITRAMFS_PATH: boot/initramfs.gz
diff --git a/clusters/release.morph b/clusters/release.morph
index c5bfffca..1574bde6 100644
--- a/clusters/release.morph
+++ b/clusters/release.morph
@@ -10,23 +10,23 @@ systems:
 - morph: systems/build-system-x86_32-chroot.morph
   deploy:
     build-system-x86_32-chroot:
-      type: tar
+      type: extensions/tar
       location: build-system-x86_32-chroot.tar
 - morph: systems/build-system-x86_32.morph
   deploy:
     build-system-x86_32:
-      type: rawdisk
+      type: extensions/rawdisk
       location: build-system-x86_32.img
       DISK_SIZE: 6G
 - morph: systems/build-system-x86_64-chroot.morph
   deploy:
     build-system-x86_64-chroot:
-      type: tar
+      type: extensions/tar
       location: build-system-x86_64-chroot.tar
 - morph: systems/build-system-x86_64.morph
   deploy:
     build-system-x86_64:
-      type: rawdisk
+      type: extensions/rawdisk
       location: build-system-x86_64.img
       DISK_SIZE: 6G
       INITRAMFS_PATH: boot/initramfs.gz
@@ -34,12 +34,12 @@ systems:
   - morph: systems/initramfs-x86_64.morph
     deploy:
       initramfs-build-system-x86_64:
-        type: initramfs
+        type: extensions/initramfs
         location: boot/initramfs.gz
 - morph: systems/build-system-armv7lhf-jetson.morph
   deploy:
     build-system-armv7lhf-jetson:
-      type: rawdisk
+      type: extensions/rawdisk
       location: build-system-armv7lhf-jetson.img
       DISK_SIZE: 2G
       BOOT_DEVICE: "/dev/mmcblk0p1"
@@ -51,7 +51,7 @@ systems:
 - morph: systems/genivi-baseline-system-x86_64-generic.morph
   deploy:
     genivi-baseline-system-x86_64-generic:
-      type: rawdisk
+      type: extensions/rawdisk
       location: genivi-baseline-system-x86_64-generic.img
       DISK_SIZE: 4G
       KERNEL_ARGS: vga=788
@@ -60,12 +60,12 @@ systems:
   - morph: systems/initramfs-x86_64.morph
     deploy:
       initramfs-genivi-baseline-system-x86_64-generic:
-        type: initramfs
+        type: extensions/initramfs
         location: boot/initramfs.gz
 - morph: systems/genivi-baseline-system-armv7lhf-jetson.morph
   deploy:
     genivi-baseline-system-armv7lhf-jetson:
-      type: rawdisk
+      type: extensions/rawdisk
       location: genivi-baseline-system-armv7lhf-jetson.img
       DISK_SIZE: 4G
       BOOT_DEVICE: "/dev/mmcblk0p1"
diff --git a/clusters/sdk-example-cluster.morph b/clusters/sdk-example-cluster.morph
index 92e4a413..a4413c3b 100644
--- a/clusters/sdk-example-cluster.morph
+++ b/clusters/sdk-example-cluster.morph
@@ -34,7 +34,7 @@ systems:
 - morph: systems/armv7lhf-cross-toolchain-system-x86_64.morph
   deploy:
     sdk:
-      type: sdk
+      type: extensions/sdk
       location: armv7lhf-cross-toolchain-system-x86_64.sh
       PREFIX: /usr
       TARGET: armv7lhf-baserock-linux-gnueabi
@@ -42,5 +42,5 @@ systems:
 - morph: systems/devel-system-armv7lhf-highbank.morph
   deploy:
     sysroot:
-      type: sysroot
+      type: extensions/sysroot
       location: usr/armv7lhf-baserock-linux-gnueabi/sys-root
diff --git a/clusters/trove-example.morph b/clusters/trove-example.morph
index 2812f60e..81b1c901 100644
--- a/clusters/trove-example.morph
+++ b/clusters/trove-example.morph
@@ -50,9 +50,9 @@ systems:
     UPSTREAM_TROVE: ''
   deploy:
     initial:
-      type: kvm
+      type: extensions/kvm
       location: kvm+ssh://vm-user@vm-host/test-trove/vm-path/test-trove.img
       VERSION_LABEL: 1
     upgrade:
-      type: ssh-rsync
+      type: extensions/ssh-rsync
       location: test-trove
diff --git a/clusters/trove.baserock.org-upgrade.morph b/clusters/trove.baserock.org-upgrade.morph
index eaf939e1..e66fd6bc 100644
--- a/clusters/trove.baserock.org-upgrade.morph
+++ b/clusters/trove.baserock.org-upgrade.morph
@@ -13,7 +13,7 @@ systems:
 - morph: systems/trove-system-x86_64.morph
   deploy:
     gbo:
-      type: ssh-rsync
+      type: extensions/ssh-rsync
       location: root@git.baserock.org
       FSTAB_HOME: LABEL=homes /home auto defaults,noatime,rw 0 2
       HOSTNAME: firehose1
diff --git a/clusters/upgrade-devel.morph b/clusters/upgrade-devel.morph
index b7ce9bc0..3efbb36a 100644
--- a/clusters/upgrade-devel.morph
+++ b/clusters/upgrade-devel.morph
@@ -35,5 +35,5 @@ systems:
 - morph: systems/devel-system-x86_64-generic.morph
   deploy:
     self:
-      type: ssh-rsync
+      type: extensions/ssh-rsync
       location: root@127.0.0.1
diff --git a/clusters/weston-system-x86_64-generic-deploy.morph b/clusters/weston-system-x86_64-generic-deploy.morph
index 3a6f29ef..65e35bd7 100644
--- a/clusters/weston-system-x86_64-generic-deploy.morph
+++ b/clusters/weston-system-x86_64-generic-deploy.morph
@@ -10,7 +10,7 @@ systems:
 - morph: systems/weston-system-x86_64-generic.morph
   deploy:
     weston-system-x86_64-generic:
-      type: rawdisk
+      type: extensions/rawdisk
       location: /weston-system-x86_64-generic.img
       DISK_SIZE: 4G
       KERNEL_ARGS: vga=788
@@ -19,5 +19,5 @@ systems:
   - morph: systems/initramfs-x86_64.morph
     deploy:
       initramfs:
-        type: initramfs
+        type: extensions/initramfs
         location: boot/initramfs.gz
diff --git a/clusters/zookeeper.morph b/clusters/zookeeper.morph
index 1153d4b0..cca6db81 100644
--- a/clusters/zookeeper.morph
+++ b/clusters/zookeeper.morph
@@ -4,7 +4,7 @@ systems:
 - morph: systems/zookeeper-client-x86_64.morph
   deploy:
     my-client-system:
-      type: kvm
+      type: extensions/kvm
       location: kvm+ssh://username@HOSTNAME/machinename/path/to/zookeeper-client.img
       DISK_SIZE: 4G
       RAM_SIZE: 1G
@@ -13,7 +13,7 @@ systems:
 - morph: systems/zookeeper-server-x86_64.morph
   deploy:
     my-server-system:
-      type: kvm
+      type: extensions/kvm
       location: kvm+ssh://username@HOSTNAME/machinename/path/to/zookeeper-server.img
       DISK_SIZE: 4G
       RAM_SIZE: 1G
diff --git a/distbuild-trove-nfsboot.check b/distbuild-trove-nfsboot.check
deleted file mode 100755
index 38c491e5..00000000
--- a/distbuild-trove-nfsboot.check
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/python
-# Copyright (C) 2014-2015 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <http://www.gnu.org/licenses/>.
- -'''Preparatory checks for Morph 'distbuild-trove-nfsboot' write extension''' - -import cliapp -import logging -import os - -import morphlib.writeexts - - -class DistbuildTroveNFSBootCheckExtension(morphlib.writeexts.WriteExtension): - - nfsboot_root = '/srv/nfsboot' - remote_user = 'root' - - required_vars = [ - 'DISTBUILD_CONTROLLER', - 'DISTBUILD_GIT_SERVER', - 'DISTBUILD_SHARED_ARTIFACT_CACHE', - 'DISTBUILD_TROVE_ID', - 'DISTBUILD_WORKERS', - 'DISTBUILD_WORKER_SSH_KEY', - ] - - def system_path(self, system_name, version_label=None): - if version_label: - return os.path.join(self.nfsboot_root, system_name, 'systems', - version_label, 'run') - else: - return os.path.join(self.nfsboot_root, system_name) - - def process_args(self, args): - if len(args) != 1: - raise cliapp.AppException('Wrong number of command line args') - - nfs_host = args[0] - nfs_netloc = '%s@%s' % (self.remote_user, nfs_host) - - version_label = os.getenv('VERSION_LABEL', 'factory') - - missing_vars = [var for var in self.required_vars - if not var in os.environ] - if missing_vars: - raise cliapp.AppException( - 'Please set: %s' % ', '.join(missing_vars)) - - controllers = os.getenv('DISTBUILD_CONTROLLER').split() - workers = os.getenv('DISTBUILD_WORKERS').split() - - if len(controllers) != 1: - raise cliapp.AppException('Please specify exactly one controller.') - - if len(workers) == 0: - raise cliapp.AppException('Please specify at least one worker.') - - upgrade = self.get_environment_boolean('UPGRADE') - - self.check_good_server(nfs_netloc) - - system_names = set(controllers + workers) - for system_name in system_names: - if upgrade: - self.check_upgradeable(nfs_netloc, system_name, version_label) - else: - system_path = self.system_path(system_name) - - if self.remote_directory_exists(nfs_netloc, system_path): - if self.get_environment_boolean('OVERWRITE') == False: - raise cliapp.AppException( - 'System %s already exists at %s:%s. Try `morph ' - 'upgrade` instead of `morph deploy`.' % ( - system_name, nfs_netloc, system_path)) - - def check_good_server(self, netloc): - # FIXME: assumes root - self.check_ssh_connectivity(netloc.split('@')[-1]) - - # Is an NFS server - try: - cliapp.ssh_runcmd( - netloc, ['test', '-e', '/etc/exports']) - except cliapp.AppException: - raise cliapp.AppException('server %s is not an nfs server' - % netloc) - try: - cliapp.ssh_runcmd( - netloc, ['systemctl', 'is-enabled', 'nfs-server.service']) - - except cliapp.AppException: - raise cliapp.AppException('server %s does not control its ' - 'nfs server by systemd' % netloc) - - # TFTP server exports /srv/nfsboot/tftp - tftp_root = os.path.join(self.nfsboot_root, 'tftp') - try: - cliapp.ssh_runcmd( - netloc, ['test' , '-d', tftp_root]) - except cliapp.AppException: - raise cliapp.AppException('server %s does not export %s' % - (netloc, tftp_root)) - - def check_upgradeable(self, nfs_netloc, system_name, version_label): - '''Check that there is already a version of the system present. - - Distbuild nodes are stateless, so an upgrade is actually pretty much - the same as an initial deployment. This test is just a sanity check. - - ''' - system_path = self.system_path(system_name) - system_version_path = self.system_path(system_name, version_label) - - if not self.remote_directory_exists(nfs_netloc, system_path): - raise cliapp.AppException( - 'System %s not found at %s:%s, cannot deploy an upgrade.' 
% ( - system_name, nfs_netloc, system_path)) - - if self.remote_directory_exists(nfs_netloc, system_version_path): - if self.get_environment_boolean('OVERWRITE'): - pass - else: - raise cliapp.AppException( - 'System %s version %s already exists at %s:%s.' % ( - system_name, version_label, nfs_netloc, - system_version_path)) - - def remote_directory_exists(self, nfs_netloc, path): - try: - cliapp.ssh_runcmd(nfs_netloc, ['test', '-d', path]) - except cliapp.AppException as e: - logging.debug('SSH exception: %s', e) - return False - - return True - - -DistbuildTroveNFSBootCheckExtension().run() diff --git a/distbuild-trove-nfsboot.write b/distbuild-trove-nfsboot.write deleted file mode 100755 index a5a5b094..00000000 --- a/distbuild-trove-nfsboot.write +++ /dev/null @@ -1,283 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2013-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - - -'''Morph .write extension for a distbuild network booting off a Trove with NFS. - -''' - - -import os -import sys -import tempfile - -import cliapp -import morphlib.writeexts - - -class DistbuildTroveNFSBootWriteExtension(morphlib.writeexts.WriteExtension): - - '''Create an NFS root and kernel on TFTP during Morph's deployment. - - See distbuild-trove-nfsboot.help for documentation. - - ''' - - nfsboot_root = '/srv/nfsboot' - remote_user = 'root' - - def system_path(self, system_name, version_label=None): - if version_label: - # The 'run' directory is kind of a historical artifact. Baserock - # systems that have Btrfs root disks maintain an orig/ and a run/ - # subvolume, so that one can find changes that have been made at - # runtime. For distbuild systems, this isn't necessary because the - # root filesystems of the nodes are effectively stateless. However, - # existing systems have bootloaders configured to look for the - # 'run' directory, so we need to keep creating it. 
- return os.path.join(self.nfsboot_root, system_name, 'systems', - version_label, 'run') - else: - return os.path.join(self.nfsboot_root, system_name) - - def process_args(self, args): - if len(args) != 2: - raise cliapp.AppException('Wrong number of command line args') - - local_system_path, nfs_host = args - - nfs_netloc = '%s@%s' % (self.remote_user, nfs_host) - - version_label = os.getenv('VERSION_LABEL', 'factory') - - controller_name = os.getenv('DISTBUILD_CONTROLLER') - worker_names = os.getenv('DISTBUILD_WORKERS').split() - system_names = set([controller_name] + worker_names) - - git_server = os.getenv('DISTBUILD_GIT_SERVER') - shared_artifact_cache = os.getenv('DISTBUILD_SHARED_ARTIFACT_CACHE') - trove_id = os.getenv('DISTBUILD_TROVE_ID') - worker_ssh_key_path = os.getenv('DISTBUILD_WORKER_SSH_KEY') - - host_map = self.parse_host_map_string(os.getenv('HOST_MAP', '')) - - kernel_relpath = self.find_kernel(local_system_path) - - copied_rootfs = None - for system_name in system_names: - remote_system_path = self.system_path(system_name, version_label) - if copied_rootfs is None: - self.transfer_system( - nfs_netloc, local_system_path, remote_system_path) - copied_rootfs = remote_system_path - else: - self.duplicate_remote_system( - nfs_netloc, copied_rootfs, remote_system_path) - - for system_name in system_names: - remote_system_path = self.system_path(system_name, version_label) - self.link_kernel_to_tftpboot_path( - nfs_netloc, system_name, version_label, kernel_relpath) - self.set_hostname( - nfs_netloc, system_name, remote_system_path) - self.write_distbuild_config( - nfs_netloc, system_name, remote_system_path, git_server, - shared_artifact_cache, trove_id, worker_ssh_key_path, - controller_name, worker_names, host_map=host_map) - - self.configure_nfs_exports(nfs_netloc, system_names) - - for system_name in system_names: - self.update_default_version(nfs_netloc, system_name, version_label) - - def parse_host_map_string(self, host_map_string): - '''Parse the HOST_MAP variable - - Returns a dict mapping hostname to value (where value is an IP - address, a fully-qualified domain name, an alternate hostname, or - whatever). - - ''' - pairs = host_map_string.split(' ') - return morphlib.util.parse_environment_pairs({}, pairs) - - def transfer_system(self, nfs_netloc, local_system_path, - remote_system_path): - self.status(msg='Copying rootfs to %(nfs_netloc)s', - nfs_netloc=nfs_netloc) - cliapp.ssh_runcmd( - nfs_netloc, ['mkdir', '-p', remote_system_path]) - # The deployed rootfs may have been created by OSTree, so definitely - # don't pass --hard-links to `rsync`. - cliapp.runcmd( - ['rsync', '--archive', '--delete', '--info=progress2', - '--protect-args', '--partial', '--sparse', '--xattrs', - local_system_path + '/', - '%s:%s' % (nfs_netloc, remote_system_path)], stdout=sys.stdout) - - def duplicate_remote_system(self, nfs_netloc, source_system_path, - target_system_path): - self.status(msg='Duplicating rootfs to %(target_system_path)s', - target_system_path=target_system_path) - cliapp.ssh_runcmd(nfs_netloc, - ['mkdir', '-p', target_system_path]) - # We can't pass --info=progress2 here, because it may not be available - # in the remote 'rsync'. The --info setting was added in RSync 3.1.0, - # old versions of Baserock have RSync 3.0.9. So the user doesn't get - # any progress info on stdout for the 'duplicate' stage. 
- cliapp.ssh_runcmd(nfs_netloc, - ['rsync', '--archive', '--delete', '--protect-args', '--partial', - '--sparse', '--xattrs', source_system_path + '/', - target_system_path], stdout=sys.stdout) - - def find_kernel(self, local_system_path): - bootdir = os.path.join(local_system_path, 'boot') - image_names = ['vmlinuz', 'zImage', 'uImage'] - - for name in image_names: - try_path = os.path.join(bootdir, name) - if os.path.exists(try_path): - kernel_path = os.path.relpath(try_path, local_system_path) - break - else: - raise cliapp.AppException( - 'Could not find a kernel in the system: none of ' - '%s found' % ', '.join(image_names)) - return kernel_path - - def link_kernel_to_tftpboot_path(self, nfs_netloc, system_name, - version_label, kernel_relpath): - '''Create links for TFTP server for a system's kernel.''' - - remote_system_path = self.system_path(system_name, version_label) - kernel_dest = os.path.join(remote_system_path, kernel_relpath) - - self.status(msg='Creating links to %(name)s kernel in tftp directory', - name=system_name) - tftp_dir = os.path.join(self.nfsboot_root , 'tftp') - - versioned_kernel_name = "%s-%s" % (system_name, version_label) - kernel_name = system_name - - cliapp.ssh_runcmd(nfs_netloc, - ['ln', '-f', kernel_dest, - os.path.join(tftp_dir, versioned_kernel_name)]) - - cliapp.ssh_runcmd(nfs_netloc, - ['ln', '-sf', versioned_kernel_name, - os.path.join(tftp_dir, kernel_name)]) - - def set_remote_file_contents(self, nfs_netloc, path, text): - with tempfile.NamedTemporaryFile() as f: - f.write(text) - f.flush() - cliapp.runcmd( - ['scp', f.name, '%s:%s' % (nfs_netloc, path)]) - - def set_hostname(self, nfs_netloc, system_name, system_path): - hostname_path = os.path.join(system_path, 'etc', 'hostname') - self.set_remote_file_contents( - nfs_netloc, hostname_path, system_name + '\n') - - def write_distbuild_config(self, nfs_netloc, system_name, system_path, - git_server, shared_artifact_cache, trove_id, - worker_ssh_key_path, controller_name, - worker_names, host_map = {}): - '''Write /etc/distbuild/distbuild.conf on the node. - - This .write extension takes advantage of the 'generic' mode of - distbuild.configure. Each node is not configured until first-boot, - when distbuild-setup.service runs and configures the node based on the - contents of /etc/distbuild/distbuild.conf. - - ''' - def host(hostname): - return host_map.get(hostname, hostname) - - config = { - 'ARTIFACT_CACHE_SERVER': host(shared_artifact_cache), - 'CONTROLLERHOST': host(controller_name), - 'TROVE_HOST': host(git_server), - 'TROVE_ID': trove_id, - 'DISTBUILD_CONTROLLER': system_name == controller_name, - 'DISTBUILD_WORKER': system_name in worker_names, - 'WORKERS': ', '.join(map(host, worker_names)), - 'WORKER_SSH_KEY': '/etc/distbuild/worker.key', - } - - config_text = '\n'.join( - '%s: %s' % (key, value) for key, value in config.iteritems()) - config_text = \ - '# Generated by distbuild-trove-nfsboot.write\n' + \ - config_text + '\n' - path = os.path.join(system_path, 'etc', 'distbuild') - cliapp.ssh_runcmd( - nfs_netloc, ['mkdir', '-p', path]) - cliapp.runcmd( - ['scp', worker_ssh_key_path, '%s:%s' % (nfs_netloc, path)]) - self.set_remote_file_contents( - nfs_netloc, os.path.join(path, 'distbuild.conf'), config_text) - - def configure_nfs_exports(self, nfs_netloc, system_names): - '''Ensure the Trove is set up to export the NFS roots we need. - - This doesn't handle setting up the TFTP daemon. We assume that is - already running. 
- - ''' - for system_name in system_names: - exported_path = self.system_path(system_name) - exports_path = '/etc/exports' - - # Rather ugly SSH hackery follows to ensure each system path is - # listed in /etc/exports. - try: - cliapp.ssh_runcmd( - nfs_netloc, ['grep', '-q', exported_path, exports_path]) - except cliapp.AppException: - ip_mask = '*' - options = 'rw,no_subtree_check,no_root_squash,async' - exports_string = '%s %s(%s)\n' % (exported_path, ip_mask, - options) - exports_append_sh = '''\ - set -eu - target="$1" - temp=$(mktemp) - cat "$target" > "$temp" - cat >> "$temp" - mv "$temp" "$target" - ''' - cliapp.ssh_runcmd( - nfs_netloc, - ['sh', '-c', exports_append_sh, '--', exports_path], - feed_stdin=exports_string) - - cliapp.ssh_runcmd(nfs_netloc, - ['systemctl', 'restart', 'nfs-server.service']) - - def update_default_version(self, remote_netloc, system_name, - version_label): - self.status(msg='Linking \'default\' to %(version)s for %(system)s', - version=version_label, system=system_name) - system_path = self.system_path(system_name) - system_version_path = os.path.join(system_path, 'systems', - version_label) - default_path = os.path.join(system_path, 'systems', 'default') - - cliapp.ssh_runcmd(remote_netloc, - ['ln', '-sfn', system_version_path, default_path]) - - -DistbuildTroveNFSBootWriteExtension().run() diff --git a/distbuild-trove-nfsboot.write.help b/distbuild-trove-nfsboot.write.help deleted file mode 100644 index 62f1455c..00000000 --- a/distbuild-trove-nfsboot.write.help +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2014, 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, see . - -help: | - Deploy a distbuild network, using a Trove to serve the kernel and rootfs. - - The `location` argument is the hostname of the Trove system. - - The following configuration values must be specified: - - - DISTBUILD_CONTROLLER: hostname of controller system - - DISTBUILD_WORKERS: hostnames of each worker system - - DISTBUILD_GIT_SERVER: Trove hostname - - DISTBUILD_SHARED_ARTIFACT_CACHE: Trove hostname - - DISTBUILD_TROVE_ID: Trove ID - - DISTBUILD_WORKER_SSH_KEY: SSH key to be used for ssh:// repos - - A note on TROVE_ID: the current distbuild-setup service requires that - a single 'Trove ID' is specified. This is used in Morph for expanding - keyed URLs. If you set TROVE_ID=foo for example, foo:bar will be expanded - to git://$GIT_SERVER/foo, in addition to the standard baserock: and - upstream: prefixes that you can use. - - The WORKER_SSH_KEY must be provided, even if you don't need it. The - distbuild-setup service could be changed to make it optional. - - The following configuration values are optional: - - - HOST_MAP: a list of key=value pairs mapping hostnames to IP addresses, - or fully-qualified domain names. Useful if you - cannot rely on hostname resolution working for your deploment. - - The extension will connect to root@location via ssh to copy the kernel and - rootfs, and configure the nfs server. 
It will duplicate the kernel and - rootfs once for each node in the distbuild network. - - The deployment mechanism makes assumptions about the bootloader - configuration of the target machines. diff --git a/distbuild.configure b/distbuild.configure deleted file mode 100644 index 062aaecc..00000000 --- a/distbuild.configure +++ /dev/null @@ -1,132 +0,0 @@ -#!/bin/sh -# Copyright (C) 2013-2014 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# This is a "morph deploy" configure extension to configure a Baserock -# build node, as part of a distributed building cluster. It uses the -# following variables from the environment: -# -# * DISTBUILD_CONTROLLER: if 'yes', machine is set up as the controller. -# * DISTBUILD_WORKER: if 'yes', machine is set up as a worker. -# * TROVE_ID: hostname and Trove prefix of the server to pull source -# from and push built artifacts to. -# * TROVE_HOST: FQDN of the same server as in TROVE_ID -# -# The following variable is optional: -# -# * ARTIFACT_CACHE_SERVER: by default artifacts are pushed to the same -# Trove that served the source, but you can use a different one. -# -# The following variable is required for worker nodes only: -# -# * CONTROLLERHOST: hostname or IP address of distbuild controller machine. -# * WORKER_SSH_KEY: identity used to authenticate with Trove -# -# The following variable is required for the controller node only: -# -# * WORKERS: hostnames or IP address of worker nodes, comma-separated. - -set -e - -if [ -n "$DISTBUILD_GENERIC" ]; then - echo "Not configuring the distbuild node, it will be generic" - exit 0 -fi - -# Set default values for these two options if they are unset, so that if the -# user specifies no distbuild config at all the configure extension exits -# without doing anything but does not raise an error. -DISTBUILD_CONTROLLER=${DISTBUILD_CONTROLLER-False} -DISTBUILD_WORKER=${DISTBUILD_WORKER-False} - -if [ "$DISTBUILD_CONTROLLER" = False -a "$DISTBUILD_WORKER" = False ]; then - exit 0 -fi - -set -u - -# Check that all the variables needed are present: - -error_vars=false - -if [ "x$TROVE_HOST" = "x" ]; then - echo "ERROR: TROVE_HOST needs to be defined." - error_vars=true -fi - -if [ "x$TROVE_ID" = "x" ]; then - echo "ERROR: TROVE_ID needs to be defined." - error_vars=true -fi - -if [ "$DISTBUILD_WORKER" = True ]; then - if ! ssh-keygen -lf "$WORKER_SSH_KEY" > /dev/null 2>&1; then - echo "ERROR: WORKER_SSH_KEY is not a vaild ssh key." - error_vars=true - fi - - if [ "x$CONTROLLERHOST" = "x" ]; then - echo "ERROR: CONTROLLERHOST needs to be defined." - error_vars=true - fi -fi - -if [ "$DISTBUILD_CONTROLLER" = True ]; then - if [ "x$WORKERS" = "x" ]; then - echo "ERROR: WORKERS needs to be defined." 
- error_vars=true - fi -fi - -if "$error_vars"; then - exit 1 -fi - - -ROOT="$1" - -DISTBUILD_DATA="$ROOT/etc/distbuild" -mkdir -p "$DISTBUILD_DATA" - -# If it's a worker, install the worker ssh key. -if [ "$DISTBUILD_WORKER" = True ] -then - install -m 0644 "$WORKER_SSH_KEY" "$DISTBUILD_DATA/worker.key" -fi - - - -# Create the configuration file -python <<'EOF' >"$DISTBUILD_DATA/distbuild.conf" -import os, sys, yaml - -distbuild_configuration={ - 'TROVE_ID': os.environ['TROVE_ID'], - 'TROVE_HOST': os.environ['TROVE_HOST'], - 'DISTBUILD_WORKER': os.environ['DISTBUILD_WORKER'], - 'DISTBUILD_CONTROLLER': os.environ['DISTBUILD_CONTROLLER'], - 'WORKER_SSH_KEY': '/etc/distbuild/worker.key', -} - - -optional_keys = ('ARTIFACT_CACHE_SERVER', 'CONTROLLERHOST', 'WORKERS', - 'TROVE_BACKUP_KEYS') - -for key in optional_keys: - if key in os.environ: - distbuild_configuration[key] = os.environ[key] - -yaml.dump(distbuild_configuration, sys.stdout, default_flow_style=False) -EOF diff --git a/extensions/add-config-files.configure b/extensions/add-config-files.configure new file mode 100755 index 00000000..2cf96fd1 --- /dev/null +++ b/extensions/add-config-files.configure @@ -0,0 +1,26 @@ +#!/bin/sh +# Copyright (C) 2013,2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + + +# Copy all files located in $SRC_CONFIG_DIR to the image /etc. + + +set -e + +if [ "x${SRC_CONFIG_DIR}" != x ] +then + cp -r "$SRC_CONFIG_DIR"/* "$1/etc/" +fi + diff --git a/extensions/busybox-init.configure b/extensions/busybox-init.configure new file mode 100644 index 00000000..c7dba3b9 --- /dev/null +++ b/extensions/busybox-init.configure @@ -0,0 +1,145 @@ +#!/bin/sh +# +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# This is a "morph deploy" configuration extension to configure a system +# to use busybox for its init, if INIT_SYSTEM=busybox is specified. +# +# As well as checking INIT_SYSTEM, the following variables are used. +# +# Getty configuration: +# * CONSOLE_DEVICE: Which device to spawn a getty on (default: ttyS0) +# * CONSOLE_BAUDRATE: Baud rate of the console (default: 115200) +# * CONSOLE_MODE: What kind of terminal this console emulates +# (default: vt100) + +if [ "$INIT_SYSTEM" != busybox ]; then + echo Not configuring system to use busybox init. 
+ exit 0 +fi + +set -e +echo Configuring system to use busybox init + +RUN_SCRIPT=/etc/rcS +INIT_SCRIPT=/sbin/init + +install_mdev_config(){ + install -D -m644 /dev/stdin "$1" <<'EOF' +# support module loading on hotplug +$MODALIAS=.* root:root 660 @modprobe "$MODALIAS" + +# null may already exist; therefore ownership has to be changed with command +null root:root 666 @chmod 666 $MDEV +zero root:root 666 +full root:root 666 +random root:root 444 +urandom root:root 444 +hwrandom root:root 444 +grsec root:root 660 + +kmem root:root 640 +mem root:root 640 +port root:root 640 +# console may already exist; therefore ownership has to be changed with command +console root:root 600 @chmod 600 $MDEV +ptmx root:root 666 +pty.* root:root 660 + +# Typical devices + +tty root:root 666 +tty[0-9]* root:root 660 +vcsa*[0-9]* root:root 660 +ttyS[0-9]* root:root 660 + +# block devices +ram[0-9]* root:root 660 +loop[0-9]+ root:root 660 +sd[a-z].* root:root 660 +hd[a-z][0-9]* root:root 660 +md[0-9]* root:root 660 +sr[0-9]* root:root 660 @ln -sf $MDEV cdrom +fd[0-9]* root:root 660 + +# net devices +SUBSYSTEM=net;.* root:root 600 @nameif +tun[0-9]* root:root 600 =net/ +tap[0-9]* root:root 600 =net/ +EOF +} + +install_start_script(){ + install -D -m755 /dev/stdin "$1" <<'EOF' +#!/bin/sh +mount -t devtmpfs devtmpfs /dev +mount -t proc proc /proc +mount -t sysfs sysfs /sys +mkdir -p /dev/pts +mount -t devpts devpts /dev/pts + +echo /sbin/mdev >/proc/sys/kernel/hotplug +mdev -s + +hostname -F /etc/hostname + +run-parts -a start /etc/init.d +EOF +} + +install_inittab(){ + local inittab="$1" + local dev="$2" + local baud="$3" + local mode="$4" + install -D -m644 /dev/stdin "$1" <&2 + exit 1 +} + +install_mdev_config "$1/etc/mdev.conf" + +install_start_script "$1$RUN_SCRIPT" + +install_inittab "$1/etc/inittab" "${CONSOLE_DEV-ttyS0}" \ + "${CONSOLE_BAUD-115200}" "${CONSOLE_MODE-vt100}" + +install_init_symlink "$1$INIT_SCRIPT" diff --git a/extensions/ceph.configure b/extensions/ceph.configure new file mode 100644 index 00000000..c3cd92d1 --- /dev/null +++ b/extensions/ceph.configure @@ -0,0 +1,266 @@ +#!/usr/bin/python +# Copyright (C) 2013 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License.5 +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
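+
+# A hypothetical example invocation, for illustration only (morph normally
+# runs this extension itself, passing the target rootfs as the single
+# argument; all values below are invented):
+#
+#   HOSTNAME=ceph-node-1 CEPH_CLUSTER=ceph CEPH_CONF=./ceph.conf \
+#       ./extensions/ceph.configure /mnt/target-rootfs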
+
+import cliapp
+import sys
+import os
+import subprocess
+import shutil
+import re
+import stat
+
+systemd_monitor_template = """
+[Unit]
+Description=Ceph Monitor firstboot setup
+After=network-online.target
+
+[Service]
+ExecStart=/bin/bash -c "/root/setup-ceph-head | tee /root/monitor-setuplog"
+ExecStartPost=/bin/rm /etc/systemd/system/multi-user.target.wants/ceph-monitor-fboot.service
+
+[Install]
+WantedBy=multi-user.target
+"""
+
+systemd_monitor_fname_template = "ceph-monitor-fboot.service"
+
+systemd_osd_template = """
+[Unit]
+Description=Ceph osd firstboot setup
+After=network-online.target
+
+[Service]
+ExecStart=/bin/bash -c "/root/setup-ceph-node | tee /root/storage-setuplog"
+ExecStartPost=/bin/rm /etc/systemd/system/multi-user.target.wants/ceph-storage-fboot.service
+
+[Install]
+WantedBy=multi-user.target
+"""
+systemd_osd_fname_template = "ceph-storage-fboot.service"
+
+ceph_monitor_config_template = """#!/bin/bash
+ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
+ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'
+ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
+monmaptool --create --add 0 10.0.100.2 --fsid 9ceb9257-7541-4de4-b34b-586079986700 /tmp/monmap
+mkdir /var/lib/ceph/mon/ceph-0
+ceph-mon --mkfs -i 0 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
+/etc/init.d/ceph start mon.0
+touch ~/monitor-configured
+"""
+
+ceph_storage_config_template = """#!/bin/bash
+scp 10.0.100.2:/var/lib/ceph/bootstrap-osd/ceph.keyring /var/lib/ceph/bootstrap-osd/
+echo -e "n\np\n1\n\n\nw\n" | fdisk /dev/sdb
+ceph-disk prepare --cluster ceph --cluster-uuid 9ceb9257-7541-4de4-b34b-586079986700 --fs-type ext4 /dev/sdb1
+sudo ceph-disk activate /dev/sdb1
+/etc/init.d/ceph start osd.0
+touch ~/storage-configured
+"""
+
+executable_file_permissions = stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR | \
+                              stat.S_IXGRP | stat.S_IRGRP | \
+                              stat.S_IXOTH | stat.S_IROTH
+
+class CephConfigurationExtension(cliapp.Application):
+    """
+    Set up ceph server daemons.
+
+    Must include the following environment variables:
+
+    HOSTNAME - Must be defined; it is used as the ID for
+        the monitor and metadata daemons.
+    CEPH_CONF - Provide a ceph configuration file.
+
+    Optional environment variables:
+
+    CEPH_CLUSTER - Cluster name, if not provided defaults to 'ceph'.
+
+    CEPH_BOOTSTRAP_OSD - Registered key capable of generating OSD
+        keys.
+    CEPH_BOOTSTRAP_MDS - Registered key capable of generating MDS
+        keys.
+
+    Bootstrap keys are required for creating OSD daemons on servers
+    that do not have a running monitor daemon. They are gathered
+    by 'ceph-deploy gatherkeys' but can be generated and registered
+    separately.
+
+    CEPH_MON - (Blank) Create a ceph monitor daemon on the image.
+    CEPH_MON_KEYRING - Location of monitor keyring. Required by the
+        monitor if using cephx authentication.
+
+    CEPH_OSD_X_DATA_DIR - Location of data directory for OSD.
+        Create an OSD daemon on image. 'X' is an integer
+        id, many osd daemons may be run on same server.
+
+    CEPH_MDS - (Blank) Create a metadata server daemon on server.
+ """ + + def process_args(self, args): + + if "HOSTNAME" not in os.environ: + print "ERROR: Need a hostname defined by 'HOSTNAME'" + sys.exit(1) + if "CEPH_CLUSTER" not in os.environ: + print "ERROR: Need a cluster name defined by 'CEPH_CLUSTER'" + sys.exit(1) + if "CEPH_CONF" not in os.environ: + print "ERROR: Need a ceph conf file defined by 'CEPH_CONF'" + sys.exit(1) + + self.dest_dir = args[0] + + self.cluster_name = os.environ["CEPH_CLUSTER"] + self.hostname = os.environ["HOSTNAME"] + + self.conf_file = "/etc/ceph/{}.conf".format(self.cluster_name) + self.mon_dir = "/var/lib/ceph/mon/" + self.osd_dir = "/var/lib/ceph/osd/" + self.mds_dir = "/var/lib/ceph/mds/" + self.tmp_dir = "/var/lib/ceph/tmp/" + self.bootstrap_mds_dir = "/var/lib/ceph/bootstrap-mds/" + self.bootstrap_osd_dir = "/var/lib/ceph/bootstrap-osd/" + self.systemd_dir = "/etc/systemd/system/" + self.systemd_multiuser_dir = "/etc/systemd/system/multi-user.target.wants/" + + self.copy_to_img(os.environ["CEPH_CONF"], self.conf_file) + + # Copy over bootstrap keyrings + if "CEPH_BOOTSTRAP_OSD" in os.environ: + self.copy_bootstrap_osd(os.environ["CEPH_BOOTSTRAP_OSD"]); + if "CEPH_BOOTSTRAP_MDS" in os.environ: + self.copy_bootstrap_mds(os.environ["CEPH_BOOTSTRAP_MDS"]); + + # Configure any monitor daemons + if "CEPH_MON" in os.environ: + self.create_mon_data_dir(os.environ.get("CEPH_MON_KEYRING")) + else: + self.create_osd_startup_script("None", "None") + + # Configure any object storage daemons + osd_re = r"CEPH_OSD_(\d+)_DATA_DIR$" + + for env in os.environ.keys(): + match = re.match(osd_re, env) + if match: + osd_data_dir_env = match.group(0) + osd_id = match.group(1) + + self.create_osd_data_dir(osd_id, os.environ.get(osd_data_dir_env)) + + # Configure any mds daemons + if "CEPH_MDS" in os.environ: + self.create_mds_data_dir() + + # Create a fake 'partprobe' + fake_partprobe_filename = self.dest_dir + "/sbin/partprobe" + fake_partprobe = open(fake_partprobe_filename, 'w') + fake_partprobe.write("#!/bin/bash\nexit 0;\n") + fake_partprobe.close() + os.chmod(fake_partprobe_filename, executable_file_permissions) + self.create_startup_scripts() + + def copy_to_img(self, src_file, dest_file): + shutil.copy(src_file, self.dest_dir + dest_file) + + def copy_bootstrap_osd(self, src_file): + self.copy_to_img(src_file, + os.path.join(self.bootstrap_osd_dir, "{}.keyring".format(self.cluster_name))) + + def copy_bootstrap_mds(self, src_file): + self.copy_to_img(src_file, + os.path.join(self.bootstrap_mds_dir, "{}.keyring".format(self.cluster_name))) + + def symlink_to_multiuser(self, fname): + print >> sys.stderr, os.path.join("../", fname) + print >> sys.stderr, self.dest_dir + os.path.join(self.systemd_multiuser_dir, fname) + os.symlink(os.path.join("../", fname), + self.dest_dir + os.path.join(self.systemd_multiuser_dir, fname)) + + def create_mon_data_dir(self, src_keyring): + + #Create the monitor data directory + mon_data_dir = os.path.join(self.mon_dir, "{}-{}".format(self.cluster_name, self.hostname)) + os.makedirs(self.dest_dir + mon_data_dir) + + #Create sysvinit file to start via sysvinit + sysvinit_file = os.path.join(mon_data_dir, "sysvinit") + open(self.dest_dir + sysvinit_file, 'a').close() + + #Create systemd file to initialize the monitor data directory + keyring = "" + if src_keyring: + #Copy the keyring from local to the image + dest_keyring = os.path.join(self.tmp_dir, + "{}-{}.mon.keyring".format(self.cluster_name, self.hostname)) + self.copy_to_img(src_keyring, dest_keyring) + keyring = "--keyring " + dest_keyring 
+ + mon_systemd_fname = systemd_monitor_fname_template + systemd_script_name = self.dest_dir + os.path.join(self.systemd_dir, mon_systemd_fname) + mon_systemd = open(systemd_script_name, 'w') + mon_systemd.write(systemd_monitor_template) + mon_systemd.close() + #Create a symlink to the multi user target + self.symlink_to_multiuser(mon_systemd_fname) + + def create_osd_data_dir(self, osd_id, data_dir): + if not data_dir: + data_dir = '/srv/osd' + osd_id + + #Create the osd data dir + os.makedirs(self.dest_dir + data_dir) + + def create_osd_startup_script(self, osd_id, data_dir): + osd_systemd_fname = systemd_osd_fname_template + osd_full_name = self.dest_dir + os.path.join(self.systemd_dir, osd_systemd_fname) + + osd_systemd = open(osd_full_name, 'w') + + osd_systemd.write(systemd_osd_template) + osd_systemd.close() + + #Create a symlink to the multi user target + self.symlink_to_multiuser(osd_systemd_fname) + + def create_mds_data_dir(self): + + #Create the monitor data directory + mds_data_dir = os.path.join(self.mds_dir, "{}-{}".format(self.cluster_name, self.hostname)) + os.makedirs(self.dest_dir + mds_data_dir) + + #Create sysvinit file to start via sysvinit + sysvinit_file = os.path.join(mds_data_dir, "sysvinit") + open(self.dest_dir + sysvinit_file, 'a').close() + + + def create_startup_scripts(self): + head_setup_file = os.path.join(self.dest_dir, "root", "setup-ceph-head") + + ceph_head_setup = open(head_setup_file, "w") + ceph_head_setup.write(ceph_monitor_config_template) + ceph_head_setup.close() + os.chmod(head_setup_file, executable_file_permissions) + + osd_setup_file = os.path.join(self.dest_dir, "root", "setup-ceph-node") + ceph_node_setup = open(osd_setup_file, "w") + ceph_node_setup.write(ceph_storage_config_template) + ceph_node_setup.close() + os.chmod(osd_setup_file, executable_file_permissions) + + +CephConfigurationExtension().run() diff --git a/extensions/cloud-init.configure b/extensions/cloud-init.configure new file mode 100755 index 00000000..aa83e0e2 --- /dev/null +++ b/extensions/cloud-init.configure @@ -0,0 +1,63 @@ +#!/bin/sh +# +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# +# This is a "morph deploy" configuration extension to enable the +# cloud-init services. +set -e + +ROOT="$1" + +########################################################################## + +set -e + +case "$CLOUD_INIT" in +''|False|no) + exit 0 + ;; +True|yes) + echo "Configuring cloud-init" + ;; +*) + echo Unrecognised option "$CLOUD_INIT" to CLOUD_INIT + exit 1 + ;; +esac + + +cloud_init_services="cloud-config.service + cloud-init-local.service + cloud-init.service + cloud-final.service" + +# Iterate over the cloud-init services and enable them creating a link +# into /etc/systemd/system/multi-user.target.wants. +# If the services to link are not present, fail. 
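+#
+# For example, cloud-init.service ends up enabled via a symlink:
+#   $ROOT/etc/systemd/system/multi-user.target.wants/cloud-init.service
+#     -> /lib/systemd/system/cloud-init.service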
+ +services_folder="lib/systemd/system" +for service_name in $cloud_init_services; do + if [ ! -f "$ROOT/$services_folder/$service_name" ]; then + echo "ERROR: Service $service_name is missing." >&2 + echo "Failed to configure cloud-init." + exit 1 + else + echo Enabling systemd service "$service_name" >"$MORPH_LOG_FD" + ln -sf "/$services_folder/$service_name" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/$service_name" + fi +done diff --git a/extensions/distbuild-trove-nfsboot.check b/extensions/distbuild-trove-nfsboot.check new file mode 100755 index 00000000..38c491e5 --- /dev/null +++ b/extensions/distbuild-trove-nfsboot.check @@ -0,0 +1,150 @@ +#!/usr/bin/python +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +'''Preparatory checks for Morph 'distbuild-trove-nfsboot' write extension''' + +import cliapp +import logging +import os + +import morphlib.writeexts + + +class DistbuildTroveNFSBootCheckExtension(morphlib.writeexts.WriteExtension): + + nfsboot_root = '/srv/nfsboot' + remote_user = 'root' + + required_vars = [ + 'DISTBUILD_CONTROLLER', + 'DISTBUILD_GIT_SERVER', + 'DISTBUILD_SHARED_ARTIFACT_CACHE', + 'DISTBUILD_TROVE_ID', + 'DISTBUILD_WORKERS', + 'DISTBUILD_WORKER_SSH_KEY', + ] + + def system_path(self, system_name, version_label=None): + if version_label: + return os.path.join(self.nfsboot_root, system_name, 'systems', + version_label, 'run') + else: + return os.path.join(self.nfsboot_root, system_name) + + def process_args(self, args): + if len(args) != 1: + raise cliapp.AppException('Wrong number of command line args') + + nfs_host = args[0] + nfs_netloc = '%s@%s' % (self.remote_user, nfs_host) + + version_label = os.getenv('VERSION_LABEL', 'factory') + + missing_vars = [var for var in self.required_vars + if not var in os.environ] + if missing_vars: + raise cliapp.AppException( + 'Please set: %s' % ', '.join(missing_vars)) + + controllers = os.getenv('DISTBUILD_CONTROLLER').split() + workers = os.getenv('DISTBUILD_WORKERS').split() + + if len(controllers) != 1: + raise cliapp.AppException('Please specify exactly one controller.') + + if len(workers) == 0: + raise cliapp.AppException('Please specify at least one worker.') + + upgrade = self.get_environment_boolean('UPGRADE') + + self.check_good_server(nfs_netloc) + + system_names = set(controllers + workers) + for system_name in system_names: + if upgrade: + self.check_upgradeable(nfs_netloc, system_name, version_label) + else: + system_path = self.system_path(system_name) + + if self.remote_directory_exists(nfs_netloc, system_path): + if self.get_environment_boolean('OVERWRITE') == False: + raise cliapp.AppException( + 'System %s already exists at %s:%s. Try `morph ' + 'upgrade` instead of `morph deploy`.' 
% ( + system_name, nfs_netloc, system_path)) + + def check_good_server(self, netloc): + # FIXME: assumes root + self.check_ssh_connectivity(netloc.split('@')[-1]) + + # Is an NFS server + try: + cliapp.ssh_runcmd( + netloc, ['test', '-e', '/etc/exports']) + except cliapp.AppException: + raise cliapp.AppException('server %s is not an nfs server' + % netloc) + try: + cliapp.ssh_runcmd( + netloc, ['systemctl', 'is-enabled', 'nfs-server.service']) + + except cliapp.AppException: + raise cliapp.AppException('server %s does not control its ' + 'nfs server by systemd' % netloc) + + # TFTP server exports /srv/nfsboot/tftp + tftp_root = os.path.join(self.nfsboot_root, 'tftp') + try: + cliapp.ssh_runcmd( + netloc, ['test' , '-d', tftp_root]) + except cliapp.AppException: + raise cliapp.AppException('server %s does not export %s' % + (netloc, tftp_root)) + + def check_upgradeable(self, nfs_netloc, system_name, version_label): + '''Check that there is already a version of the system present. + + Distbuild nodes are stateless, so an upgrade is actually pretty much + the same as an initial deployment. This test is just a sanity check. + + ''' + system_path = self.system_path(system_name) + system_version_path = self.system_path(system_name, version_label) + + if not self.remote_directory_exists(nfs_netloc, system_path): + raise cliapp.AppException( + 'System %s not found at %s:%s, cannot deploy an upgrade.' % ( + system_name, nfs_netloc, system_path)) + + if self.remote_directory_exists(nfs_netloc, system_version_path): + if self.get_environment_boolean('OVERWRITE'): + pass + else: + raise cliapp.AppException( + 'System %s version %s already exists at %s:%s.' % ( + system_name, version_label, nfs_netloc, + system_version_path)) + + def remote_directory_exists(self, nfs_netloc, path): + try: + cliapp.ssh_runcmd(nfs_netloc, ['test', '-d', path]) + except cliapp.AppException as e: + logging.debug('SSH exception: %s', e) + return False + + return True + + +DistbuildTroveNFSBootCheckExtension().run() diff --git a/extensions/distbuild-trove-nfsboot.write b/extensions/distbuild-trove-nfsboot.write new file mode 100755 index 00000000..a5a5b094 --- /dev/null +++ b/extensions/distbuild-trove-nfsboot.write @@ -0,0 +1,283 @@ +#!/usr/bin/python +# Copyright (C) 2013-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + + +'''Morph .write extension for a distbuild network booting off a Trove with NFS. + +''' + + +import os +import sys +import tempfile + +import cliapp +import morphlib.writeexts + + +class DistbuildTroveNFSBootWriteExtension(morphlib.writeexts.WriteExtension): + + '''Create an NFS root and kernel on TFTP during Morph's deployment. + + See distbuild-trove-nfsboot.help for documentation. + + ''' + + nfsboot_root = '/srv/nfsboot' + remote_user = 'root' + + def system_path(self, system_name, version_label=None): + if version_label: + # The 'run' directory is kind of a historical artifact. 
Baserock + # systems that have Btrfs root disks maintain an orig/ and a run/ + # subvolume, so that one can find changes that have been made at + # runtime. For distbuild systems, this isn't necessary because the + # root filesystems of the nodes are effectively stateless. However, + # existing systems have bootloaders configured to look for the + # 'run' directory, so we need to keep creating it. + return os.path.join(self.nfsboot_root, system_name, 'systems', + version_label, 'run') + else: + return os.path.join(self.nfsboot_root, system_name) + + def process_args(self, args): + if len(args) != 2: + raise cliapp.AppException('Wrong number of command line args') + + local_system_path, nfs_host = args + + nfs_netloc = '%s@%s' % (self.remote_user, nfs_host) + + version_label = os.getenv('VERSION_LABEL', 'factory') + + controller_name = os.getenv('DISTBUILD_CONTROLLER') + worker_names = os.getenv('DISTBUILD_WORKERS').split() + system_names = set([controller_name] + worker_names) + + git_server = os.getenv('DISTBUILD_GIT_SERVER') + shared_artifact_cache = os.getenv('DISTBUILD_SHARED_ARTIFACT_CACHE') + trove_id = os.getenv('DISTBUILD_TROVE_ID') + worker_ssh_key_path = os.getenv('DISTBUILD_WORKER_SSH_KEY') + + host_map = self.parse_host_map_string(os.getenv('HOST_MAP', '')) + + kernel_relpath = self.find_kernel(local_system_path) + + copied_rootfs = None + for system_name in system_names: + remote_system_path = self.system_path(system_name, version_label) + if copied_rootfs is None: + self.transfer_system( + nfs_netloc, local_system_path, remote_system_path) + copied_rootfs = remote_system_path + else: + self.duplicate_remote_system( + nfs_netloc, copied_rootfs, remote_system_path) + + for system_name in system_names: + remote_system_path = self.system_path(system_name, version_label) + self.link_kernel_to_tftpboot_path( + nfs_netloc, system_name, version_label, kernel_relpath) + self.set_hostname( + nfs_netloc, system_name, remote_system_path) + self.write_distbuild_config( + nfs_netloc, system_name, remote_system_path, git_server, + shared_artifact_cache, trove_id, worker_ssh_key_path, + controller_name, worker_names, host_map=host_map) + + self.configure_nfs_exports(nfs_netloc, system_names) + + for system_name in system_names: + self.update_default_version(nfs_netloc, system_name, version_label) + + def parse_host_map_string(self, host_map_string): + '''Parse the HOST_MAP variable + + Returns a dict mapping hostname to value (where value is an IP + address, a fully-qualified domain name, an alternate hostname, or + whatever). + + ''' + pairs = host_map_string.split(' ') + return morphlib.util.parse_environment_pairs({}, pairs) + + def transfer_system(self, nfs_netloc, local_system_path, + remote_system_path): + self.status(msg='Copying rootfs to %(nfs_netloc)s', + nfs_netloc=nfs_netloc) + cliapp.ssh_runcmd( + nfs_netloc, ['mkdir', '-p', remote_system_path]) + # The deployed rootfs may have been created by OSTree, so definitely + # don't pass --hard-links to `rsync`. 
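+        # (Rationale, as a non-authoritative aside: OSTree deduplicates
+        # identical files into shared hard links, so preserving the link
+        # structure on the target would let a later in-place edit of one
+        # file silently change every file sharing its inode.)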
+ cliapp.runcmd( + ['rsync', '--archive', '--delete', '--info=progress2', + '--protect-args', '--partial', '--sparse', '--xattrs', + local_system_path + '/', + '%s:%s' % (nfs_netloc, remote_system_path)], stdout=sys.stdout) + + def duplicate_remote_system(self, nfs_netloc, source_system_path, + target_system_path): + self.status(msg='Duplicating rootfs to %(target_system_path)s', + target_system_path=target_system_path) + cliapp.ssh_runcmd(nfs_netloc, + ['mkdir', '-p', target_system_path]) + # We can't pass --info=progress2 here, because it may not be available + # in the remote 'rsync'. The --info setting was added in RSync 3.1.0, + # old versions of Baserock have RSync 3.0.9. So the user doesn't get + # any progress info on stdout for the 'duplicate' stage. + cliapp.ssh_runcmd(nfs_netloc, + ['rsync', '--archive', '--delete', '--protect-args', '--partial', + '--sparse', '--xattrs', source_system_path + '/', + target_system_path], stdout=sys.stdout) + + def find_kernel(self, local_system_path): + bootdir = os.path.join(local_system_path, 'boot') + image_names = ['vmlinuz', 'zImage', 'uImage'] + + for name in image_names: + try_path = os.path.join(bootdir, name) + if os.path.exists(try_path): + kernel_path = os.path.relpath(try_path, local_system_path) + break + else: + raise cliapp.AppException( + 'Could not find a kernel in the system: none of ' + '%s found' % ', '.join(image_names)) + return kernel_path + + def link_kernel_to_tftpboot_path(self, nfs_netloc, system_name, + version_label, kernel_relpath): + '''Create links for TFTP server for a system's kernel.''' + + remote_system_path = self.system_path(system_name, version_label) + kernel_dest = os.path.join(remote_system_path, kernel_relpath) + + self.status(msg='Creating links to %(name)s kernel in tftp directory', + name=system_name) + tftp_dir = os.path.join(self.nfsboot_root , 'tftp') + + versioned_kernel_name = "%s-%s" % (system_name, version_label) + kernel_name = system_name + + cliapp.ssh_runcmd(nfs_netloc, + ['ln', '-f', kernel_dest, + os.path.join(tftp_dir, versioned_kernel_name)]) + + cliapp.ssh_runcmd(nfs_netloc, + ['ln', '-sf', versioned_kernel_name, + os.path.join(tftp_dir, kernel_name)]) + + def set_remote_file_contents(self, nfs_netloc, path, text): + with tempfile.NamedTemporaryFile() as f: + f.write(text) + f.flush() + cliapp.runcmd( + ['scp', f.name, '%s:%s' % (nfs_netloc, path)]) + + def set_hostname(self, nfs_netloc, system_name, system_path): + hostname_path = os.path.join(system_path, 'etc', 'hostname') + self.set_remote_file_contents( + nfs_netloc, hostname_path, system_name + '\n') + + def write_distbuild_config(self, nfs_netloc, system_name, system_path, + git_server, shared_artifact_cache, trove_id, + worker_ssh_key_path, controller_name, + worker_names, host_map = {}): + '''Write /etc/distbuild/distbuild.conf on the node. + + This .write extension takes advantage of the 'generic' mode of + distbuild.configure. Each node is not configured until first-boot, + when distbuild-setup.service runs and configures the node based on the + contents of /etc/distbuild/distbuild.conf. 
+ + ''' + def host(hostname): + return host_map.get(hostname, hostname) + + config = { + 'ARTIFACT_CACHE_SERVER': host(shared_artifact_cache), + 'CONTROLLERHOST': host(controller_name), + 'TROVE_HOST': host(git_server), + 'TROVE_ID': trove_id, + 'DISTBUILD_CONTROLLER': system_name == controller_name, + 'DISTBUILD_WORKER': system_name in worker_names, + 'WORKERS': ', '.join(map(host, worker_names)), + 'WORKER_SSH_KEY': '/etc/distbuild/worker.key', + } + + config_text = '\n'.join( + '%s: %s' % (key, value) for key, value in config.iteritems()) + config_text = \ + '# Generated by distbuild-trove-nfsboot.write\n' + \ + config_text + '\n' + path = os.path.join(system_path, 'etc', 'distbuild') + cliapp.ssh_runcmd( + nfs_netloc, ['mkdir', '-p', path]) + cliapp.runcmd( + ['scp', worker_ssh_key_path, '%s:%s' % (nfs_netloc, path)]) + self.set_remote_file_contents( + nfs_netloc, os.path.join(path, 'distbuild.conf'), config_text) + + def configure_nfs_exports(self, nfs_netloc, system_names): + '''Ensure the Trove is set up to export the NFS roots we need. + + This doesn't handle setting up the TFTP daemon. We assume that is + already running. + + ''' + for system_name in system_names: + exported_path = self.system_path(system_name) + exports_path = '/etc/exports' + + # Rather ugly SSH hackery follows to ensure each system path is + # listed in /etc/exports. + try: + cliapp.ssh_runcmd( + nfs_netloc, ['grep', '-q', exported_path, exports_path]) + except cliapp.AppException: + ip_mask = '*' + options = 'rw,no_subtree_check,no_root_squash,async' + exports_string = '%s %s(%s)\n' % (exported_path, ip_mask, + options) + exports_append_sh = '''\ + set -eu + target="$1" + temp=$(mktemp) + cat "$target" > "$temp" + cat >> "$temp" + mv "$temp" "$target" + ''' + cliapp.ssh_runcmd( + nfs_netloc, + ['sh', '-c', exports_append_sh, '--', exports_path], + feed_stdin=exports_string) + + cliapp.ssh_runcmd(nfs_netloc, + ['systemctl', 'restart', 'nfs-server.service']) + + def update_default_version(self, remote_netloc, system_name, + version_label): + self.status(msg='Linking \'default\' to %(version)s for %(system)s', + version=version_label, system=system_name) + system_path = self.system_path(system_name) + system_version_path = os.path.join(system_path, 'systems', + version_label) + default_path = os.path.join(system_path, 'systems', 'default') + + cliapp.ssh_runcmd(remote_netloc, + ['ln', '-sfn', system_version_path, default_path]) + + +DistbuildTroveNFSBootWriteExtension().run() diff --git a/extensions/distbuild-trove-nfsboot.write.help b/extensions/distbuild-trove-nfsboot.write.help new file mode 100644 index 00000000..62f1455c --- /dev/null +++ b/extensions/distbuild-trove-nfsboot.write.help @@ -0,0 +1,49 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + +help: | + Deploy a distbuild network, using a Trove to serve the kernel and rootfs. + + The `location` argument is the hostname of the Trove system. 
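+
+  An illustrative cluster fragment (hostname invented) would deploy it as:
+
+      type: extensions/distbuild-trove-nfsboot
+      location: trove.example.com
+
+  together with the configuration values listed below.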
+
+    The following configuration values must be specified:
+
+     - DISTBUILD_CONTROLLER: hostname of controller system
+     - DISTBUILD_WORKERS: hostnames of each worker system
+     - DISTBUILD_GIT_SERVER: Trove hostname
+     - DISTBUILD_SHARED_ARTIFACT_CACHE: Trove hostname
+     - DISTBUILD_TROVE_ID: Trove ID
+     - DISTBUILD_WORKER_SSH_KEY: SSH key to be used for ssh:// repos
+
+    A note on TROVE_ID: the current distbuild-setup service requires that
+    a single 'Trove ID' is specified. This is used in Morph for expanding
+    keyed URLs. If you set TROVE_ID=foo, for example, foo:bar will be
+    expanded to git://$GIT_SERVER/foo/bar, in addition to the standard
+    baserock: and upstream: prefixes that you can use.
+
+    The WORKER_SSH_KEY must be provided, even if you don't need it. The
+    distbuild-setup service could be changed to make it optional.
+
+    The following configuration values are optional:
+
+     - HOST_MAP: a list of key=value pairs mapping hostnames to IP
+       addresses, or fully-qualified domain names. Useful if you cannot
+       rely on hostname resolution working for your deployment.
+
+    The extension will connect to root@location via ssh to copy the kernel
+    and rootfs, and configure the nfs server. It will duplicate the kernel
+    and rootfs once for each node in the distbuild network.
+
+    The deployment mechanism makes assumptions about the bootloader
+    configuration of the target machines.
diff --git a/extensions/distbuild.configure b/extensions/distbuild.configure
new file mode 100644
index 00000000..062aaecc
--- /dev/null
+++ b/extensions/distbuild.configure
@@ -0,0 +1,132 @@
+#!/bin/sh
+# Copyright (C) 2013-2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configure extension to configure a Baserock
+# build node, as part of a distributed building cluster. It uses the
+# following variables from the environment:
+#
+#  * DISTBUILD_CONTROLLER: if 'True', machine is set up as the controller.
+#  * DISTBUILD_WORKER: if 'True', machine is set up as a worker.
+#  * TROVE_ID: hostname and Trove prefix of the server to pull source
+#    from and push built artifacts to.
+#  * TROVE_HOST: FQDN of the same server as in TROVE_ID
+#
+# The following variable is optional:
+#
+#  * ARTIFACT_CACHE_SERVER: by default artifacts are pushed to the same
+#    Trove that served the source, but you can use a different one.
+#
+# The following variables are required for worker nodes only:
+#
+#  * CONTROLLERHOST: hostname or IP address of distbuild controller machine.
+#  * WORKER_SSH_KEY: identity used to authenticate with Trove
+#
+# The following variable is required for the controller node only:
+#
+#  * WORKERS: hostnames or IP addresses of worker nodes, comma-separated.
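+#
+# For illustration only, with hypothetical values: a worker node deployed
+# with the variables above ends up with /etc/distbuild/distbuild.conf
+# containing plain YAML, written by the Python snippet at the end of this
+# script, along these lines:
+#
+#     CONTROLLERHOST: controller.example.com
+#     DISTBUILD_CONTROLLER: 'False'
+#     DISTBUILD_WORKER: 'True'
+#     TROVE_HOST: trove.example.com
+#     TROVE_ID: example
+#     WORKER_SSH_KEY: /etc/distbuild/worker.key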
+
+set -e
+
+if [ -n "$DISTBUILD_GENERIC" ]; then
+    echo "Not configuring the distbuild node, it will be generic"
+    exit 0
+fi
+
+# Set default values for these two options if they are unset, so that if
+# the user specifies no distbuild config at all, the configure extension
+# exits without doing anything, but does not raise an error.
+DISTBUILD_CONTROLLER=${DISTBUILD_CONTROLLER-False}
+DISTBUILD_WORKER=${DISTBUILD_WORKER-False}
+
+if [ "$DISTBUILD_CONTROLLER" = False -a "$DISTBUILD_WORKER" = False ]; then
+    exit 0
+fi
+
+set -u
+
+# Check that all the variables needed are present:
+
+error_vars=false
+
+if [ "x$TROVE_HOST" = "x" ]; then
+    echo "ERROR: TROVE_HOST needs to be defined."
+    error_vars=true
+fi
+
+if [ "x$TROVE_ID" = "x" ]; then
+    echo "ERROR: TROVE_ID needs to be defined."
+    error_vars=true
+fi
+
+if [ "$DISTBUILD_WORKER" = True ]; then
+    if ! ssh-keygen -lf "$WORKER_SSH_KEY" > /dev/null 2>&1; then
+        echo "ERROR: WORKER_SSH_KEY is not a valid ssh key."
+        error_vars=true
+    fi
+
+    if [ "x$CONTROLLERHOST" = "x" ]; then
+        echo "ERROR: CONTROLLERHOST needs to be defined."
+        error_vars=true
+    fi
+fi
+
+if [ "$DISTBUILD_CONTROLLER" = True ]; then
+    if [ "x$WORKERS" = "x" ]; then
+        echo "ERROR: WORKERS needs to be defined."
+        error_vars=true
+    fi
+fi
+
+if "$error_vars"; then
+    exit 1
+fi
+
+
+ROOT="$1"
+
+DISTBUILD_DATA="$ROOT/etc/distbuild"
+mkdir -p "$DISTBUILD_DATA"
+
+# If it's a worker, install the worker ssh key.
+if [ "$DISTBUILD_WORKER" = True ]
+then
+    install -m 0644 "$WORKER_SSH_KEY" "$DISTBUILD_DATA/worker.key"
+fi
+
+
+
+# Create the configuration file
+python <<'EOF' >"$DISTBUILD_DATA/distbuild.conf"
+import os, sys, yaml
+
+distbuild_configuration={
+    'TROVE_ID': os.environ['TROVE_ID'],
+    'TROVE_HOST': os.environ['TROVE_HOST'],
+    'DISTBUILD_WORKER': os.environ['DISTBUILD_WORKER'],
+    'DISTBUILD_CONTROLLER': os.environ['DISTBUILD_CONTROLLER'],
+    'WORKER_SSH_KEY': '/etc/distbuild/worker.key',
+}
+
+
+optional_keys = ('ARTIFACT_CACHE_SERVER', 'CONTROLLERHOST', 'WORKERS',
+                 'TROVE_BACKUP_KEYS')
+
+for key in optional_keys:
+    if key in os.environ:
+        distbuild_configuration[key] = os.environ[key]
+
+yaml.dump(distbuild_configuration, sys.stdout, default_flow_style=False)
+EOF
diff --git a/extensions/fstab.configure b/extensions/fstab.configure
new file mode 100755
index 00000000..b9154eee
--- /dev/null
+++ b/extensions/fstab.configure
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright © 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+# +# =*= License: GPL-2 =*= + + +import os +import sys + +import morphlib + +envvars = {k: v for (k, v) in os.environ.iteritems() if k.startswith('FSTAB_')} + +conf_file = os.path.join(sys.argv[1], 'etc/fstab') +morphlib.util.write_from_dict(conf_file, envvars) diff --git a/extensions/hosts b/extensions/hosts new file mode 100644 index 00000000..5b97818d --- /dev/null +++ b/extensions/hosts @@ -0,0 +1 @@ +localhost ansible_connection=local diff --git a/extensions/hosts.configure b/extensions/hosts.configure new file mode 100755 index 00000000..6b068d04 --- /dev/null +++ b/extensions/hosts.configure @@ -0,0 +1,48 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright © 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# =*= License: GPL-2 =*= + + +import os +import sys +import socket + +import morphlib + +def validate(var, line): + xs = line.split() + if len(xs) == 0: + raise morphlib.Error("`%s: %s': line is empty" % (var, line)) + + ip = xs[0] + hostnames = xs[1:] + + if len(hostnames) == 0: + raise morphlib.Error("`%s: %s': missing hostname" % (var, line)) + + family = socket.AF_INET6 if ':' in ip else socket.AF_INET + + try: + socket.inet_pton(family, ip) + except socket.error: + raise morphlib.Error("`%s: %s' invalid ip" % (var, ip)) + +envvars = {k: v for (k, v) in os.environ.iteritems() if k.startswith('HOSTS_')} + +conf_file = os.path.join(sys.argv[1], 'etc/hosts') +morphlib.util.write_from_dict(conf_file, envvars, validate) diff --git a/extensions/image-package-example/README b/extensions/image-package-example/README new file mode 100644 index 00000000..c1322f25 --- /dev/null +++ b/extensions/image-package-example/README @@ -0,0 +1,9 @@ +Image package example scripts +============================= + +These are scripts used to create disk images or install the system onto +an existing disk. + +This is also implemented independently for the rawdisk.write write +extension; see morphlib.writeexts.WriteExtension.create_local_system() +for a similar, python implementation. diff --git a/extensions/image-package-example/common.sh.in b/extensions/image-package-example/common.sh.in new file mode 100644 index 00000000..9a7389a7 --- /dev/null +++ b/extensions/image-package-example/common.sh.in @@ -0,0 +1,72 @@ +#!/bin/false +# Script library to be used by disk-install.sh and make-disk-image.sh + +status(){ + echo "$@" +} + +info(){ + echo "$@" >&2 +} + +warn(){ + echo "$@" >&2 +} + +extract_rootfs(){ + tar -C "$1" -xf @@ROOTFS_TAR_PATH@@ . 
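+    # @@ROOTFS_TAR_PATH@@ is not shell syntax: it is a placeholder that
+    # make-image-package.sh.in below substitutes, via sed, with a shell
+    # word expanding to the path of the rootfs tarball shipped alongside
+    # these scripts.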
+}
+
+make_disk_image(){
+    truncate --size "$1" "$2"
+}
+
+format_disk(){
+    local disk="$1"
+    mkfs.ext4 -F -L rootfs "$disk"
+}
+
+install_fs_config(){
+    local mountpoint="$1"
+    local rootdisk="${2-/dev/vda}"
+    cat >>"$mountpoint/etc/fstab" <<EOF
+$rootdisk / ext4 defaults 0 1
+EOF
+}
+
+die(){
+    echo "$@" >&2
+    exit 1
+}
+
+warn(){
+    echo "$@" >&2
+}
+
+info(){
+    echo "$@" >&2
+}
+
+shellescape(){
+    echo "'$(echo "$1" | sed -e "s/'/'\\''/g")'"
+}
+
+sedescape(){
+    # Escape the passed in string so it can be safely interpolated into
+    # a sed expression as a literal value.
+    echo "$1" | sed -e 's/[\/&]/\\&/g'
+}
+
+ROOTDIR="$1"
+OUTPUT_TAR="$2"
+td="$(mktemp -d)"
+IMAGE_SUBDIR="${IMAGE_SUBDIR-image_files}"
+SCRIPT_SUBDIR="${SCRIPT_SUBDIR-tools}"
+ROOTFS_TAR="${ROOTFS_TAR-rootfs.tar}"
+
+# Generate shell snippets that will expand to paths to various resources
+# needed by the scripts.
+# They expand to a single shell word, so constructs like the following work
+# SCRIPT_DIR=@@SCRIPT_DIR@@
+# dd if="$SCRIPT_DIR/mbr" of="$disk" count=1
+# tar -C "$mountpoint" -xf @@ROOTFS_TAR_PATH@@ .
+find_script_dir='"$(readlink -f "$(dirname "$0")")"'
+image_dir="$find_script_dir/../$(shellescape "$IMAGE_SUBDIR")"
+rootfs_tar_path="$image_dir/$(shellescape "$ROOTFS_TAR")"
+
+install_script(){
+    local source_file="$1"
+    local output_dir="$2"
+    local target_file="$output_dir/$SCRIPT_SUBDIR/$(basename "$source_file" .in)"
+    sed -e "s/@@SCRIPT_DIR@@/$(sedescape "$find_script_dir")/g" \
+        -e "s/@@IMAGE_DIR@@/$(sedescape "$image_dir")/g" \
+        -e "s/@@ROOTFS_TAR_PATH@@/$(sedescape "$rootfs_tar_path")/g" \
+        "$source_file" \
+    | install -D -m 755 /proc/self/fd/0 "$target_file"
+}
+
+install_scripts(){
+    local output_dir="$1"
+    (
+        IFS="${INCLUDE_SCRIPTS_SEPARATOR-:}"
+        for script in $INCLUDE_SCRIPTS; do
+            local script_path="$(pwd)/$script"
+            if [ ! -e "$script_path" ]; then
+                warn Script "$script" not found, ignoring
+                continue
+            fi
+            install_script "$script" "$output_dir"
+        done
+    )
+}
+
+install_bootloader_blobs(){
+    local output_dir="$1"
+    local image_dir="$output_dir/$IMAGE_SUBDIR"
+    (
+        IFS="${BOOTLOADER_BLOBS_SEPARATOR-:}"
+        for blob in $BOOTLOADER_BLOBS; do
+            local blob_path="$ROOTDIR/$blob"
+            if [ ! -e "$blob_path" ]; then
+                warn Bootloader blob "$blob" not found, ignoring
+                continue
+            fi
+            install -D -m644 "$blob_path" "$image_dir/$(basename "$blob_path")"
+        done
+    )
+}
+
+# Determine a basename for our directory: the same as our tarball with its
+# extensions removed. This is needed since a tarball package usually has a
+# base directory containing its contents, rather than extracting into the
+# current directory.
+output_dir="$(basename "$OUTPUT_TAR")"
+for ext in .xz .bz2 .gzip .gz .tgz .tar; do
+    output_dir="${output_dir%$ext}"
+done
+
+info Installing scripts
+install_scripts "$td/$output_dir"
+
+info Installing bootloader blobs
+install_bootloader_blobs "$td/$output_dir"
+
+info Writing rootfs tar to "$IMAGE_SUBDIR/$ROOTFS_TAR"
+tar -C "$ROOTDIR" -c .
\ +| sh -c "${ROOTFS_COMPRESS-cat}" >"$td/$output_dir/$IMAGE_SUBDIR/$ROOTFS_TAR" + +info Writing image package tar to "$OUTPUT_TAR" +tar -C "$td" -c "$output_dir" | sh -c "${OUTPUT_COMPRESS-cat}" >"$OUTPUT_TAR" diff --git a/extensions/initramfs.write b/extensions/initramfs.write new file mode 100755 index 00000000..1059defa --- /dev/null +++ b/extensions/initramfs.write @@ -0,0 +1,26 @@ +#!/bin/sh +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# =*= License: GPL-2 =*= + +set -e + +ROOTDIR="$1" +INITRAMFS_PATH="$2" + +(cd "$ROOTDIR" && + find . -print0 | + cpio -0 -H newc -o) | + gzip -c | install -D -m644 /dev/stdin "$INITRAMFS_PATH" diff --git a/extensions/initramfs.write.help b/extensions/initramfs.write.help new file mode 100644 index 00000000..54d3ae8c --- /dev/null +++ b/extensions/initramfs.write.help @@ -0,0 +1,55 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + +help: | + + Create an initramfs for a system by taking an existing system and + converting it to the appropriate format. + + The system must have a `/init` executable as the userland entry-point. + This can have a different path, if `rdinit=$path` is added to + the kernel command line. This can be added to the `rawdisk`, + `virtualbox-ssh` and `kvm` write extensions with the `KERNEL_CMDLINE` + option. + + It is possible to use a ramfs as the final rootfs without a `/init` + executable, by setting `root=/dev/mem`, or `rdinit=/sbin/init`, + but this is beyond the scope for the `initramfs.write` extension. + + The intended use of initramfs.write is to be part of a nested + deployment, so the parent system has an initramfs stored as + `/boot/initramfs.gz`. See the following example: + + name: initramfs-test + kind: cluster + systems: + - morph: minimal-system-x86_64-generic + deploy: + system: + type: rawdisk + location: initramfs-system-x86_64.img + DISK_SIZE: 1G + HOSTNAME: initramfs-system + INITRAMFS_PATH: boot/initramfs.gz + subsystems: + - morph: initramfs-x86_64 + deploy: + initramfs: + type: initramfs + location: boot/initramfs.gz + + Parameters: + + * location: the path where the initramfs will be installed (e.g. 
+    `boot/initramfs.gz`) in the above example
diff --git a/extensions/install-essential-files.configure b/extensions/install-essential-files.configure
new file mode 100755
index 00000000..2779b0d4
--- /dev/null
+++ b/extensions/install-essential-files.configure
@@ -0,0 +1,42 @@
+#!/usr/bin/env python2
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+''' A Morph configuration extension for adding essential files to a system
+
+It will read the manifest files located in essential-files/manifest,
+then use the contents of those files to determine which files
+to install into the target system.
+
+'''
+
+import subprocess
+import os
+
+import cliapp
+
+class InstallEssentialFilesConfigureExtension(cliapp.Application):
+
+    def process_args(self, args):
+        target_root = args[0]
+        os.environ["INSTALL_FILES"] = "essential-files/manifest"
+        self.install_essential_files(target_root)
+
+    def install_essential_files(self, target_root):
+        command = os.path.join(os.path.dirname(__file__),
+                               "install-files.configure")
+        subprocess.check_call([command, target_root])
+
+InstallEssentialFilesConfigureExtension().run()
diff --git a/extensions/install-essential-files.configure.help b/extensions/install-essential-files.configure.help
new file mode 100644
index 00000000..9148aeff
--- /dev/null
+++ b/extensions/install-essential-files.configure.help
@@ -0,0 +1,20 @@
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+    This installs files from the essential-files/ folder in your
+    definitions.git repo, according to essential-files/manifest.
+
+    It wraps the install-files.configure extension. Take a look at that
+    extension's help to learn more about the format of the manifest file.
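Because install-essential-files.configure is a thin wrapper, the underlying
extension can also be exercised by hand the same way `morph deploy` runs it:
the unpacked target root is the only positional argument, and all options
arrive through the environment. A minimal sketch, with hypothetical paths:

    import os
    import subprocess

    env = dict(os.environ)
    env['INSTALL_FILES'] = 'essential-files/manifest'  # hypothetical manifest
    # The extension modifies the unpacked system root in place.
    subprocess.check_call(
        ['extensions/install-files.configure', '/tmp/target-root'], env=env)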
diff --git a/extensions/install-files.configure b/extensions/install-files.configure
new file mode 100755
index 00000000..341cce61
--- /dev/null
+++ b/extensions/install-files.configure
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+''' A Morph configuration extension for adding arbitrary files to a system
+
+It will read the manifest files specified in the environment variable
+INSTALL_FILES, then use the contents of those files to determine which files
+to install into the target system.
+
+'''
+
+import cliapp
+import os
+import errno
+import re
+import sys
+import shlex
+import shutil
+import stat
+
+try:
+    import jinja2
+    jinja_available = True
+except ImportError:
+    jinja_available = False
+
+class InstallFilesConfigureExtension(cliapp.Application):
+
+    def process_args(self, args):
+        if 'INSTALL_FILES' not in os.environ:
+            return
+        target_root = args[0]
+        manifests = shlex.split(os.environ['INSTALL_FILES'])
+        for manifest in manifests:
+            self.install_manifest(manifest, target_root)
+
+    def install_manifest(self, manifest, target_root):
+        manifest_dir = os.path.dirname(manifest)
+        with open(manifest) as f:
+            entries = f.readlines()
+            for entry in entries:
+                self.install_entry(entry, manifest_dir, target_root)
+
+    def force_symlink(self, source, link_name):
+        try:
+            os.symlink(source, link_name)
+        except OSError as e:
+            if e.errno == errno.EEXIST:
+                os.remove(link_name)
+                os.symlink(source, link_name)
+
+    def install_entry(self, entry, manifest_root, target_root):
+        m = re.match(r'(template )?(overwrite )?'
+                     r'([0-7]+) ([0-9]+) ([0-9]+) (\S+)', entry)
+
+        if m:
+            template = m.group(1)
+            overwrite = m.group(2)
+            mode = int(m.group(3), 8)   # mode is octal
+            uid = int(m.group(4))
+            gid = int(m.group(5))
+            path = m.group(6)
+        else:
+            raise cliapp.AppException('Invalid manifest entry, '
+                                      'format: [template] [overwrite] '
+                                      '<mode> <uid> <gid> <filename>')
+
+        dest_path = os.path.join(target_root, './' + path)
+        if stat.S_ISDIR(mode):
+            if os.path.exists(dest_path) and not overwrite:
+                dest_stat = os.stat(dest_path)
+                if (mode != dest_stat.st_mode
+                        or uid != dest_stat.st_uid
+                        or gid != dest_stat.st_gid):
+                    raise cliapp.AppException('"%s" exists and is not '
+                                              'identical to directory '
+                                              '"%s"' % (dest_path, entry))
+            else:
+                os.mkdir(dest_path, mode)
+                os.chown(dest_path, uid, gid)
+                os.chmod(dest_path, mode)
+
+        elif stat.S_ISLNK(mode):
+            if os.path.lexists(dest_path) and not overwrite:
+                raise cliapp.AppException('Symlink already exists at %s'
+                                          % dest_path)
+            else:
+                linkdest = os.readlink(os.path.join(manifest_root,
+                                                    './' + path))
+                self.force_symlink(linkdest, dest_path)
+                os.lchown(dest_path, uid, gid)
+
+        elif stat.S_ISREG(mode):
+            if os.path.lexists(dest_path) and not overwrite:
+                raise cliapp.AppException('File already exists at %s'
+                                          % dest_path)
+            else:
+                if template:
+                    if not jinja_available:
+                        raise cliapp.AppException(
+                            "Failed to install template file `%s': "
+                            'install-files templates require jinja2'
+                            % path)
+
+                    loader = jinja2.FileSystemLoader(manifest_root)
+                    env = jinja2.Environment(loader=loader,
+                                             keep_trailing_newline=True)
+
+                    env.get_template(path).stream(os.environ).dump(dest_path)
+                else:
+                    shutil.copyfile(os.path.join(manifest_root, './' + path),
+                                    dest_path)
+
+                os.chown(dest_path, uid, gid)
+                os.chmod(dest_path, mode)
+
+        else:
+            raise cliapp.AppException('Mode given in "%s" is not a file,'
+                                      ' symlink or directory' % entry)
+
+InstallFilesConfigureExtension().run()
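The mode field of a manifest entry does double duty, which is why the code
above parses it with int(x, 8) and then dispatches on stat.S_ISDIR,
stat.S_ISLNK and stat.S_ISREG: the leading octal digits carry the file type
and the trailing digits the permission bits. A small self-contained
illustration:

    import stat

    mode = int('0100644', 8)    # regular file, rw-r--r--
    assert stat.S_ISREG(mode)
    assert stat.S_IMODE(mode) == 0o644

    mode = int('0040755', 8)    # directory, rwxr-xr-x
    assert stat.S_ISDIR(mode)

    mode = int('0120000', 8)    # symbolic link
    assert stat.S_ISLNK(mode)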
diff --git a/extensions/install-files.configure.help b/extensions/install-files.configure.help
new file mode 100644
index 00000000..991c26c8
--- /dev/null
+++ b/extensions/install-files.configure.help
@@ -0,0 +1,74 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+    Install a set of files onto a system
+
+    To use this extension you create a directory of files you want to
+    install onto the target system.
+
+    In this example we want to copy some ssh keys onto a system
+
+        % mkdir sshkeyfiles
+        % mkdir -p sshkeyfiles/root/.ssh
+        % cp id_rsa sshkeyfiles/root/.ssh
+        % cp id_rsa.pub sshkeyfiles/root/.ssh
+
+    Now we need to create a manifest file to set the file modes
+    and permissions. The manifest file should be created inside the
+    directory that contains the files we're trying to install.
+
+        cat << EOF > sshkeyfiles/manifest
+        0040755 0 0 /root/.ssh
+        0100600 0 0 /root/.ssh/id_rsa
+        0100644 0 0 /root/.ssh/id_rsa.pub
+        EOF
+
+    Then we add the path to our manifest to our cluster morph; this path
+    should be relative to the system definitions repository.
+
+        INSTALL_FILES: sshkeyfiles/manifest
+
+    More generally, entries in the manifest are formatted as:
+        [template] [overwrite] <mode> <uid> <gid> <filename>
+
+    NOTE: Directories on the target must be created if they do not exist.
+
+    The extension supports files, symlinks and directories.
+
+    For example,
+
+        0100644 0 0 /etc/issue
+
+    creates a regular file at /etc/issue with 644 permissions,
+    uid 0 and gid 0, if the file doesn't already exist.
+
+        overwrite 0100644 0 0 /etc/issue
+
+    creates a regular file at /etc/issue with 644 permissions,
+    uid 0 and gid 0; if the file already exists, it is overwritten.
+
+        0100755 0 0 /usr/bin/foo
+
+    creates an executable file at /usr/bin/foo
+
+        0040755 0 0 /etc/foodir
+
+    creates a directory with 755 permissions
+
+        0120000 0 0 /usr/bin/bar
+
+    creates a symlink at /usr/bin/bar
+
+    NOTE: You will still need to make a symlink in the manifest directory.
diff --git a/extensions/installer.configure b/extensions/installer.configure
new file mode 100755
index 00000000..a77dc851
--- /dev/null
+++ b/extensions/installer.configure
@@ -0,0 +1,48 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to configure an installer
+# system. It will create the configuration needed in the installer system
+# to perform an installation.
It uses the following variables from the +# environment: +# +# * INSTALLER_TARGET_STORAGE_DEVICE +# * INSTALLER_ROOTFS_TO_INSTALL +# * INSTALLER_POST_INSTALL_COMMAND (optional, defaults to `reboot -f`) + +import os +import sys +import yaml + +install_config_file = os.path.join(sys.argv[1], 'etc', 'install.conf') + +try: + installer_configuration = { + 'INSTALLER_TARGET_STORAGE_DEVICE': os.environ['INSTALLER_TARGET_STORAGE_DEVICE'], + 'INSTALLER_ROOTFS_TO_INSTALL': os.environ['INSTALLER_ROOTFS_TO_INSTALL'], + } +except KeyError as e: + print "Not configuring as an installer system" + sys.exit(0) + +postinstkey = 'INSTALLER_POST_INSTALL_COMMAND' +installer_configuration[postinstkey] = os.environ.get(postinstkey, 'reboot -f') + +with open(install_config_file, 'w') as f: + f.write( yaml.dump(installer_configuration, default_flow_style=False) ) + +print "Configuration of the installer system in %s" % install_config_file diff --git a/extensions/jffs2.write b/extensions/jffs2.write new file mode 100644 index 00000000..46b69a53 --- /dev/null +++ b/extensions/jffs2.write @@ -0,0 +1,64 @@ +#!/usr/bin/python +#-*- coding: utf-8 -*- +# Copyright © 2015 Codethink Limited + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + + +'''A Morph deployment write extension for creating images with jffs2 + as the root filesystem.''' + + +import cliapp +import os + +import morphlib.writeexts + + +class Jffs2WriteExtension(morphlib.writeexts.WriteExtension): + + '''See jffs2.write.help for documentation.''' + + def process_args(self, args): + if len(args) != 2: + raise cliapp.AppException('Wrong number of command line args') + + temp_root, location = args + + try: + self.create_jffs2_system(temp_root, location) + self.status(msg='Disk image has been created at %(location)s', + location = location) + except Exception: + self.status(msg='Failure to deploy system to %(location)s', + location = location) + raise + + def create_jffs2_system(self, temp_root, location): + erase_block = self.get_erase_block_size() + cliapp.runcmd( + ['mkfs.jffs2', '--pad', '--no-cleanmarkers', + '--eraseblock='+erase_block, '-d', temp_root, '-o', location]) + + def get_erase_block_size(self): + erase_block = os.environ.get('ERASE_BLOCK', '') + + if erase_block == '': + raise cliapp.AppException('ERASE_BLOCK was not given') + + if not erase_block.isdigit(): + raise cliapp.AppException('ERASE_BLOCK must be a whole number') + + return erase_block + +Jffs2WriteExtension().run() diff --git a/extensions/jffs2.write.help b/extensions/jffs2.write.help new file mode 100644 index 00000000..059a354b --- /dev/null +++ b/extensions/jffs2.write.help @@ -0,0 +1,28 @@ +#-*- coding: utf-8 -*- +# Copyright © 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + +help: | + + Creates a system produced by Morph build with a jffs2 filesystem and then + writes to an image. To use this extension, the host system must have access + to mkfs.jffs2 which is provided in the mtd-utilities.morph stratum. + + Parameters: + + * location: the pathname of the disk image to be created/upgraded, or the + path to the physical device. + + * ERASE_BLOCK: the erase block size of the target system, which can be + found in '/sys/class/mtd/mtdx/erasesize' diff --git a/extensions/kvm.check b/extensions/kvm.check new file mode 100755 index 00000000..67cb3d38 --- /dev/null +++ b/extensions/kvm.check @@ -0,0 +1,169 @@ +#!/usr/bin/python +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +'''Preparatory checks for Morph 'kvm' write extension''' + +import cliapp +import os +import re +import urlparse + +import morphlib.writeexts + + +class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension): + + location_pattern = '^/(?P[^/]+)(?P/.+)$' + + def process_args(self, args): + if len(args) != 1: + raise cliapp.AppException('Wrong number of command line args') + + self.require_btrfs_in_deployment_host_kernel() + + upgrade = self.get_environment_boolean('UPGRADE') + if upgrade: + raise cliapp.AppException( + 'Use the `ssh-rsync` write extension to deploy upgrades to an ' + 'existing remote system.') + + location = args[0] + ssh_host, vm_name, vm_path = self.check_and_parse_location(location) + + self.check_ssh_connectivity(ssh_host) + self.check_can_create_file_at_given_path(ssh_host, vm_path) + self.check_no_existing_libvirt_vm(ssh_host, vm_name) + self.check_extra_disks_exist(ssh_host, self.parse_attach_disks()) + self.check_virtual_networks_are_started(ssh_host) + self.check_host_has_virtinstall(ssh_host) + + def check_and_parse_location(self, location): + '''Check and parse the location argument to get relevant data.''' + + x = urlparse.urlparse(location) + + if x.scheme != 'kvm+ssh': + raise cliapp.AppException( + 'URL schema must be kvm+ssh in %s' % location) + + m = re.match(self.location_pattern, x.path) + if not m: + raise cliapp.AppException('Cannot parse location %s' % location) + + return x.netloc, m.group('guest'), m.group('path') + + def check_no_existing_libvirt_vm(self, ssh_host, vm_name): + try: + cliapp.ssh_runcmd(ssh_host, + ['virsh', '--connect', 'qemu:///system', 'domstate', vm_name]) + except cliapp.AppException as e: + pass + else: + raise cliapp.AppException( + 'Host %s already has a VM named %s. You can use the ssh-rsync ' + 'write extension to deploy upgrades to existing machines.' 
% + (ssh_host, vm_name)) + + def check_can_create_file_at_given_path(self, ssh_host, vm_path): + + def check_can_write_to_given_path(): + try: + cliapp.ssh_runcmd(ssh_host, ['touch', vm_path]) + except cliapp.AppException as e: + raise cliapp.AppException("Can't write to location %s on %s" + % (vm_path, ssh_host)) + else: + cliapp.ssh_runcmd(ssh_host, ['rm', vm_path]) + + try: + cliapp.ssh_runcmd(ssh_host, ['test', '-e', vm_path]) + except cliapp.AppException as e: + # vm_path doesn't already exist, so let's test we can write + check_can_write_to_given_path() + else: + raise cliapp.AppException('%s already exists on %s' + % (vm_path, ssh_host)) + + def check_extra_disks_exist(self, ssh_host, filename_list): + for filename in filename_list: + try: + cliapp.ssh_runcmd(ssh_host, ['ls', filename]) + except cliapp.AppException as e: + raise cliapp.AppException('Did not find file %s on host %s' % + (filename, ssh_host)) + + def check_virtual_networks_are_started(self, ssh_host): + + def check_virtual_network_is_started(network_name): + cmd = ['virsh', '-c', 'qemu:///system', 'net-info', network_name] + net_info = cliapp.ssh_runcmd(ssh_host, cmd).split('\n') + + def pretty_concat(lines): + return '\n'.join(['\t%s' % line for line in lines]) + + for line in net_info: + m = re.match('^Active:\W*(\w+)\W*', line) + if m: + break + else: + raise cliapp.AppException( + "Got unexpected output parsing output of `%s':\n%s" + % (' '.join(cmd), pretty_concat(net_info))) + + network_active = m.group(1) == 'yes' + + if not network_active: + raise cliapp.AppException("Network '%s' is not started" + % network_name) + + def name(nic_entry): + if ',' in nic_entry: + # network=NETWORK_NAME,mac=12:34,model=e1000... + return nic_entry[:nic_entry.find(',')].lstrip('network=') + else: + return nic_entry.lstrip('network=') # NETWORK_NAME + + if 'NIC_CONFIG' in os.environ: + nics = os.environ['NIC_CONFIG'].split() + + for n in nics: + if not (n.startswith('network=') + or n.startswith('bridge=') + or n == 'user'): + raise cliapp.AppException('malformed NIC_CONFIG: %s\n' + " (expected 'bridge=BRIDGE' 'network=NAME'" + " or 'user')" % n) + + # --network bridge= is used to specify a bridge + # --network user is used to specify a form of NAT + # (see the virt-install(1) man page) + networks = [name(n) for n in nics if not n.startswith('bridge=') + and not n.startswith('user')] + else: + networks = ['default'] + + for network in networks: + check_virtual_network_is_started(network) + + def check_host_has_virtinstall(self, ssh_host): + try: + cliapp.ssh_runcmd(ssh_host, ['which', 'virt-install']) + except cliapp.AppException: + raise cliapp.AppException( + 'virt-install does not seem to be installed on host %s' + % ssh_host) + + +KvmPlusSshCheckExtension().run() diff --git a/extensions/kvm.write b/extensions/kvm.write new file mode 100755 index 00000000..0d0c095b --- /dev/null +++ b/extensions/kvm.write @@ -0,0 +1,120 @@ +#!/usr/bin/python +# Copyright (C) 2012-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + + +'''A Morph deployment write extension for deploying to KVM+libvirt. + +See file kvm.write.help for documentation + +''' + + +import cliapp +import os +import re +import sys +import tempfile +import urlparse + +import morphlib.writeexts + + +class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension): + + location_pattern = '^/(?P[^/]+)(?P/.+)$' + + def process_args(self, args): + if len(args) != 2: + raise cliapp.AppException('Wrong number of command line args') + + temp_root, location = args + ssh_host, vm_name, vm_path = self.parse_location(location) + autostart = self.get_environment_boolean('AUTOSTART') + + fd, raw_disk = tempfile.mkstemp() + os.close(fd) + self.create_local_system(temp_root, raw_disk) + + try: + self.transfer(raw_disk, ssh_host, vm_path) + self.create_libvirt_guest(ssh_host, vm_name, vm_path, autostart) + except BaseException: + sys.stderr.write('Error deploying to libvirt') + os.remove(raw_disk) + cliapp.ssh_runcmd(ssh_host, ['rm', '-f', vm_path]) + raise + else: + os.remove(raw_disk) + + self.status( + msg='Virtual machine %(vm_name)s has been created', + vm_name=vm_name) + + def parse_location(self, location): + '''Parse the location argument to get relevant data.''' + + x = urlparse.urlparse(location) + m = re.match('^/(?P[^/]+)(?P/.+)$', x.path) + return x.netloc, m.group('guest'), m.group('path') + + def transfer(self, raw_disk, ssh_host, vm_path): + '''Transfer raw disk image to libvirt host.''' + + self.status(msg='Transferring disk image') + + xfer_hole_path = morphlib.util.get_data_path('xfer-hole') + recv_hole = morphlib.util.get_data('recv-hole') + + ssh_remote_cmd = [ + 'sh', '-c', recv_hole, 'dummy-argv0', 'file', vm_path + ] + + cliapp.runcmd( + ['python', xfer_hole_path, raw_disk], + ['ssh', ssh_host] + map(cliapp.shell_quote, ssh_remote_cmd), + stdout=None, stderr=None) + + def create_libvirt_guest(self, ssh_host, vm_name, vm_path, autostart): + '''Create the libvirt virtual machine.''' + + self.status(msg='Creating libvirt/kvm virtual machine') + + attach_disks = self.parse_attach_disks() + attach_opts = [] + for disk in attach_disks: + attach_opts.extend(['--disk', 'path=%s' % disk]) + + if 'NIC_CONFIG' in os.environ: + nics = os.environ['NIC_CONFIG'].split() + for nic in nics: + attach_opts.extend(['--network', nic]) + + ram_mebibytes = str(self.get_ram_size() / (1024**2)) + + vcpu_count = str(self.get_vcpu_count()) + + cmdline = ['virt-install', '--connect', 'qemu:///system', + '--import', '--name', vm_name, '--vnc', + '--ram', ram_mebibytes, '--vcpus', vcpu_count, + '--disk', 'path=%s,bus=ide' % vm_path] + attach_opts + if not autostart: + cmdline += ['--noreboot'] + cliapp.ssh_runcmd(ssh_host, cmdline) + + if autostart: + cliapp.ssh_runcmd(ssh_host, + ['virsh', '--connect', 'qemu:///system', 'autostart', vm_name]) + +KvmPlusSshWriteExtension().run() diff --git a/extensions/kvm.write.help b/extensions/kvm.write.help new file mode 100644 index 00000000..812a5309 --- /dev/null +++ b/extensions/kvm.write.help @@ -0,0 +1,90 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + +help: | + + Deploy a Baserock system as a *new* KVM/LibVirt virtual machine. + + Use the `ssh-rsync` write extension to deploy upgrades to an *existing* VM + + Parameters: + + * location: a custom URL scheme of the form `kvm+ssh://HOST/GUEST/PATH`, + where: + * HOST is the name of the host on which KVM/LibVirt is running + * GUEST is the name of the guest VM on that host + * PATH is the path to the disk image that should be created, + on that host. For example, + `kvm+ssh://alice@192.168.122.1/testsys/home/alice/testys.img` where + * `alice@192.168.122.1` is the target host as given to ssh, + **from within the development host** (which may be + different from the target host's normal address); + * `testsys` is the name of the new guest VM'; + * `/home/alice/testys.img` is the pathname of the disk image files + on the target host. + + * HOSTNAME=name: the hostname of the **guest** VM within the network into + which it is being deployed + + * DISK_SIZE=X: the size of the VM's primary virtual hard disk. `X` should + use a suffix of `K`, `M`, or `G` (in upper or lower case) to indicate + kilo-, mega-, or gigabytes. For example, `DISK_SIZE=100G` would create a + 100 gigabyte disk image. **This parameter is mandatory**. + + * RAM_SIZE=X: The amount of RAM that the virtual machine should allocate + for itself from the host. `X` is interpreted in the same was as for + DISK_SIZE`, and defaults to `1G` + + * VCPUS=n: the number of virtual CPUs for the VM. Allowed values 1-32. Do + not use more CPU cores than you have available physically (real cores, no + hyperthreads) + + * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to + tell Linux to use, rather than booting the rootfs directly. + + * AUTOSTART=` - boolean. If it is set, the VM will be started when + it has been deployed. + + * DTB_PATH=path: **(MANDATORY)** for systems that require a device tree + binary - Give the full path (without a leading /) to the location of the + DTB in the built system image . The deployment will fail if `path` does + not exist. + + * BOOTLOADER_INSTALL=value: the bootloader to be installed + **(MANDATORY)** for non-x86 systems + + allowed values = + - 'extlinux' (default) - the extlinux bootloader will + be installed + - 'none' - no bootloader will be installed by `morph deploy`. A + bootloader must be installed manually. This value must be used when + deploying non-x86 systems such as ARM. + + * BOOTLOADER_CONFIG_FORMAT=value: the bootloader format to be used. + If not specified for x86-32 and x86-64 systems, 'extlinux' will be used + + allowed values = + - 'extlinux' + + * KERNEL_ARGS=args: optional additional kernel command-line parameters to + be appended to the default set. 
The default set is: + + 'rw init=/sbin/init rootfstype=btrfs \ + rootflags=subvol=systems/default/run \ + root=[name or UUID of root filesystem]' + + (See https://www.kernel.org/doc/Documentation/kernel-parameters.txt) + + (See `morph help deploy` for details of how to pass parameters to write + extensions) diff --git a/extensions/mason.configure b/extensions/mason.configure new file mode 100644 index 00000000..40fdfe46 --- /dev/null +++ b/extensions/mason.configure @@ -0,0 +1,153 @@ +#!/bin/sh +# +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# This is a "morph deploy" configuration extension to fully configure +# a Mason instance at deployment time. It uses the following variables +# from the environment: +# +# * ARTIFACT_CACHE_SERVER +# * MASON_CLUSTER_MORPHOLOGY +# * MASON_DEFINITIONS_REF +# * MASON_DISTBUILD_ARCH +# * MASON_TEST_HOST +# * OPENSTACK_NETWORK_ID +# * TEST_INFRASTRUCTURE_TYPE +# * TROVE_HOST +# * TROVE_ID +# * CONTROLLERHOST + +set -e + +########################################################################## +# Copy Mason files into root filesystem +########################################################################## + +ROOT="$1" + +mkdir -p "$ROOT"/usr/lib/mason +cp extensions/mason/mason.sh "$ROOT"/usr/lib/mason/mason.sh +cp extensions/mason/mason-report.sh "$ROOT"/usr/lib/mason/mason-report.sh +cp extensions/mason/os-init-script "$ROOT"/usr/lib/mason/os-init-script + +cp extensions/mason/mason.timer "$ROOT"/etc/systemd/system/mason.timer + +cp extensions/mason/mason.service "$ROOT"/etc/systemd/system/mason.service + +########################################################################## +# Set up httpd web server +########################################################################## + +cp extensions/mason/httpd.service "$ROOT"/etc/systemd/system/httpd.service + +mkdir -p "$ROOT"/srv/mason + +cat >>"$ROOT"/etc/httpd.conf <"$MASON_DATA/mason.conf" +import os, sys, yaml + +mason_configuration={ + 'ARTIFACT_CACHE_SERVER': os.environ['ARTIFACT_CACHE_SERVER'], + 'MASON_CLUSTER_MORPHOLOGY': os.environ['MASON_CLUSTER_MORPHOLOGY'], + 'MASON_DEFINITIONS_REF': os.environ['MASON_DEFINITIONS_REF'], + 'MASON_DISTBUILD_ARCH': os.environ['MASON_DISTBUILD_ARCH'], + 'MASON_TEST_HOST': os.environ['MASON_TEST_HOST'], + 'OPENSTACK_NETWORK_ID': os.environ['OPENSTACK_NETWORK_ID'], + 'TEST_INFRASTRUCTURE_TYPE': os.environ['TEST_INFRASTRUCTURE_TYPE'], + 'TROVE_ID': os.environ['TROVE_ID'], + 'TROVE_HOST': os.environ['TROVE_HOST'], + 'CONTROLLERHOST': os.environ['CONTROLLERHOST'], +} + +yaml.dump(mason_configuration, sys.stdout, default_flow_style=False) +EOF + +if [ "$TEST_INFRASTRUCTURE_TYPE" = "openstack" ]; then + python <<'EOF' >>"$MASON_DATA/mason.conf" +import os, sys, yaml + +openstack_credentials={ + 'OS_USERNAME': os.environ['OPENSTACK_USER'], + 'OS_TENANT_NAME': os.environ['OPENSTACK_TENANT'], + 'OS_TENANT_ID': 
os.environ['OPENSTACK_TENANT_ID'], + 'OS_AUTH_URL': os.environ['OPENSTACK_AUTH_URL'], + 'OS_PASSWORD': os.environ['OPENSTACK_PASSWORD'], +} + +yaml.dump(openstack_credentials, sys.stdout, default_flow_style=False) +EOF +fi + +########################################################################## +# Enable services +########################################################################## + +ln -s ../mason.timer "$ROOT"/etc/systemd/system/multi-user.target.wants/mason.timer +ln -s ../httpd.service "$ROOT"/etc/systemd/system/multi-user.target.wants/httpd.service diff --git a/extensions/mason/ansible/hosts b/extensions/mason/ansible/hosts new file mode 100644 index 00000000..5b97818d --- /dev/null +++ b/extensions/mason/ansible/hosts @@ -0,0 +1 @@ +localhost ansible_connection=local diff --git a/extensions/mason/ansible/mason-setup.yml b/extensions/mason/ansible/mason-setup.yml new file mode 100644 index 00000000..d1528dbb --- /dev/null +++ b/extensions/mason/ansible/mason-setup.yml @@ -0,0 +1,83 @@ +--- +- hosts: localhost + vars_files: + - "/etc/mason/mason.conf" + tasks: + + + - fail: msg='TROVE_ID is mandatory' + when: TROVE_ID is not defined + + - fail: msg='TROVE_HOST is mandatory' + when: TROVE_HOST is not defined + + - fail: msg='ARTIFACT_CACHE_SERVER is mandatory' + when: ARTIFACT_CACHE_SERVER is not defined + + - fail: msg='MASON_CLUSTER_MORPHOLOGY is mandatory' + when: MASON_CLUSTER_MORPHOLOGY is not defined + + - fail: msg='MASON_DEFINITIONS_REF is mandatory' + when: MASON_DEFINITIONS_REF is not defined + + - fail: msg='MASON_DISTBUILD_ARCH is mandatory' + when: MASON_DISTBUILD_ARCH is not defined + + - fail: msg='MASON_TEST_HOST is mandatory' + when: MASON_TEST_HOST is not defined + + - fail: msg='CONTROLLERHOST is mandatory' + when: CONTROLLERHOST is not defined + + - fail: msg='TEST_INFRASTRUCTURE_TYPE is mandatory' + when: TEST_INFRASTRUCTURE_TYPE is not defined + + - fail: msg='OPENSTACK_NETWORK_ID is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack' + when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OPENSTACK_NETWORK_ID is not defined + + - fail: msg='OS_USERNAME is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack' + when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_USERNAME is not defined + + - fail: msg='OS_PASSWORD is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack' + when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_PASSWORD is not defined + + - fail: msg='OS_TENANT_ID is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack' + when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_TENANT_ID is not defined + + - fail: msg='OS_TENANT_NAME is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack' + when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_TENANT_NAME is not defined + + - fail: msg='OS_AUTH_URL is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack' + when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_AUTH_URL is not defined + + - name: Create the Mason configuration file + template: src=/usr/share/mason-setup/{{ item }} dest=/etc/{{ item }} + with_items: + - mason.conf + + - name: Create the OpenStack credentials file + template: src=/usr/share/mason-setup/{{ item }} dest=/etc/{{ item }} + with_items: + - os.conf + when: TEST_INFRASTRUCTURE_TYPE == "openstack" + + - name: Enable the mason service + service: name=mason.service enabled=yes + register: mason_service + - name: Restart the mason service + service: name=mason.service state=restarted + when: mason_service|changed + + - name: Enable the mason timer + service: name=mason.timer enabled=yes + 
register: mason_timer + - name: Restart the mason timer + service: name=mason.timer state=restarted + when: mason_timer|changed + + - name: Enable the httpd service + service: name=httpd.service enabled=yes + register: httpd_service + - name: Restart the httpd service + service: name=httpd state=restarted + when: httpd_service|changed diff --git a/extensions/mason/httpd.service b/extensions/mason/httpd.service new file mode 100644 index 00000000..7572b732 --- /dev/null +++ b/extensions/mason/httpd.service @@ -0,0 +1,10 @@ +[Unit] +Description=HTTP server for Mason +After=network.target + +[Service] +User=root +ExecStart=/usr/sbin/httpd -f -p 80 -h /srv/mason + +[Install] +WantedBy=multi-user.target diff --git a/extensions/mason/mason-generator.sh b/extensions/mason/mason-generator.sh new file mode 100755 index 00000000..187db72c --- /dev/null +++ b/extensions/mason/mason-generator.sh @@ -0,0 +1,101 @@ +#!/bin/sh + +set -e + +if [ "$#" -lt 5 -o "$#" -gt 6 -o "$1" == "-h" -o "$1" == "--help" ]; then + cat < $REPORT_PATH <<'EOF' + + + + + + + +

+<html>
+<head>
+<title>Mason</title>
+</head>
+<body>
+<h1>Mason</h1>
+<h2>Baserock: Continuous Delivery</h2>
+<p>Build log of changes to BRANCH from TROVE. Most recent first.</p>
+<p>Last checked for updates: <code>0000-00-00 00:00:00</code></p>
+<table>
+<tr><th>Started</th><th>Ref</th><th>Duration</th><th>Result</th></tr>
+</table>
+</body>
+</html>
+ + + +EOF + + sed -i 's/BRANCH/'"$(sed_escape "$1")"'/' $REPORT_PATH + sed -i 's/TROVE/'"$(sed_escape "$2")"'/' $REPORT_PATH +} + +update_report() { + # Give function params sensible names + build_start_time="$1" + build_trove_host="$2" + build_ref="$3" + build_sha1="$4" + build_duration="$5" + build_result="$6" + + # Generate template if report file is not there + if [ ! -f $REPORT_PATH ]; then + create_report $build_ref $build_trove_host + fi + + # Build table row for insertion into report file + if [ "$build_result" = nonet ]; then + msg=''"${build_start_time}"'Failed to contact '"${build_trove_host}"''"${build_duration}s"''"${build_result}"'' + else + msg=''"${build_start_time}"''"${build_sha1}"''"${build_duration}s"''"${build_result}"'' + fi + + # Insert report line, newest at top + sed -i 's//\n'"$(sed_escape "$msg")"'/' $REPORT_PATH +} + +update_report_time() { + # Give function params sensible names + build_start_time="$1" + + # If the report file exists, update the last-checked-for-updates time + if [ -f $REPORT_PATH ]; then + sed -i 's/....-..-.. ..:..:..<\/code>/'"$(sed_escape "$build_start_time")"'<\/code>/' $REPORT_PATH + fi +} + +START_TIME=`date +%Y-%m-%d\ %T` + +update_report_time "$START_TIME" +cp "$REPORT_PATH" "$SERVER_PATH/index.html" + +logfile="$(mktemp)" +/usr/lib/mason/mason.sh 2>&1 | tee "$logfile" +case "${PIPESTATUS[0]}" in +0) + RESULT=pass + ;; +33) + RESULT=skip + ;; +42) + RESULT=nonet + ;; +*) + RESULT=fail + ;; +esac + +# TODO: Update page with last executed time +if [ "$RESULT" = skip ]; then + rm "$logfile" + exit 0 +fi + +DURATION=$(( $(date +%s) - $(date --date="$START_TIME" +%s) )) +SHA1="$(cd "ws/$DEFINITIONS_REF/$UPSTREAM_TROVE_ADDRESS/baserock/baserock/definitions" && git rev-parse HEAD)" + +update_report "$START_TIME" \ + "$UPSTREAM_TROVE_ADDRESS" \ + "$DEFINITIONS_REF" \ + "$SHA1" \ + "$DURATION" \ + "$RESULT" + + +# +# Copy report into server directory +# + +cp "$REPORT_PATH" "$SERVER_PATH/index.html" +mkdir "$SERVER_PATH/log" +mv "$logfile" "$SERVER_PATH/log/$SHA1--$START_TIME.log" diff --git a/extensions/mason/mason-setup.service b/extensions/mason/mason-setup.service new file mode 100644 index 00000000..60403bde --- /dev/null +++ b/extensions/mason/mason-setup.service @@ -0,0 +1,16 @@ +[Unit] +Description=Run mason-setup Ansible scripts +Requires=network.target +After=network.target +Requires=opensshd.service +After=opensshd.service + +# If there's a shared /var subvolume, it must be mounted before this +# unit runs. +Requires=local-fs.target +After=local-fs.target + +ConditionPathExists=/etc/mason/mason.conf + +[Service] +ExecStart=/usr/bin/ansible-playbook -v -i /usr/lib/mason-setup/ansible/hosts /usr/lib/mason-setup/ansible/mason-setup.yml diff --git a/extensions/mason/mason.service b/extensions/mason/mason.service new file mode 100644 index 00000000..d5c99498 --- /dev/null +++ b/extensions/mason/mason.service @@ -0,0 +1,12 @@ +[Unit] +Description=Mason: Continuous Delivery Service +After=mason-setup.service +ConditionPathIsDirectory=/srv/distbuild + +[Service] +User=root +ExecStart=/usr/lib/mason/mason-report.sh +WorkingDirectory=/srv/distbuild + +[Install] +WantedBy=multi-user.target diff --git a/extensions/mason/mason.sh b/extensions/mason/mason.sh new file mode 100755 index 00000000..dba99dfa --- /dev/null +++ b/extensions/mason/mason.sh @@ -0,0 +1,93 @@ +#!/bin/sh + +# Load OpenStack credentials +if [ -f "/etc/os.conf" ]; then + . /etc/os.conf +fi + +set -e +set -x + +# Load our deployment config +. /etc/mason.conf + +if [ ! 
-e ws ]; then + morph init ws +fi +cd ws + +definitions_repo="$DEFINITIONS_REF"/"$UPSTREAM_TROVE_ADDRESS"/baserock/baserock/definitions +if [ ! -e "$definitions_repo" ]; then + morph checkout git://"$UPSTREAM_TROVE_ADDRESS"/baserock/baserock/definitions "$DEFINITIONS_REF" + cd "$definitions_repo" + git config user.name "$TROVE_ID"-mason + git config user.email "$TROVE_ID"-mason@$(hostname) +else + cd "$definitions_repo" + SHA1_PREV="$(git rev-parse HEAD)" +fi + +if ! git remote update origin; then + echo ERROR: Unable to contact trove + exit 42 +fi +git clean -fxd +git reset --hard origin/"$DEFINITIONS_REF" + +SHA1="$(git rev-parse HEAD)" + +if [ -f "$HOME/success" ] && [ "$SHA1" = "$SHA1_PREV" ]; then + echo INFO: No changes to "$DEFINITIONS_REF", nothing to do + exit 33 +fi + +rm -f "$HOME/success" + +echo INFO: Mason building: $DEFINITIONS_REF at $SHA1 + +if ! "scripts/release-build" --no-default-configs \ + --trove-host "$UPSTREAM_TROVE_ADDRESS" \ + --artifact-cache-server "http://$ARTIFACT_CACHE_SERVER:8080/" \ + --controllers "$DISTBUILD_ARCH:$DISTBUILD_CONTROLLER_ADDRESS" \ + "$BUILD_CLUSTER_MORPHOLOGY"; then + echo ERROR: Failed to build release images + echo Build logs for chunks: + find builds -type f -exec echo {} \; -exec cat {} \; + exit 1 +fi + +releases_made="$(cd release && ls | wc -l)" +if [ "$releases_made" = 0 ]; then + echo ERROR: No release images created + exit 1 +else + echo INFO: Created "$releases_made" release images +fi + +if [ "$TEST_INFRASTRUCTURE_TYPE" = "openstack" ]; then + "scripts/release-test-os" \ + --deployment-host "$DISTBUILD_ARCH":"$MASON_TEST_HOST" \ + --trove-host "$UPSTREAM_TROVE_ADDRESS" \ + --trove-id "$TROVE_ID" \ + --net-id "$OPENSTACK_NETWORK_ID" \ + "$BUILD_CLUSTER_MORPHOLOGY" +elif [ "$TEST_INFRASTRUCTURE_TYPE" = "kvmhost" ]; then + "scripts/release-test" \ + --deployment-host "$DISTBUILD_ARCH":"$MASON_TEST_HOST" \ + --trove-host "$UPSTREAM_TROVE_ADDRESS" \ + --trove-id "$TROVE_ID" \ + "$BUILD_CLUSTER_MORPHOLOGY" +fi + +"scripts/release-upload" --build-trove-host "$ARTIFACT_CACHE_SERVER" \ + --arch "$DISTBUILD_ARCH" \ + --log-level=debug --log="$HOME"/release-upload.log \ + --public-trove-host "$UPSTREAM_TROVE_ADDRESS" \ + --public-trove-username root \ + --public-trove-artifact-dir /home/cache/artifacts \ + --no-upload-release-artifacts \ + "$BUILD_CLUSTER_MORPHOLOGY" + +echo INFO: Artifact upload complete for $DEFINITIONS_REF at $SHA1 + +touch "$HOME/success" diff --git a/extensions/mason/mason.timer b/extensions/mason/mason.timer new file mode 100644 index 00000000..107dff97 --- /dev/null +++ b/extensions/mason/mason.timer @@ -0,0 +1,10 @@ +[Unit] +Description=Runs Mason continually with 1 min between calls + +[Timer] +#Time between Mason finishing and calling it again +OnUnitActiveSec=1min +Unit=mason.service + +[Install] +WantedBy=multi-user.target diff --git a/extensions/mason/os-init-script b/extensions/mason/os-init-script new file mode 100644 index 00000000..77afb926 --- /dev/null +++ b/extensions/mason/os-init-script @@ -0,0 +1,6 @@ +#!/bin/bash + +# This allows the test runner to know that cloud-init has completed the +# disc resizing, and there is enough free space to continue. +touch /root/cloud-init-finished + diff --git a/extensions/mason/share/mason.conf b/extensions/mason/share/mason.conf new file mode 100644 index 00000000..1295ce84 --- /dev/null +++ b/extensions/mason/share/mason.conf @@ -0,0 +1,14 @@ +# This file is generarated by the mason-setup systemd unit. 
+# If you want to change the configuration, change the configuration +# in /etc/mason/mason.conf and restart the service. + +ARTIFACT_CACHE_SERVER={{ ARTIFACT_CACHE_SERVER|quote }} +UPSTREAM_TROVE_ADDRESS={{ TROVE_HOST|quote }} +DEFINITIONS_REF={{ MASON_DEFINITIONS_REF|quote }} +DISTBUILD_ARCH={{ MASON_DISTBUILD_ARCH|quote }} +DISTBUILD_CONTROLLER_ADDRESS={{ CONTROLLERHOST|quote }} +TROVE_ID={{ TROVE_ID|quote }} +BUILD_CLUSTER_MORPHOLOGY={{ MASON_CLUSTER_MORPHOLOGY|quote }} +MASON_TEST_HOST={{ MASON_TEST_HOST|quote }} +TEST_INFRASTRUCTURE_TYPE={{ TEST_INFRASTRUCTURE_TYPE|quote }} +{% if OPENSTACK_NETWORK_ID is defined %}OPENSTACK_NETWORK_ID={{ OPENSTACK_NETWORK_ID|quote }}{% endif %} diff --git a/extensions/mason/share/os.conf b/extensions/mason/share/os.conf new file mode 100644 index 00000000..21ef398c --- /dev/null +++ b/extensions/mason/share/os.conf @@ -0,0 +1,30 @@ +#!/bin/bash + +# A version of this file with the relevant information included can be +# obtained by navigating to 'Access & Security' -> 'API Access' -> +# 'Download OpenStack RC file' in The Horizon web interface of your +# OpenStack. However, the file obtained from there sets OS_PASSWORD +# such that it will ask the user for a password, so you will need to +# change that for Mason to work automatically. +# +# With the addition of Keystone, to use an openstack cloud you should +# authenticate against keystone, which returns a **Token** and **Service +# Catalog**. The catalog contains the endpoint for all services the +# user/tenant has access to - including nova, glance, keystone, swift. +# +# *NOTE*: Using the 2.0 *auth api* does not mean that compute api is 2.0. We +# will use the 1.1 *compute api* +export OS_AUTH_URL={{ OS_AUTH_URL|quote }} + +# With the addition of Keystone we have standardized on the term **tenant** +# as the entity that owns the resources. +export OS_TENANT_ID={{ OS_TENANT_ID|quote }} +export OS_TENANT_NAME={{ OS_TENANT_NAME|quote }} + +# In addition to the owning entity (tenant), openstack stores the entity +# performing the action as the **user**. +export OS_USERNAME={{ OS_USERNAME|quote }} + +# With Keystone you pass the keystone password. +export OS_PASSWORD={{ OS_PASSWORD|quote }} + diff --git a/extensions/moonshot-kernel.configure b/extensions/moonshot-kernel.configure new file mode 100644 index 00000000..11d01751 --- /dev/null +++ b/extensions/moonshot-kernel.configure @@ -0,0 +1,33 @@ +#!/bin/sh +# +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# This is a "morph deploy" configuration extension to convert a plain +# kernel Image to uImage, for an HP Moonshot m400 cartridge + +set -eu + +case "$MOONSHOT_KERNEL" in + True|yes) + echo "Converting kernel image for Moonshot" + mkimage -A arm -O linux -C none -T kernel -a 0x00080000 \ + -e 0x00080000 -n Linux -d "$1/boot/vmlinux" "$1/boot/uImage" + ;; + *) + echo Unrecognised option "$MOONSHOT_KERNEL" to MOONSHOT_KERNEL + exit 1 + ;; +esac diff --git a/extensions/nfsboot-server.configure b/extensions/nfsboot-server.configure new file mode 100755 index 00000000..9fb48096 --- /dev/null +++ b/extensions/nfsboot-server.configure @@ -0,0 +1,58 @@ +#!/bin/sh +# +# Copyright (C) 2013-2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# +# This is a "morph deploy" configuration extension to set up a server for +# booting over nfs and tftp. +set -e + +ROOT="$1" + +########################################################################## + +nfsboot_root=/srv/nfsboot +tftp_root="$nfsboot_root"/tftp +nfs_root="$nfsboot_root"/nfs +mkdir -p "$ROOT$tftp_root" "$ROOT$nfs_root" + +install -D /dev/stdin "$ROOT/usr/lib/systemd/system/nfsboot-tftp.service" <. + +'''Preparatory checks for Morph 'nfsboot' write extension''' + +import cliapp +import os + +import morphlib.writeexts + + +class NFSBootCheckExtension(morphlib.writeexts.WriteExtension): + + _nfsboot_root = '/srv/nfsboot' + + def process_args(self, args): + if len(args) != 1: + raise cliapp.AppException('Wrong number of command line args') + + location = args[0] + + upgrade = self.get_environment_boolean('UPGRADE') + if upgrade: + raise cliapp.AppException( + 'Upgrading is not currently supported for NFS deployments.') + + hostname = os.environ.get('HOSTNAME', None) + if hostname is None: + raise cliapp.AppException('You must specify a HOSTNAME.') + if hostname == 'baserock': + raise cliapp.AppException('It is forbidden to nfsboot a system ' + 'with hostname "%s"' % hostname) + + self.test_good_server(location) + + version_label = os.getenv('VERSION_LABEL', 'factory') + versioned_root = os.path.join(self._nfsboot_root, hostname, 'systems', + version_label) + if self.version_exists(versioned_root, location): + raise cliapp.AppException( + 'Root file system for host %s (version %s) already exists on ' + 'the NFS server %s. Deployment aborted.' 
% (hostname,
+                                   version_label, location))
+
+    def test_good_server(self, server):
+        self.check_ssh_connectivity(server)
+
+        # Is an NFS server
+        try:
+            cliapp.ssh_runcmd(
+                'root@%s' % server, ['test', '-e', '/etc/exports'])
+        except cliapp.AppException:
+            raise cliapp.AppException('server %s is not an nfs server'
+                                      % server)
+        try:
+            cliapp.ssh_runcmd(
+                'root@%s' % server, ['systemctl', 'is-enabled',
+                                     'nfs-server.service'])
+        except cliapp.AppException:
+            raise cliapp.AppException('server %s does not control its '
+                                      'nfs server by systemd' % server)
+
+        # TFTP server exports /srv/nfsboot/tftp
+        tftp_root = os.path.join(self._nfsboot_root, 'tftp')
+        try:
+            cliapp.ssh_runcmd(
+                'root@%s' % server, ['test', '-d', tftp_root])
+        except cliapp.AppException:
+            raise cliapp.AppException('server %s does not export %s' %
+                                      (server, tftp_root))
+
+    def version_exists(self, versioned_root, location):
+        try:
+            cliapp.ssh_runcmd('root@%s' % location,
+                              ['test', '-d', versioned_root])
+        except cliapp.AppException:
+            return False
+
+        return True
+
+
+NFSBootCheckExtension().run()
diff --git a/extensions/nfsboot.configure b/extensions/nfsboot.configure
new file mode 100755
index 00000000..6a68dc48
--- /dev/null
+++ b/extensions/nfsboot.configure
@@ -0,0 +1,30 @@
+#!/bin/sh
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+# Remove all networking interfaces. On nfsboot systems, eth0 is set up
+# during kernel init, and the normal ifup@eth0.service systemd unit
+# would break the NFS connection and cause the system to hang.
+
+
+set -e
+if [ "$NFSBOOT_CONFIGURE" ]; then
+    # Remove all networking interfaces but loopback
+    cat > "$1/etc/network/interfaces" <<EOF
+auto lo
+iface lo inet loopback
+EOF
+fi
+
+'''A Morph deployment write extension for deploying to an nfsboot server
+
+*** DO NOT USE ***
+- This was written before 'proper' deployment mechanisms were in place.
+It is unlikely to work at all, and will not work correctly.
+
+Use the pxeboot write extension instead
+
+***
+
+An nfsboot server is defined as a baserock system that has tftp and nfs
+servers running; the tftp server exports the contents of
+/srv/nfsboot/tftp/, and the user has sufficient permissions to create nfs
+roots in /srv/nfsboot/nfs/.
+
+'''
+
+
+import cliapp
+import os
+import glob
+
+import morphlib.writeexts
+
+
+class NFSBootWriteExtension(morphlib.writeexts.WriteExtension):
+
+    '''Create an NFS root and kernel on TFTP during Morph's deployment.
+
+    The location command line argument is the hostname of the nfsboot
+    server. The user is expected to provide the location argument using
+    the following syntax:
+
+        HOST
+
+    where:
+
+    * HOST is the host of the nfsboot server
+
+    The extension will connect to root@HOST via ssh to copy the kernel and
+    rootfs, and configure the nfs server.
+
+    It requires root because it uses systemd, and reads/writes to /etc.
+ + ''' + + _nfsboot_root = '/srv/nfsboot' + + def process_args(self, args): + if len(args) != 2: + raise cliapp.AppException('Wrong number of command line args') + + temp_root, location = args + + version_label = os.getenv('VERSION_LABEL', 'factory') + hostname = os.environ['HOSTNAME'] + + versioned_root = os.path.join(self._nfsboot_root, hostname, 'systems', + version_label) + + self.copy_rootfs(temp_root, location, versioned_root, hostname) + self.copy_kernel(temp_root, location, versioned_root, version_label, + hostname) + self.configure_nfs(location, hostname) + + def create_local_state(self, location, hostname): + statedir = os.path.join(self._nfsboot_root, hostname, 'state') + subdirs = [os.path.join(statedir, 'home'), + os.path.join(statedir, 'opt'), + os.path.join(statedir, 'srv')] + cliapp.ssh_runcmd('root@%s' % location, + ['mkdir', '-p'] + subdirs) + + def copy_kernel(self, temp_root, location, versioned_root, version, + hostname): + bootdir = os.path.join(temp_root, 'boot') + image_names = ['vmlinuz', 'zImage', 'uImage'] + for name in image_names: + try_path = os.path.join(bootdir, name) + if os.path.exists(try_path): + kernel_src = try_path + break + else: + raise cliapp.AppException( + 'Could not find a kernel in the system: none of ' + '%s found' % ', '.join(image_names)) + + kernel_dest = os.path.join(versioned_root, 'orig', 'kernel') + rsync_dest = 'root@%s:%s' % (location, kernel_dest) + self.status(msg='Copying kernel') + cliapp.runcmd( + ['rsync', '-s', kernel_src, rsync_dest]) + + # Link the kernel to the right place + self.status(msg='Creating links to kernel in tftp directory') + tftp_dir = os.path.join(self._nfsboot_root , 'tftp') + versioned_kernel_name = "%s-%s" % (hostname, version) + kernel_name = hostname + try: + cliapp.ssh_runcmd('root@%s' % location, + ['ln', '-f', kernel_dest, + os.path.join(tftp_dir, versioned_kernel_name)]) + + cliapp.ssh_runcmd('root@%s' % location, + ['ln', '-sf', versioned_kernel_name, + os.path.join(tftp_dir, kernel_name)]) + except cliapp.AppException: + raise cliapp.AppException('Could not create symlinks to the ' + 'kernel at %s in %s on %s' + % (kernel_dest, tftp_dir, location)) + + def copy_rootfs(self, temp_root, location, versioned_root, hostname): + rootfs_src = temp_root + '/' + orig_path = os.path.join(versioned_root, 'orig') + run_path = os.path.join(versioned_root, 'run') + + self.status(msg='Creating destination directories') + try: + cliapp.ssh_runcmd('root@%s' % location, + ['mkdir', '-p', orig_path, run_path]) + except cliapp.AppException: + raise cliapp.AppException('Could not create dirs %s and %s on %s' + % (orig_path, run_path, location)) + + self.status(msg='Creating \'orig\' rootfs') + cliapp.runcmd( + ['rsync', '-asXSPH', '--delete', rootfs_src, + 'root@%s:%s' % (location, orig_path)]) + + self.status(msg='Creating \'run\' rootfs') + try: + cliapp.ssh_runcmd('root@%s' % location, + ['rm', '-rf', run_path]) + cliapp.ssh_runcmd('root@%s' % location, + ['cp', '-al', orig_path, run_path]) + cliapp.ssh_runcmd('root@%s' % location, + ['rm', '-rf', os.path.join(run_path, 'etc')]) + cliapp.ssh_runcmd('root@%s' % location, + ['cp', '-a', os.path.join(orig_path, 'etc'), + os.path.join(run_path, 'etc')]) + except cliapp.AppException: + raise cliapp.AppException('Could not create \'run\' rootfs' + ' from \'orig\'') + + self.status(msg='Linking \'default\' to latest system') + try: + cliapp.ssh_runcmd('root@%s' % location, + ['ln', '-sfn', versioned_root, + os.path.join(self._nfsboot_root, hostname, 'systems', + 
'default')]) + except cliapp.AppException: + raise cliapp.AppException('Could not link \'default\' to %s' + % versioned_root) + + def configure_nfs(self, location, hostname): + exported_path = os.path.join(self._nfsboot_root, hostname) + exports_path = '/etc/exports' + # If that path is not already exported: + try: + cliapp.ssh_runcmd( + 'root@%s' % location, ['grep', '-q', exported_path, + exports_path]) + except cliapp.AppException: + ip_mask = '*' + options = 'rw,no_subtree_check,no_root_squash,async' + exports_string = '%s %s(%s)\n' % (exported_path, ip_mask, options) + exports_append_sh = '''\ +set -eu +target="$1" +temp=$(mktemp) +cat "$target" > "$temp" +cat >> "$temp" +mv "$temp" "$target" +''' + cliapp.ssh_runcmd( + 'root@%s' % location, + ['sh', '-c', exports_append_sh, '--', exports_path], + feed_stdin=exports_string) + cliapp.ssh_runcmd( + 'root@%s' % location, ['systemctl', 'restart', + 'nfs-server.service']) + + +NFSBootWriteExtension().run() diff --git a/extensions/nfsboot.write.help b/extensions/nfsboot.write.help new file mode 100644 index 00000000..186c479a --- /dev/null +++ b/extensions/nfsboot.write.help @@ -0,0 +1,33 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + +help: | + *** DO NOT USE *** + - This was written before 'proper' deployment mechanisms were in place. + It is unlikely to work at all, and will not work correctly. + + Use the pxeboot write extension instead + + *** + Deploy a system image and kernel to an nfsboot server. + + An nfsboot server is defined as a baserock system that has + tftp and nfs servers running, the tftp server is exporting + the contents of /srv/nfsboot/tftp/ and the user has sufficient + permissions to create nfs roots in /srv/nfsboot/nfs/. + + The `location` argument is the hostname of the nfsboot server. + + The extension will connect to root@HOST via ssh to copy the + kernel and rootfs, and configure the nfs server. diff --git a/extensions/openstack-ceilometer.configure b/extensions/openstack-ceilometer.configure new file mode 100644 index 00000000..9c0b7b6d --- /dev/null +++ b/extensions/openstack-ceilometer.configure @@ -0,0 +1,120 @@ +#!/bin/sh + +# Copyright (C) 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
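+#
+# A note on the check_bool helper defined just below: it normalises the
+# deployment variable named by its argument in place, mapping "True" or
+# unset to the command `true` and "False" to `false`, so the rest of the
+# script can branch on it directly. A minimal sketch of the contract
+# (variable name hypothetical):
+#
+#     FLAG=True
+#     check_bool FLAG          # FLAG is now the string "true"
+#     if "$FLAG"; then echo enabled; fi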
+ +set -e + +ROOT="$1" + +enable(){ + ln -sf "/usr/lib/systemd/system/$1.service" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" +} + +unnaceptable(){ + eval echo Unexpected value \$$1 for $1 >&2 + exit 1 +} + +check_bool(){ + case "$(eval echo \"\$$1\")" in + True|'') + eval "$1=true" + ;; + False) + eval "$1=false" + ;; + *) + unnaceptable "$1" + ;; + esac +} + +########################################################################## +# Check variables +########################################################################## + +check_bool CEILOMETER_ENABLE_CONTROLLER +check_bool CEILOMETER_ENABLE_COMPUTE + +if ! "$CEILOMETER_ENABLE_CONTROLLER" && \ + ! "$CEILOMETER_ENABLE_COMPUTE"; then + exit 0 +fi + +if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \ + -z "$CEILOMETER_SERVICE_USER" -o \ + -z "$CEILOMETER_SERVICE_PASSWORD" -o \ + -z "$CEILOMETER_DB_USER" -o \ + -z "$CEILOMETER_DB_PASSWORD" -o \ + -z "$METERING_SECRET" -o \ + -z "$RABBITMQ_HOST" -o \ + -z "$RABBITMQ_PORT" -o \ + -z "$RABBITMQ_USER" -o \ + -z "$RABBITMQ_PASSWORD" -o \ + -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ + -z "$CONTROLLER_HOST_ADDRESS" ]; then + echo Some options required for Ceilometer were defined, but not all. + exit 1 +fi + +###################################### +# Enable relevant openstack services # +###################################### + +if "$CEILOMETER_ENABLE_COMPUTE" || "$CEILOMETER_ENABLE_CONTROLLER"; then + enable openstack-ceilometer-config-setup +fi +if "$CEILOMETER_ENABLE_COMPUTE"; then + enable openstack-ceilometer-compute +fi +if "$CEILOMETER_ENABLE_CONTROLLER"; then + enable openstack-ceilometer-db-setup + enable openstack-ceilometer-api + enable openstack-ceilometer-collector + enable openstack-ceilometer-notification + enable openstack-ceilometer-central + enable openstack-ceilometer-alarm-evaluator + enable openstack-ceilometer-alarm-notifier +fi + +########################################################################## +# Generate configuration file +########################################################################## + +OPENSTACK_DATA="$ROOT/etc/openstack" +mkdir -p "$OPENSTACK_DATA" + +python <<'EOF' >"$OPENSTACK_DATA/ceilometer.conf" +import os, sys, yaml + +ceilometer_configuration={ + 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], + 'CEILOMETER_SERVICE_PASSWORD': os.environ['CEILOMETER_SERVICE_PASSWORD'], + 'CEILOMETER_SERVICE_USER': os.environ['CEILOMETER_SERVICE_USER'], + 'CEILOMETER_DB_USER': os.environ['CEILOMETER_DB_USER'], + 'CEILOMETER_DB_PASSWORD': os.environ['CEILOMETER_DB_PASSWORD'], + 'METERING_SECRET': os.environ['METERING_SECRET'], + 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], + 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], + 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], + 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], + 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], + 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], +} + +yaml.dump(ceilometer_configuration, sys.stdout, default_flow_style=False) +EOF diff --git a/extensions/openstack-cinder.configure b/extensions/openstack-cinder.configure new file mode 100644 index 00000000..4c32e11a --- /dev/null +++ b/extensions/openstack-cinder.configure @@ -0,0 +1,125 @@ +#!/bin/sh + +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 
of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +set -e + +ROOT="$1" + +enable(){ + ln -sf "/usr/lib/systemd/system/$1.service" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" +} + +unnaceptable(){ + eval echo Unexpected value \$$1 for $1 >&2 + exit 1 +} + +check_bool(){ + case "$(eval echo \"\$$1\")" in + True|'') + eval "$1=true" + ;; + False) + eval "$1=false" + ;; + *) + unnaceptable "$1" + ;; + esac +} + +########################################################################## +# Check variables +########################################################################## + +check_bool CINDER_ENABLE_CONTROLLER +check_bool CINDER_ENABLE_COMPUTE +check_bool CINDER_ENABLE_STORAGE + +if ! "$CINDER_ENABLE_CONTROLLER" && \ + ! "$CINDER_ENABLE_COMPUTE" && \ + ! "$CINDER_ENABLE_STORAGE"; then + exit 0 +fi + +if [ -z "$RABBITMQ_HOST" -o \ + -z "$RABBITMQ_PORT" -o \ + -z "$RABBITMQ_USER" -o \ + -z "$RABBITMQ_PASSWORD" -o \ + -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \ + -z "$CINDER_DB_USER" -o \ + -z "$CINDER_DB_PASSWORD" -o \ + -z "$CONTROLLER_HOST_ADDRESS" -o \ + -z "$CINDER_SERVICE_USER" -o \ + -z "$CINDER_SERVICE_PASSWORD" -o \ + -z "$CINDER_DEVICE" -o \ + -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" ]; then + echo Some options required for Cinder were defined, but not all. + exit 1 +fi + +###################################### +# Enable relevant openstack services # +###################################### + +if "$CINDER_ENABLE_COMPUTE" || "$CINDER_ENABLE_STORAGE"; then + enable iscsi-setup + enable target #target.service! 
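+    # ("target" is the storage target unit, presumably the LIO/targetcli
+    # restore service, which iSCSI-backed Cinder volumes rely on; the
+    # unit really is named just "target.service".)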
+ enable iscsid +fi +if "$CINDER_ENABLE_COMPUTE" || "$CINDER_ENABLE_CONTROLLER" || "$CINDER_ENABLE_STORAGE"; then + enable openstack-cinder-config-setup +fi +if "$CINDER_ENABLE_STORAGE"; then + enable openstack-cinder-lv-setup + enable lvm2-lvmetad + enable openstack-cinder-volume + enable openstack-cinder-backup + enable openstack-cinder-scheduler +fi +if "$CINDER_ENABLE_CONTROLLER"; then + enable openstack-cinder-db-setup + enable openstack-cinder-api +fi + +########################################################################## +# Generate configuration file +########################################################################## + +OPENSTACK_DATA="$ROOT/etc/openstack" +mkdir -p "$OPENSTACK_DATA" + +python <<'EOF' >"$OPENSTACK_DATA/cinder.conf" +import os, sys, yaml + +cinder_configuration={ + 'RABBITMQ_HOST':os.environ['RABBITMQ_HOST'], + 'RABBITMQ_PORT':os.environ['RABBITMQ_PORT'], + 'RABBITMQ_USER':os.environ['RABBITMQ_USER'], + 'RABBITMQ_PASSWORD':os.environ['RABBITMQ_PASSWORD'], + 'KEYSTONE_TEMPORARY_ADMIN_TOKEN':os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], + 'CINDER_DB_USER':os.environ['CINDER_DB_USER'], + 'CINDER_DB_PASSWORD':os.environ['CINDER_DB_PASSWORD'], + 'CONTROLLER_HOST_ADDRESS':os.environ['CONTROLLER_HOST_ADDRESS'], + 'CINDER_SERVICE_USER':os.environ['CINDER_SERVICE_USER'], + 'CINDER_SERVICE_PASSWORD':os.environ['CINDER_SERVICE_PASSWORD'], + 'CINDER_DEVICE':os.environ['CINDER_DEVICE'], + 'MANAGEMENT_INTERFACE_IP_ADDRESS':os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], +} + +yaml.dump(cinder_configuration, sys.stdout, default_flow_style=False) +EOF diff --git a/extensions/openstack-glance.configure b/extensions/openstack-glance.configure new file mode 100644 index 00000000..5da08895 --- /dev/null +++ b/extensions/openstack-glance.configure @@ -0,0 +1,101 @@ +#!/bin/sh + +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +set -e + +ROOT="$1" + +enable(){ + ln -sf "/usr/lib/systemd/system/$1.service" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" +} + +unnaceptable(){ + eval echo Unexpected value \$$1 for $1 >&2 + exit 1 +} + +check_bool(){ + case "$(eval echo \"\$$1\")" in + True|'') + eval "$1=true" + ;; + False) + eval "$1=false" + ;; + *) + unnaceptable "$1" + ;; + esac +} + +########################################################################## +# Check variables +########################################################################## + +check_bool GLANCE_ENABLE_SERVICE + +if ! "$GLANCE_ENABLE_SERVICE"; then + exit 0 +fi + +if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \ + -z "$GLANCE_SERVICE_USER" -o \ + -z "$GLANCE_SERVICE_PASSWORD" -o \ + -z "$GLANCE_DB_USER" -o \ + -z "$GLANCE_DB_PASSWORD" -o \ + -z "$RABBITMQ_HOST" -o \ + -z "$RABBITMQ_PORT" -o \ + -z "$RABBITMQ_USER" -o \ + -z "$RABBITMQ_PASSWORD" -o \ + -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ + -z "$CONTROLLER_HOST_ADDRESS" ]; then + echo Some options required for Glance were defined, but not all. 
+ exit 1 +fi + +###################################### +# Enable relevant openstack services # +###################################### + +enable openstack-glance-setup + +########################################################################## +# Generate configuration file +########################################################################## + +OPENSTACK_DATA="$ROOT/etc/openstack" +mkdir -p "$OPENSTACK_DATA" + +python <<'EOF' >"$OPENSTACK_DATA/glance.conf" +import os, sys, yaml + +glance_configuration={ + 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], + 'GLANCE_SERVICE_PASSWORD': os.environ['GLANCE_SERVICE_PASSWORD'], + 'GLANCE_SERVICE_USER': os.environ['GLANCE_SERVICE_USER'], + 'GLANCE_DB_USER': os.environ['GLANCE_DB_USER'], + 'GLANCE_DB_PASSWORD': os.environ['GLANCE_DB_PASSWORD'], + 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], + 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], + 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], + 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], + 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], + 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], +} + +yaml.dump(glance_configuration, sys.stdout, default_flow_style=False) +EOF diff --git a/extensions/openstack-ironic.configure b/extensions/openstack-ironic.configure new file mode 100644 index 00000000..962bbcd1 --- /dev/null +++ b/extensions/openstack-ironic.configure @@ -0,0 +1,155 @@ +#!/bin/sh + +# Copyright (C) 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +set -e + +ROOT="$1" + +enable(){ + ln -sf "/usr/lib/systemd/system/$1.service" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" +} + +unnaceptable(){ + eval echo Unexpected value \$$1 for $1 >&2 + exit 1 +} + +check_bool(){ + case "$(eval echo \"\$$1\")" in + True|'') + eval "$1=true" + ;; + False) + eval "$1=false" + ;; + *) + unnaceptable "$1" + ;; + esac +} + +########################################################################## +# Check variables +########################################################################## + +check_bool IRONIC_ENABLE_SERVICE + +if ! "$IRONIC_ENABLE_SERVICE"; then + exit 0 +fi + +if [ -z "$IRONIC_SERVICE_USER" -o \ + -z "$IRONIC_SERVICE_PASSWORD" -o \ + -z "$IRONIC_DB_USER" -o \ + -z "$IRONIC_DB_PASSWORD" -o \ + -z "$RABBITMQ_HOST" -o \ + -z "$RABBITMQ_USER" -o \ + -z "$RABBITMQ_PASSWORD" -o \ + -z "$RABBITMQ_PORT" -o \ + -z "$CONTROLLER_HOST_ADDRESS" -o \ + -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ + -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then + echo Some options required for Ironic were defined, but not all. + exit 1 +fi + +###################################### +# Enable relevant openstack services # +###################################### + +enable openstack-ironic-setup +enable iscsi-setup +enable target #target.service! 
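+# (Each `enable NAME` call expands, via the helper defined at the top of
+# this script, into a symlink that starts the unit at boot in the deployed
+# image; the first call above, for example, is equivalent to:
+#
+#     ln -sf "/usr/lib/systemd/system/openstack-ironic-setup.service" \
+#         "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-ironic-setup.service"
+# )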
+enable iscsid + +########################################################################## +# Generate configuration file +########################################################################## + +OPENSTACK_DATA="$ROOT/etc/openstack" +mkdir -p "$OPENSTACK_DATA" + +python <<'EOF' >"$OPENSTACK_DATA/ironic.conf" +import os, sys, yaml + +ironic_configuration={ + 'IRONIC_SERVICE_USER': os.environ['IRONIC_SERVICE_USER'], + 'IRONIC_SERVICE_PASSWORD': os.environ['IRONIC_SERVICE_PASSWORD'], + 'IRONIC_DB_USER': os.environ['IRONIC_DB_USER'], + 'IRONIC_DB_PASSWORD': os.environ['IRONIC_DB_PASSWORD'], + 'RABBITMQ_HOST':os.environ['RABBITMQ_HOST'], + 'RABBITMQ_PORT':os.environ['RABBITMQ_PORT'], + 'RABBITMQ_USER':os.environ['RABBITMQ_USER'], + 'RABBITMQ_PASSWORD':os.environ['RABBITMQ_PASSWORD'], + 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], + 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], + 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], + +} + +yaml.dump(ironic_configuration, sys.stdout, default_flow_style=False) +EOF + +########################################################################## +# Configure the TFTP service # +########################################################################## + +tftp_root="/srv/tftp_root/" # trailing slash is essential +mkdir -p "$ROOT/$tftp_root" + +install -D /dev/stdin -m 644 "$ROOT/usr/lib/systemd/system/tftp-hpa.service" << 'EOF' +[Unit] +Description=tftp service for booting kernels +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +EnvironmentFile=/etc/tftp-hpa.conf +ExecStart=/usr/sbin/in.tftpd $TFTP_OPTIONS ${TFTP_ROOT} +StandardInput=socket +StandardOutput=inherit +StandardError=journal + +[Install] +WantedBy=multi-user.target +EOF + +install -D /dev/stdin -m 644 "$ROOT/usr/lib/systemd/system/tftp-hpa.socket" << EOF +[Unit] +Description=Tftp server activation socket + +[Socket] +ListenDatagram=$MANAGEMENT_INTERFACE_IP_ADDRESS:69 +FreeBind=yes + +[Install] +WantedBy=sockets.target +EOF + +install -D -m 644 /dev/stdin "$ROOT"/etc/tftp-hpa.conf << EOF +TFTP_ROOT=$tftp_root +TFTP_OPTIONS="-v -v -v -v -v --map-file $tftp_root/map-file" +EOF + +install -D /dev/stdin -m 644 "$ROOT/$tftp_root"/map-file << EOF +r ^([^/]) $tftp_root\1 +r ^/tftpboot/ $tftp_root\2 +EOF + +cp "$ROOT"/usr/share/syslinux/pxelinux.0 "$ROOT/$tftp_root" diff --git a/extensions/openstack-keystone.configure b/extensions/openstack-keystone.configure new file mode 100644 index 00000000..6b011b14 --- /dev/null +++ b/extensions/openstack-keystone.configure @@ -0,0 +1,123 @@ +#!/bin/sh + +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . 
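+#
+# Like the other openstack-*.configure extensions, this script checks its
+# options, enables the relevant setup services, and then serialises a fixed
+# set of deployment variables into /etc/openstack/*.conf as YAML for those
+# services to read at boot. The generated file is plain key/value YAML,
+# hypothetically along the lines of:
+#
+#     KEYSTONE_DB_USER: keystone
+#     MANAGEMENT_INTERFACE_IP_ADDRESS: 10.0.1.1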
+ +set -e + +ROOT="$1" + +enable(){ + ln -sf "/usr/lib/systemd/system/$1.service" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" +} + +unnaceptable(){ + eval echo Unexpected value \$$1 for $1 >&2 + exit 1 +} + +check_bool(){ + case "$(eval echo \"\$$1\")" in + True|'') + eval "$1=true" + ;; + False) + eval "$1=false" + ;; + *) + unnaceptable "$1" + ;; + esac +} + +########################################################################## +# Check variables +########################################################################## + +check_bool KEYSTONE_ENABLE_SERVICE + +if ! "$KEYSTONE_ENABLE_SERVICE"; then + exit 0 +fi + +if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \ + -z "$KEYSTONE_ADMIN_PASSWORD" -o \ + -z "$KEYSTONE_DB_USER" -o \ + -z "$KEYSTONE_DB_PASSWORD" -o \ + -z "$RABBITMQ_HOST" -o \ + -z "$RABBITMQ_PORT" -o \ + -z "$RABBITMQ_USER" -o \ + -z "$RABBITMQ_PASSWORD" -o \ + -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ + -z "$CONTROLLER_HOST_ADDRESS" ]; then + echo Some options required for Keystone were defined, but not all. + exit 1 +fi + +python <<'EOF' +import socket +import sys +import os + +try: + socket.inet_pton(socket.AF_INET, os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS']) +except: + print "Error: MANAGEMENT_INTERFACE_IP_ADDRESS is not a valid IP" + sys.exit(1) +EOF + +###################################### +# Enable relevant openstack services # +###################################### + +enable openstack-keystone-setup +enable openstack-horizon-setup +enable postgres-server-setup + +########################################################################## +# Generate configuration file +########################################################################## + +OPENSTACK_DATA="$ROOT/etc/openstack" +mkdir -p "$OPENSTACK_DATA" + +python <<'EOF' >"$OPENSTACK_DATA/keystone.conf" +import os, sys, yaml + +keystone_configuration={ + 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], + 'KEYSTONE_ADMIN_PASSWORD': os.environ['KEYSTONE_ADMIN_PASSWORD'], + 'KEYSTONE_DB_USER': os.environ['KEYSTONE_DB_USER'], + 'KEYSTONE_DB_PASSWORD': os.environ['KEYSTONE_DB_PASSWORD'], + 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], + 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], + 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], + 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], + 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], + 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], +} + +yaml.dump(keystone_configuration, sys.stdout, default_flow_style=False) +EOF + +python << 'EOF' > "$OPENSTACK_DATA/postgres.conf" +import os, sys, yaml + +postgres_configuration={ + 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], +} + +yaml.dump(postgres_configuration, sys.stdout, default_flow_style=False) +EOF diff --git a/extensions/openstack-network.configure b/extensions/openstack-network.configure new file mode 100644 index 00000000..10be5a1c --- /dev/null +++ b/extensions/openstack-network.configure @@ -0,0 +1,50 @@ +#!/bin/sh + +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +set -e + +ROOT="$1" + +enable(){ + ln -sf "/usr/lib/systemd/system/$1.service" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" +} + +################### +# Enable services # +################### + +enable openvswitch-setup +enable openstack-network-setup + +########################################################################## +# Generate config variable shell snippet +########################################################################## + +OPENSTACK_DATA="$ROOT/etc/openstack" +mkdir -p "$OPENSTACK_DATA" + +python <<'EOF' >"$OPENSTACK_DATA/network.conf" +import os, sys, yaml + +network_configuration = {} + +optional_keys = ('EXTERNAL_INTERFACE',) + +network_configuration.update((k, os.environ[k]) for k in optional_keys if k in os.environ) + +yaml.dump(network_configuration, sys.stdout, default_flow_style=False) +EOF diff --git a/extensions/openstack-neutron.configure b/extensions/openstack-neutron.configure new file mode 100644 index 00000000..210222db --- /dev/null +++ b/extensions/openstack-neutron.configure @@ -0,0 +1,138 @@ +#!/bin/sh + +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +set -e + +ROOT="$1" + +enable(){ + ln -sf "/usr/lib/systemd/system/openstack-neutron-$1.service" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-neutron-$1.service" +} + +unnaceptable(){ + eval echo Unexpected value \$$1 for $1 >&2 + exit 1 +} + +check_bool(){ + case "$(eval echo \"\$$1\")" in + True|'') + eval "$1=true" + ;; + False) + eval "$1=false" + ;; + *) + unnaceptable "$1" + ;; + esac +} + +########################################################################## +# Check variables +########################################################################## + +check_bool NEUTRON_ENABLE_CONTROLLER +check_bool NEUTRON_ENABLE_MANAGER +check_bool NEUTRON_ENABLE_AGENT + +if ! "$NEUTRON_ENABLE_CONTROLLER" && \ + ! "$NEUTRON_ENABLE_MANAGER" && \ + ! "$NEUTRON_ENABLE_AGENT"; then + exit 0 +fi + +if [ -z "$NEUTRON_SERVICE_USER" -o \ + -z "$NEUTRON_SERVICE_PASSWORD" -o \ + -z "$NEUTRON_DB_USER" -o \ + -z "$NEUTRON_DB_PASSWORD" -o \ + -z "$METADATA_PROXY_SHARED_SECRET" -o \ + -z "$NOVA_SERVICE_USER" -o \ + -z "$NOVA_SERVICE_PASSWORD" -o \ + -z "$RABBITMQ_HOST" -o \ + -z "$RABBITMQ_USER" -o \ + -z "$RABBITMQ_PASSWORD" -o \ + -z "$RABBITMQ_PORT" -o \ + -z "$CONTROLLER_HOST_ADDRESS" -o \ + -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ + -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then + echo Some options required for Neutron were defined, but not all. + exit 1 +fi + +############################################# +# Ensure /var/run is an appropriate symlink # +############################################# + +if ! 
link="$(readlink "$ROOT/var/run")" || [ "$link" != ../run ]; then + rm -rf "$ROOT/var/run" + ln -s ../run "$ROOT/var/run" +fi + +################### +# Enable services # +################### + +if "$NEUTRON_ENABLE_CONTROLLER"; then + enable config-setup + enable db-setup + enable server +fi + +if "$NEUTRON_ENABLE_MANAGER"; then + enable config-setup + enable ovs-cleanup + enable dhcp-agent + enable l3-agent + enable plugin-openvswitch-agent + enable metadata-agent +fi + +if "$NEUTRON_ENABLE_AGENT"; then + enable config-setup + enable plugin-openvswitch-agent +fi + +########################################################################## +# Generate config variable shell snippet +########################################################################## + +OPENSTACK_DATA="$ROOT/etc/openstack" +mkdir -p "$OPENSTACK_DATA" + +python <<'EOF' >"$OPENSTACK_DATA/neutron.conf" +import os, sys, yaml + +nova_configuration={ + 'NEUTRON_SERVICE_USER': os.environ['NEUTRON_SERVICE_USER'], + 'NEUTRON_SERVICE_PASSWORD': os.environ['NEUTRON_SERVICE_PASSWORD'], + 'NEUTRON_DB_USER': os.environ['NEUTRON_DB_USER'], + 'NEUTRON_DB_PASSWORD': os.environ['NEUTRON_DB_PASSWORD'], + 'METADATA_PROXY_SHARED_SECRET': os.environ['METADATA_PROXY_SHARED_SECRET'], + 'NOVA_SERVICE_USER': os.environ['NOVA_SERVICE_USER'], + 'NOVA_SERVICE_PASSWORD': os.environ['NOVA_SERVICE_PASSWORD'], + 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], + 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], + 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], + 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], + 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], + 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], + 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], +} + +yaml.dump(nova_configuration, sys.stdout, default_flow_style=False) +EOF diff --git a/extensions/openstack-nova.configure b/extensions/openstack-nova.configure new file mode 100644 index 00000000..213f1852 --- /dev/null +++ b/extensions/openstack-nova.configure @@ -0,0 +1,168 @@ +#!/bin/sh + +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +set -e + +ROOT="$1" + +enable(){ + ln -sf "/usr/lib/systemd/system/openstack-nova-$1.service" \ + "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-nova-$1.service" +} + +unnaceptable(){ + eval echo Unexpected value \$$1 for $1 >&2 + exit 1 +} + +check_bool(){ + case "$(eval echo \"\$$1\")" in + True|'') + eval "$1=true" + ;; + False) + eval "$1=false" + ;; + *) + unnaceptable "$1" + ;; + esac +} + +########################################################################## +# Check variables +########################################################################## + +check_bool NOVA_ENABLE_CONTROLLER +check_bool NOVA_ENABLE_COMPUTE + +if ! "$NOVA_ENABLE_CONTROLLER" && \ + ! 
"$NOVA_ENABLE_COMPUTE"; then + exit 0 +fi + +if [ -z "$NOVA_SERVICE_USER" -o \ + -z "$NOVA_SERVICE_PASSWORD" -o \ + -z "$NOVA_DB_USER" -o \ + -z "$NOVA_DB_PASSWORD" -o \ + -z "$NOVA_VIRT_TYPE" -o \ + -z "$NEUTRON_SERVICE_USER" -o \ + -z "$NEUTRON_SERVICE_PASSWORD" -o \ + -z "$IRONIC_SERVICE_USER" -a \ + -z "$IRONIC_SERVICE_PASSWORD" -a \ + -z "$METADATA_PROXY_SHARED_SECRET" -o \ + -z "$RABBITMQ_HOST" -o \ + -z "$RABBITMQ_USER" -o \ + -z "$RABBITMQ_PASSWORD" -o \ + -z "$RABBITMQ_PORT" -o \ + -z "$CONTROLLER_HOST_ADDRESS" -o \ + -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ + -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then + echo Some options required for Nova were defined, but not all. + exit 1 +fi + +############################################### +# Enable libvirtd and libvirt-guests services # +############################################### + +wants_dir="$ROOT"/usr/lib/systemd/system/multi-user.target.wants +mkdir -p "$wants_dir" +mkdir -p "$ROOT"/var/lock/subsys +ln -sf ../libvirtd.service "$wants_dir/libvirtd.service" + +###################################### +# Enable relevant openstack services # +###################################### + +if "$NOVA_ENABLE_CONTROLLER" || "$NOVA_ENABLE_COMPUTE"; then + enable config-setup +fi +if "$NOVA_ENABLE_CONTROLLER" && ! "$NOVA_ENABLE_COMPUTE"; then + enable conductor +fi +if "$NOVA_ENABLE_COMPUTE"; then + enable compute +fi +if "$NOVA_ENABLE_CONTROLLER"; then + for service in db-setup api cert consoleauth novncproxy scheduler serialproxy; do + enable "$service" + done +fi + +########################################################################## +# Change iprange for the interal libvirt to avoid clashes +# with eth0 ip range +########################################################################## + +sed -i "s/192\.168\.122\./192\.168\.1\./g" \ + "$ROOT"/etc/libvirt/qemu/networks/default.xml + + +########################################################################## +# Generate configuration file +########################################################################## + +case "$NOVA_BAREMETAL_SCHEDULING" in + True|true|yes) + export COMPUTE_MANAGER=ironic.nova.compute.manager.ClusteredComputeManager + export RESERVED_HOST_MEMORY_MB=0 + export SCHEDULER_HOST_MANAGER=nova.scheduler.ironic_host_manager.IronicHostManager + export RAM_ALLOCATION_RATIO=1.0 + export COMPUTE_DRIVER=nova.virt.ironic.IronicDriver + ;; + *) + export COMPUTE_MANAGER=nova.compute.manager.ComputeManager + export RESERVED_HOST_MEMORY_MB=512 + export SCHEDULER_HOST_MANAGER=nova.scheduler.host_manager.HostManager + export RAM_ALLOCATION_RATIO=1.5 + export COMPUTE_DRIVER=libvirt.LibvirtDriver + ;; +esac + +OPENSTACK_DATA="$ROOT/etc/openstack" +mkdir -p "$OPENSTACK_DATA" + +python <<'EOF' >"$OPENSTACK_DATA/nova.conf" +import os, sys, yaml + +nova_configuration={ + 'NOVA_SERVICE_USER': os.environ['NOVA_SERVICE_USER'], + 'NOVA_SERVICE_PASSWORD': os.environ['NOVA_SERVICE_PASSWORD'], + 'NOVA_DB_USER': os.environ['NOVA_DB_USER'], + 'NOVA_DB_PASSWORD': os.environ['NOVA_DB_PASSWORD'], + 'NOVA_VIRT_TYPE': os.environ['NOVA_VIRT_TYPE'], + 'COMPUTE_MANAGER': os.environ['COMPUTE_MANAGER'], + 'RESERVED_HOST_MEMORY_MB': os.environ['RESERVED_HOST_MEMORY_MB'], + 'SCHEDULER_HOST_MANAGER': os.environ['SCHEDULER_HOST_MANAGER'], + 'RAM_ALLOCATION_RATIO': os.environ['RAM_ALLOCATION_RATIO'], + 'COMPUTE_DRIVER': os.environ['COMPUTE_DRIVER'], + 'NEUTRON_SERVICE_USER': os.environ['NEUTRON_SERVICE_USER'], + 'NEUTRON_SERVICE_PASSWORD': os.environ['NEUTRON_SERVICE_PASSWORD'], + 'IRONIC_SERVICE_USER': 
os.environ['IRONIC_SERVICE_USER'], + 'IRONIC_SERVICE_PASSWORD': os.environ['IRONIC_SERVICE_PASSWORD'], + 'METADATA_PROXY_SHARED_SECRET': os.environ['METADATA_PROXY_SHARED_SECRET'], + 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], + 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], + 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], + 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], + 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], + 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], + 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], +} + +yaml.dump(nova_configuration, sys.stdout, default_flow_style=False) +EOF diff --git a/extensions/openstack-swift-controller.configure b/extensions/openstack-swift-controller.configure new file mode 100644 index 00000000..424ab57b --- /dev/null +++ b/extensions/openstack-swift-controller.configure @@ -0,0 +1,49 @@ +#!/bin/bash +# +# Copyright © 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + + +set -e + +export ROOT="$1" + +MANDATORY_OPTIONS="SWIFT_ADMIN_PASSWORD KEYSTONE_TEMPORARY_ADMIN_TOKEN" + +for option in $MANDATORY_OPTIONS +do + if ! [[ -v $option ]] + then + missing_option=True + echo "Required option $option isn't set!" >&2 + fi +done + +if [[ $missing_option = True ]]; then exit 1; fi + +mkdir -p "$ROOT/usr/lib/systemd/system/multi-user.target.wants" # ensure this exists before we make symlinks + +ln -s "/usr/lib/systemd/system/swift-controller-setup.service" \ + "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-controller-setup.service" +ln -s "/usr/lib/systemd/system/memcached.service" \ + "$ROOT/usr/lib/systemd/system/multi-user.target.wants/memcached.service" +ln -s "/usr/lib/systemd/system/openstack-swift-proxy.service" \ + "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-proxy.service" + +cat << EOF > "$ROOT"/usr/share/openstack/swift-controller-vars.yml +--- +SWIFT_ADMIN_PASSWORD: $SWIFT_ADMIN_PASSWORD +MANAGEMENT_INTERFACE_IP_ADDRESS: $MANAGEMENT_INTERFACE_IP_ADDRESS +KEYSTONE_TEMPORARY_ADMIN_TOKEN: $KEYSTONE_TEMPORARY_ADMIN_TOKEN +EOF diff --git a/extensions/openstack.check b/extensions/openstack.check new file mode 100755 index 00000000..a3379763 --- /dev/null +++ b/extensions/openstack.check @@ -0,0 +1,90 @@ +#!/usr/bin/python +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . 
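+#
+# The credentials check below drives python-keystoneclient; when it fails,
+# the equivalent manual probe of a Keystone v2.0 endpoint can help with
+# debugging (URL and credentials here are hypothetical):
+#
+#     curl -s -X POST http://openstack.example.com:5000/v2.0/tokens \
+#         -H 'Content-Type: application/json' \
+#         -d '{"auth": {"tenantName": "demo",
+#                       "passwordCredentials": {"username": "demo",
+#                                               "password": "secret"}}}'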
+
+'''Preparatory checks for Morph 'openstack' write extension'''
+
+import cliapp
+import os
+import urlparse
+
+import keystoneclient.exceptions
+import keystoneclient.v2_0
+
+import morphlib.writeexts
+
+
+class OpenStackCheckExtension(morphlib.writeexts.WriteExtension):
+
+    def process_args(self, args):
+        if len(args) != 1:
+            raise cliapp.AppException('Wrong number of command line args')
+
+        self.require_btrfs_in_deployment_host_kernel()
+
+        upgrade = self.get_environment_boolean('UPGRADE')
+        if upgrade:
+            raise cliapp.AppException(
+                'Use the `ssh-rsync` write extension to deploy upgrades to an '
+                'existing remote system.')
+
+        location = args[0]
+        self.check_location(location)
+
+        self.check_imagename()
+        self.check_openstack_parameters(self._get_auth_parameters(location))
+
+    def _get_auth_parameters(self, location):
+        '''Check that the needed environment variables are set, and
+        return them all.
+
+        The environment variables are described in the class documentation.
+        '''
+
+        auth_keys = {'OPENSTACK_USER': 'username',
+                     'OPENSTACK_TENANT': 'tenant_name',
+                     'OPENSTACK_PASSWORD': 'password'}
+
+        for key in auth_keys:
+            if os.environ.get(key, '') == '':
+                raise cliapp.AppException(key + ' was not given')
+
+        auth_params = {auth_keys[key]: os.environ[key] for key in auth_keys}
+        auth_params['auth_url'] = location
+        return auth_params
+
+    def check_imagename(self):
+        if os.environ.get('OPENSTACK_IMAGENAME', '') == '':
+            raise cliapp.AppException('OPENSTACK_IMAGENAME was not given')
+
+    def check_location(self, location):
+        x = urlparse.urlparse(location)
+        if x.scheme not in ('http', 'https'):
+            raise cliapp.AppException('URL scheme must be http or https '
+                                      'in %s' % location)
+        if x.path not in ('/v2.0', '/v2.0/'):
+            raise cliapp.AppException('API version must be v2.0 in %s'
+                                      % location)
+
+    def check_openstack_parameters(self, auth_params):
+        '''Check that we can connect to and authenticate with OpenStack.'''
+
+        self.status(msg='Checking OpenStack credentials...')
+
+        try:
+            keystoneclient.v2_0.Client(**auth_params)
+        except keystoneclient.exceptions.Unauthorized:
+            errmsg = ('Failed to authenticate with OpenStack '
+                      '(are your credentials correct?)')
+            raise cliapp.AppException(errmsg)
+
+
+OpenStackCheckExtension().run()
diff --git a/extensions/openstack.write b/extensions/openstack.write
new file mode 100755
index 00000000..67e07c18
--- /dev/null
+++ b/extensions/openstack.write
@@ -0,0 +1,93 @@
+#!/usr/bin/python
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
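+#
+# The configure_openstack_image method below shells out to the `glance`
+# client; the command line it assembles is equivalent to running, by hand
+# (all values hypothetical):
+#
+#     glance --os-username demo --os-tenant-name demo \
+#         --os-password secret --os-auth-url http://host:5000/v2.0 \
+#         image-create --name=my-system --disk-format=raw \
+#         --container-format bare --file /tmp/disk.img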
+
+
+'''A Morph deployment write extension for deploying to OpenStack.'''
+
+
+import cliapp
+import os
+import tempfile
+import urlparse
+
+import morphlib.writeexts
+
+
+class OpenStackWriteExtension(morphlib.writeexts.WriteExtension):
+
+    '''See openstack.write.help for documentation.'''
+
+    def process_args(self, args):
+        if len(args) != 2:
+            raise cliapp.AppException('Wrong number of command line args')
+
+        temp_root, location = args
+
+        os_params = self.get_openstack_parameters()
+
+        fd, raw_disk = tempfile.mkstemp()
+        os.close(fd)
+        self.create_local_system(temp_root, raw_disk)
+        self.status(msg='Temporary disk image has been created at %s'
+                        % raw_disk)
+
+        self.set_extlinux_root_to_virtio(raw_disk)
+
+        self.configure_openstack_image(raw_disk, location, os_params)
+
+    def set_extlinux_root_to_virtio(self, raw_disk):
+        '''Re-configure extlinux to use virtio disks.'''
+        self.status(msg='Updating extlinux.conf')
+        with self.mount(raw_disk) as mp:
+            path = os.path.join(mp, 'extlinux.conf')
+
+            with open(path) as f:
+                extlinux_conf = f.read()
+
+            extlinux_conf = extlinux_conf.replace('root=/dev/sda',
+                                                  'root=/dev/vda')
+            with open(path, 'w') as f:
+                f.write(extlinux_conf)
+
+    def get_openstack_parameters(self):
+        '''Get the environment variables needed.
+
+        The environment variables are described in the class documentation.
+        '''
+
+        keys = ('OPENSTACK_USER', 'OPENSTACK_TENANT',
+                'OPENSTACK_IMAGENAME', 'OPENSTACK_PASSWORD')
+        return (os.environ[key] for key in keys)
+
+    def configure_openstack_image(self, raw_disk, auth_url, os_params):
+        '''Configure the image in OpenStack using glance-client.'''
+        self.status(msg='Configuring OpenStack image...')
+
+        username, tenant_name, image_name, password = os_params
+        cmdline = ['glance',
+                   '--os-username', username,
+                   '--os-tenant-name', tenant_name,
+                   '--os-password', password,
+                   '--os-auth-url', auth_url,
+                   'image-create',
+                   '--name=%s' % image_name,
+                   '--disk-format=raw',
+                   '--container-format', 'bare',
+                   '--file', raw_disk]
+        cliapp.runcmd(cmdline)
+
+        self.status(msg='Image configured.')
+
+
+OpenStackWriteExtension().run()
diff --git a/extensions/openstack.write.help b/extensions/openstack.write.help
new file mode 100644
index 00000000..26983060
--- /dev/null
+++ b/extensions/openstack.write.help
@@ -0,0 +1,51 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+    Deploy a Baserock system as a *new* OpenStack virtual machine.
+    (Use the `ssh-rsync` write extension to deploy upgrades to an *existing*
+    VM.)
+
+    Deploys the system to the OpenStack host using python-glanceclient.
+
+    Parameters:
+
+    * location: the authentication URL of the OpenStack server, using the
+      following syntax:
+
+          http://HOST:PORT/VERSION
+
+      where:
+
+      * HOST is the host running OpenStack
+      * PORT is the port on which OpenStack listens for authentication
+        requests.
+ * VERSION is the authentication version of OpenStack (Only v2.0 + supported) + + * OPENSTACK_USER=username: the username to use in the `--os-username` + argument to `glance`. + + * OPENSTACK_TENANT=tenant: the project name to use in the + `--os-tenant-name` argument to `glance`. + + * OPENSTACK_IMAGENAME=imagename: the name of the image to use in the + `--name` argument to `glance`. + + * OPENSTACK_PASSWORD=password: the password of the OpenStack user. (We + recommend passing this on the command-line, rather than setting an + environment variable or storing it in a cluster cluster definition file.) + + (See `morph help deploy` for details of how to pass parameters to write + extensions) diff --git a/extensions/pxeboot.check b/extensions/pxeboot.check new file mode 100755 index 00000000..611708a9 --- /dev/null +++ b/extensions/pxeboot.check @@ -0,0 +1,86 @@ +#!/usr/bin/python + +import itertools +import os +import subprocess +import sys +flatten = itertools.chain.from_iterable + +def powerset(iterable): + "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)" + s = list(iterable) + return flatten(itertools.combinations(s, r) for r in range(len(s)+1)) + +valid_option_sets = frozenset(( + ('spawn-novlan', frozenset(('PXEBOOT_DEPLOYER_INTERFACE',))), + ('spawn-vlan', frozenset(('PXEBOOT_DEPLOYER_INTERFACE', 'PXEBOOT_VLAN'))), + ('existing-dhcp', frozenset(('PXEBOOT_DEPLOYER_INTERFACE', + 'PXEBOOT_CONFIG_TFTP_ADDRESS'))), + ('existing-server', frozenset(('PXEBOOT_CONFIG_TFTP_ADDRESS', + 'PXEBOOT_ROOTFS_RSYNC_ADDRESS'))), +)) +valid_modes = frozenset(mode for mode, opt_set in valid_option_sets) + + +def compute_matches(env): + complete_matches = set() + for mode, opt_set in valid_option_sets: + if all(k in env for k in opt_set): + complete_matches.add(opt_set) + return complete_matches + +complete_matches = compute_matches(os.environ) + +def word_separate_options(options): + assert options + s = options.pop(-1) + if options: + s = '%s and %s' % (', '.join(options), s) + return s + + +valid_options = frozenset(flatten(opt_set for (mode, opt_set) + in valid_option_sets)) +matched_options = frozenset(o for o in valid_options + if o in os.environ) +if not complete_matches: + addable_sets = frozenset(frozenset(os) - matched_options for os in + valid_options + if frozenset(os) - matched_options) + print('Please provide %s' % ' or '.join( + word_separate_options(list(opt_set)) + for opt_set in addable_sets if opt_set)) + sys.exit(1) +elif len(complete_matches) > 1: + removable_sets = frozenset(matched_options - frozenset(os) for os in + powerset(matched_options) + if len(compute_matches(os)) == 1) + print('WARNING: Following options might not be needed: %s' % ' or '.join( + word_separate_options(list(opt_set)) + for opt_set in removable_sets if opt_set)) + +if 'PXEBOOT_MODE' in os.environ: + mode = os.environ['PXEBOOT_MODE'] +else: + try: + mode, = (mode for (mode, opt_set) in valid_option_sets + if all(o in os.environ for o in opt_set)) + + except ValueError as e: + print ('More than one candidate for PXEBOOT_MODE, please ' + 'set a value for it. Type `morph help pxeboot.write for ' + 'more info') + sys.exit(1) + +if mode not in valid_modes: + print('%s is not a valid PXEBOOT_MODE' % mode) + sys.exit(1) + +if mode != 'existing-server': + with open(os.devnull, 'w') as devnull: + if subprocess.call(['systemctl', 'is-active', 'nfs-server'], + stdout=devnull) != 0: + print ('ERROR: nfs-server.service is not running and is needed ' + 'for this deployment. 
Please, run `systemctl start nfs-server` ' + 'and try `morph deploy` again.') + sys.exit(1) diff --git a/extensions/pxeboot.write b/extensions/pxeboot.write new file mode 100644 index 00000000..3a12ebcc --- /dev/null +++ b/extensions/pxeboot.write @@ -0,0 +1,755 @@ +#!/usr/bin/env python + + +import collections +import contextlib +import errno +import itertools +import logging +import os +import select +import signal +import shutil +import socket +import string +import StringIO +import subprocess +import sys +import tempfile +import textwrap +import urlparse + +import cliapp + +import morphlib + + +def _int_to_quad_dot(i): + return '.'.join(( + str(i >> 24 & 0xff), + str(i >> 16 & 0xff), + str(i >> 8 & 0xff), + str(i & 0xff))) + + +def _quad_dot_to_int(s): + i = 0 + for octet in s.split('.'): + i <<= 8 + i += int(octet, 10) + return i + + +def _netmask_to_prefixlen(mask): + bs = '{:032b}'.format(mask) + prefix = bs.rstrip('0') + if '0' in prefix: + raise ValueError('abnormal netmask: %s' % + _int_to_quad_dot(mask)) + return len(prefix) + + +def _get_routes(): + routes = [] + with open('/proc/net/route', 'r') as f: + for line in list(f)[1:]: + fields = line.split() + destination, flags, mask = fields[1], fields[3], fields[7] + flags = int(flags, 16) + if flags & 2: + # default route, ignore + continue + destination = socket.ntohl(int(destination, 16)) + mask = socket.ntohl(int(mask, 16)) + prefixlen = _netmask_to_prefixlen(mask) + routes.append((destination, prefixlen)) + return routes + + +class IPRange(object): + def __init__(self, prefix, prefixlen): + self.prefixlen = prefixlen + mask = (1 << prefixlen) - 1 + self.mask = mask << (32 - prefixlen) + self.prefix = prefix & self.mask + @property + def bitstring(self): + return ('{:08b}' * 4).format( + self.prefix >> 24 & 0xff, + self.prefix >> 16 & 0xff, + self.prefix >> 8 & 0xff, + self.prefix & 0xff + )[:self.prefixlen] + def startswith(self, other_range): + return self.bitstring.startswith(other_range.bitstring) + + +def find_subnet(valid_ranges, invalid_ranges): + for vr in valid_ranges: + known_subnets = set(ir for ir in invalid_ranges if ir.startswith(vr)) + prefixlens = set(r.prefixlen for r in known_subnets) + prefixlens.add(32 - 2) # need at least 4 addresses in subnet + prefixlen = min(prefixlens) + if prefixlen <= vr.prefixlen: + # valid subnet is full, move on to next + continue + subnetlen = prefixlen - vr.prefixlen + for prefix in (subnetid + vr.prefix + for subnetid in xrange(1 << subnetlen)): + if any(subnet.prefix == prefix for subnet in known_subnets): + continue + return prefix, prefixlen + + +def _normalise_macaddr(macaddr): + '''pxelinux.0 wants the mac address to be lowercase and - separated''' + digits = (c for c in macaddr.lower() if c in string.hexdigits) + nibble_pairs = grouper(digits, 2) + return '-'.join(''.join(byte) for byte in nibble_pairs) + + +@contextlib.contextmanager +def executor(target_pid): + 'Kills a process if its parent dies' + read_fd, write_fd = os.pipe() + helper_pid = os.fork() + if helper_pid == 0: + try: + os.close(write_fd) + while True: + rlist, _, _ = select.select([read_fd], [], []) + if read_fd in rlist: + d = os.read(read_fd, 1) + if not d: + os.kill(target_pid, signal.SIGKILL) + if d in ('', 'Q'): + os._exit(0) + else: + os._exit(1) + except BaseException as e: + import traceback + traceback.print_exc() + os._exit(1) + os.close(read_fd) + yield + os.write(write_fd, 'Q') + os.close(write_fd) + + +def grouper(iterable, n, fillvalue=None): + "Collect data into fixed-length chunks or 
blocks" + # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx" + args = [iter(iterable)] * n + return itertools.izip_longest(*args, fillvalue=fillvalue) + + +class PXEBoot(morphlib.writeexts.WriteExtension): + @contextlib.contextmanager + def _vlan(self, interface, vlan): + viface = '%s.%s' % (interface, vlan) + self.status(msg='Creating vlan %(viface)s', viface=viface) + subprocess.check_call(['vconfig', 'add', interface, str(vlan)]) + try: + yield viface + finally: + self.status(msg='Destroying vlan %(viface)s', viface=viface) + subprocess.call(['vconfig', 'rem', viface]) + + @contextlib.contextmanager + def _static_ip(self, iface): + valid_ranges = set(( + IPRange(_quad_dot_to_int('192.168.0.0'), 16), + IPRange(_quad_dot_to_int('172.16.0.0'), 12), + IPRange(_quad_dot_to_int('10.0.0.0'), 8), + )) + invalid_ranges = set(IPRange(prefix, prefixlen) + for (prefix, prefixlen) in _get_routes()) + prefix, prefixlen = find_subnet(valid_ranges, invalid_ranges) + netaddr = prefix + dhcp_server_ip = netaddr + 1 + client_ip = netaddr + 2 + broadcast_ip = prefix | ((1 << (32 - prefixlen)) - 1) + self.status(msg='Assigning ip address %(ip)s/%(prefixlen)d to ' + 'iface %(iface)s', + ip=_int_to_quad_dot(dhcp_server_ip), prefixlen=prefixlen, + iface=iface) + subprocess.check_call(['ip', 'addr', 'add', + '{}/{}'.format(_int_to_quad_dot(dhcp_server_ip), + prefixlen), + 'broadcast', _int_to_quad_dot(broadcast_ip), + 'scope', 'global', + 'dev', iface]) + try: + yield (dhcp_server_ip, client_ip, broadcast_ip) + finally: + self.status(msg='Removing ip addresses from iface %(iface)s', + iface=iface) + subprocess.call(['ip', 'addr', 'flush', 'dev', iface]) + + @contextlib.contextmanager + def _up_interface(self, iface): + self.status(msg='Bringing interface %(iface)s up', iface=iface) + subprocess.check_call(['ip', 'link', 'set', iface, 'up']) + try: + yield + finally: + self.status(msg='Bringing interface %(iface)s down', iface=iface) + subprocess.call(['ip', 'link', 'set', iface, 'down']) + + @contextlib.contextmanager + def static_ip(self, interface): + with self._static_ip(iface=interface) as (host_ip, client_ip, + broadcast_ip), \ + self._up_interface(iface=interface): + yield (_int_to_quad_dot(host_ip), + _int_to_quad_dot(client_ip), + _int_to_quad_dot(broadcast_ip)) + + @contextlib.contextmanager + def vlan(self, interface, vlan): + with self._vlan(interface=interface, vlan=vlan) as viface, \ + self.static_ip(interface=viface) \ + as (host_ip, client_ip, broadcast_ip): + yield host_ip, client_ip, broadcast_ip + + @contextlib.contextmanager + def _tempdir(self): + td = tempfile.mkdtemp() + print 'Created tempdir:', td + try: + yield td + finally: + shutil.rmtree(td, ignore_errors=True) + + @contextlib.contextmanager + def _remote_tempdir(self, hostname, template): + persist = os.environ.get('PXE_INSTALLER') in ('no', 'False') + td = cliapp.ssh_runcmd(hostname, ['mktemp', '-d', template]).strip() + try: + yield td + finally: + if not persist: + cliapp.ssh_runcmd(hostname, ['find', td, '-delete']) + + def _serve_tftpd(self, sock, host, port, interface, tftproot): + self.settings.progname = 'tftp server' + self._set_process_name() + while True: + logging.debug('tftpd waiting for connections') + # recvfrom with MSG_PEEK is how you accept UDP connections + _, peer = sock.recvfrom(0, socket.MSG_PEEK) + conn = sock + logging.debug('Connecting socket to peer: ' + repr(peer)) + conn.connect(peer) + # The existing socket is now only serving that peer, so we need to + # bind a new UDP socket to the wildcard address, which 
needs the + # port to be in REUSEADDR mode. + conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + logging.debug('Binding replacement socket to ' + repr((host, port))) + sock.bind((host, port)) + + logging.debug('tftpd server handing connection to tftpd') + tftpd_serve = ['tftpd', '-rl', tftproot] + ret = subprocess.call(args=tftpd_serve, stdin=conn, + stdout=conn, stderr=None, close_fds=True) + # It's handy to turn off REUSEADDR after the rebinding, + # so we can protect against future bind attempts on this port. + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 0) + logging.debug('tftpd exited %d' % ret) + os._exit(0) + + @contextlib.contextmanager + def _spawned_tftp_server(self, tftproot, host_ip, interface, tftp_port=0): + # inetd-style launchers tend to bind UDP ports with SO_REUSEADDR, + # because they need to have multiple ports bound, one for recieving + # all connection attempts on that port, and one for each concurrent + # connection with a peer + # this makes detecting whether there's a tftpd running difficult, so + # we'll instead use an ephemeral port and configure the PXE boot to + # use that tftp server for the kernel + s = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) + s.bind((host_ip, tftp_port)) + host, port = s.getsockname() + self.status(msg='Bound listen socket to %(host)s, %(port)s', + host=host, port=port) + pid = os.fork() + if pid == 0: + try: + self._serve_tftpd(sock=s, host=host, port=port, + interface=interface, tftproot=tftproot) + except BaseException as e: + import traceback + traceback.print_exc() + os._exit(1) + s.close() + with executor(pid): + try: + yield port + finally: + self.status(msg='Killing tftpd listener pid=%(pid)d', + pid=pid) + os.kill(pid, signal.SIGKILL) + + @contextlib.contextmanager + def tftp_server(self, host_ip, interface, tftp_port=0): + with self._tempdir() as tftproot, \ + self._spawned_tftp_server(tftproot=tftproot, host_ip=host_ip, + interface=interface, + tftp_port=tftp_port) as tftp_port: + self.status(msg='Serving tftp root %(tftproot)s, on port %(port)d', + port=tftp_port, tftproot=tftproot) + yield tftp_port, tftproot + + @contextlib.contextmanager + def _local_copy(self, src, dst): + self.status(msg='Installing %(src)s to %(dst)s', + src=src, dst=dst) + shutil.copy2(src=src, dst=dst) + try: + yield + finally: + self.status(msg='Removing %(dst)s', dst=dst) + os.unlink(dst) + + @contextlib.contextmanager + def _local_symlink(self, src, dst): + os.symlink(src, dst) + try: + yield + finally: + os.unlink(dst) + + def local_pxelinux(self, tftproot): + return self._local_copy('/usr/share/syslinux/pxelinux.0', + os.path.join(tftproot, 'pxelinux.0')) + + def local_kernel(self, rootfs, tftproot): + return self._local_copy(os.path.join(rootfs, 'boot/vmlinuz'), + os.path.join(tftproot, 'kernel')) + + @contextlib.contextmanager + def _remote_copy(self, hostname, src, dst): + persist = os.environ.get('PXE_INSTALLER') in ('no', 'False') + with open(src, 'r') as f: + cliapp.ssh_runcmd(hostname, + ['install', '-D', '-m644', '/proc/self/fd/0', + dst], stdin=f, stdout=None, stderr=None) + try: + yield + finally: + if not persist: + cliapp.ssh_runcmd(hostname, ['rm', dst]) + + @contextlib.contextmanager + def _remote_symlink(self, hostname, src, dst): + persist = os.environ.get('PXE_INSTALLER') in ('no', 'False') + cliapp.ssh_runcmd(hostname, + ['ln', '-s', '-f', src, dst], + 
stdin=None, stdout=None, stderr=None)
+        try:
+            yield
+        finally:
+            if not persist:
+                cliapp.ssh_runcmd(hostname, ['rm', '-f', dst])
+
+    @contextlib.contextmanager
+    def remote_kernel(self, rootfs, tftp_url, macaddr):
+        for name in ('vmlinuz', 'zImage', 'uImage'):
+            kernel_path = os.path.join(rootfs, 'boot', name)
+            if os.path.exists(kernel_path):
+                break
+        else:
+            raise cliapp.AppException('Failed to locate kernel')
+        url = urlparse.urlsplit(tftp_url)
+        basename = '{}-kernel'.format(_normalise_macaddr(macaddr))
+        target_path = os.path.join(url.path, basename)
+        with self._remote_copy(hostname=url.hostname, src=kernel_path,
+                               dst=target_path):
+            yield basename
+
+    @contextlib.contextmanager
+    def remote_fdt(self, rootfs, tftp_url, macaddr):
+        fdt_rel_path = os.environ.get('DTB_PATH', '')
+        if fdt_rel_path == '':
+            yield
+            return
+        fdt_abs_path = os.path.join(rootfs, fdt_rel_path)
+        if not os.path.exists(fdt_abs_path):
+            raise cliapp.AppException('Failed to locate Flattened Device Tree')
+        url = urlparse.urlsplit(tftp_url)
+        basename = '{}-fdt'.format(_normalise_macaddr(macaddr))
+        target_path = os.path.join(url.path, basename)
+        with self._remote_copy(hostname=url.hostname, src=fdt_abs_path,
+                               dst=target_path):
+            yield basename
+
+    @contextlib.contextmanager
+    def local_nfsroot(self, rootfs, target_ip):
+        nfsroot = target_ip + ':' + rootfs
+        self.status(msg='Exporting %(nfsroot)s as local nfsroot',
+                    nfsroot=nfsroot)
+        cliapp.runcmd(['exportfs', '-o', 'ro,insecure,no_root_squash',
+                       nfsroot])
+        try:
+            yield
+        finally:
+            self.status(msg='Removing %(nfsroot)s from local nfsroots',
+                        nfsroot=nfsroot)
+            cliapp.runcmd(['exportfs', '-u', nfsroot])
+
+    @contextlib.contextmanager
+    def remote_nfsroot(self, rootfs, rsync_url, macaddr):
+        url = urlparse.urlsplit(rsync_url)
+        template = os.path.join(url.path,
+                                _normalise_macaddr(macaddr) + '.XXXXXXXXXX')
+        with self._remote_tempdir(hostname=url.hostname, template=template) \
+        as tempdir:
+            nfsroot = urlparse.urlunsplit((url.scheme, url.netloc, tempdir,
+                                           url.query, url.fragment))
+            cliapp.runcmd(['rsync', '-asSPH', '--delete', rootfs, nfsroot],
+                          stdin=None, stdout=open(os.devnull, 'w'),
+                          stderr=None)
+            yield os.path.join(os.path.basename(tempdir),
+                               os.path.basename(rootfs))
+
+    @staticmethod
+    def _write_pxe_config(fh, kernel_tftp_url, rootfs_nfs_url, device=None,
+                          fdt_subpath=None, extra_args=''):
+
+        if device is None:
+            ip_cfg = "ip=dhcp"
+        else:
+            ip_cfg = "ip=:::::{device}:dhcp::".format(device=device)
+
+        fh.write(textwrap.dedent('''\
+            DEFAULT default
+            LABEL default
+            LINUX {kernel_url}
+            APPEND root=/dev/nfs {ip_cfg} nfsroot={rootfs_nfs_url} {extra_args}
+            ''').format(kernel_url=kernel_tftp_url, ip_cfg=ip_cfg,
+                        rootfs_nfs_url=rootfs_nfs_url, extra_args=extra_args))
+        if fdt_subpath is not None:
+            fh.write("FDT {}\n".format(fdt_subpath))
+        fh.flush()
+
+    @contextlib.contextmanager
+    def local_pxeboot_config(self, tftproot, macaddr, ip, tftp_port,
+                             nfsroot_dir, device=None):
+        kernel_tftp_url = 'tftp://{}:{}/kernel'.format(ip, tftp_port)
+        rootfs_nfs_url = '{}:{}'.format(ip, nfsroot_dir)
+        pxe_cfg_filename = _normalise_macaddr(macaddr)
+        pxe_cfg_path = os.path.join(tftproot, 'pxelinux.cfg', pxe_cfg_filename)
+        os.makedirs(os.path.dirname(pxe_cfg_path))
+        with open(pxe_cfg_path, 'w') as f:
+            self._write_pxe_config(fh=f, kernel_tftp_url=kernel_tftp_url,
+                                   rootfs_nfs_url=rootfs_nfs_url,
+                                   device=device,
+                                   extra_args=os.environ.get('KERNEL_ARGS', ''))
+
+        try:
+            with self._local_symlink(
+                    src=pxe_cfg_filename,
+                    dst=os.path.join(tftproot,
+                                     'pxelinux.cfg',
+                                     '01-' +
pxe_cfg_filename)): + yield + finally: + os.unlink(pxe_cfg_path) + + @contextlib.contextmanager + def remote_pxeboot_config(self, tftproot, kernel_tftproot, kernel_subpath, + fdt_subpath, rootfs_nfsroot, rootfs_subpath, + macaddr): + rootfs_nfs_url = '{}/{}'.format(rootfs_nfsroot, rootfs_subpath) + url = urlparse.urlsplit(kernel_tftproot) + kernel_tftp_url = '{}:{}'.format(url.netloc, kernel_subpath) + pxe_cfg_filename = _normalise_macaddr(macaddr) + url = urlparse.urlsplit(tftproot) + inst_cfg_path = os.path.join(url.path, 'pxelinux.cfg') + with tempfile.NamedTemporaryFile() as f: + self._write_pxe_config( + fh=f, kernel_tftp_url=kernel_tftp_url, + fdt_subpath=fdt_subpath, + rootfs_nfs_url=rootfs_nfs_url, + extra_args=os.environ.get('KERNEL_ARGS','')) + with self._remote_copy( + hostname=url.hostname, src=f.name, + dst=os.path.join(inst_cfg_path, + pxe_cfg_filename)), \ + self._remote_symlink( + hostname=url.hostname, + src=pxe_cfg_filename, + dst=os.path.join(inst_cfg_path, + '01-' + pxe_cfg_filename)): + yield + + @contextlib.contextmanager + def dhcp_server(self, interface, host_ip, target_ip, broadcast_ip): + with self._tempdir() as td: + leases_path = os.path.join(td, 'leases') + config_path = os.path.join(td, 'config') + stdout_path = os.path.join(td, 'stdout') + stderr_path = os.path.join(td, 'stderr') + pidfile_path = os.path.join(td, 'pid') + with open(config_path, 'w') as f: + f.write(textwrap.dedent('''\ + start {target_ip} + end {target_ip} + interface {interface} + max_leases 1 + lease_file {leases_path} + pidfile {pidfile_path} + boot_file pxelinux.0 + option dns {host_ip} + option broadcast {broadcast_ip} + ''').format(**locals())) + with open(stdout_path, 'w') as stdout, \ + open(stderr_path, 'w') as stderr: + sp = subprocess.Popen(['udhcpd', '-f', config_path], cwd=td, + stdin=open(os.devnull), stdout=stdout, + stderr=stderr) + try: + with executor(sp.pid): + yield + finally: + sp.terminate() + + def get_interface_ip(self, interface): + ip_addresses = [] + info = cliapp.runcmd(['ip', '-o', '-f', 'inet', + 'addr', 'show', interface]).rstrip('\n') + if info: + tokens = collections.deque(info.split()[1:]) + ifname = tokens.popleft() + while tokens: + tok = tokens.popleft() + if tok == 'inet': + address = tokens.popleft() + address, netmask = address.split('/') + ip_addresses.append(address) + elif tok == 'brd': + tokens.popleft() # not interested in broadcast address + elif tok == 'scope': + tokens.popleft() # not interested in scope tag + else: + continue + if not ip_addresses: + raise cliapp.AppException('Interface %s has no addresses' + % interface) + if len(ip_addresses) > 1: + warnings.warn('Interface %s has multiple addresses, ' + 'using first (%s)' % (interface, ip_addresses[0])) + return ip_addresses[0] + + def ipmi_set_target_vlan(self): + if any(env_var.startswith('IPMI_') for env_var in os.environ): + # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST and PXEBOOT_VLAN + default = textwrap.dedent('''\ + ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\ + lan set 1 vlan id "$PXEBOOT_VLAN" + ''') + else: + default = textwrap.dedent('''\ + while true; do + echo Please set the target\\'s vlan to $PXEBOOT_VLAN, \\ + then enter \\"vlanned\\" + read + if [ "$REPLY" = vlanned ]; then + break + fi + done + ''') + command = os.environ.get('PXEBOOT_SET_VLAN_COMMAND', default) + subprocess.check_call(['sh', '-euc', command, '-']) + + def ipmi_pxe_reboot_target(self): + if any(env_var.startswith('IPMI_') for env_var in os.environ): + # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST 
and PXEBOOT_VLAN + default = textwrap.dedent('''\ + set -- ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" + "$@" chassis bootdev pxe + "$@" chassis power reset + ''') + else: + default = textwrap.dedent('''\ + while true; do + echo Please reboot the target in PXE mode, then\\ + enter \\"pxe-booted\\" + read + if [ "$REPLY" = pxe-booted ]; then + break + fi + done + ''') + command = os.environ.get('PXEBOOT_PXE_REBOOT_COMMAND', default) + subprocess.check_call(['sh', '-euc', command, '-']) + + def wait_for_target_to_install(self): + command = os.environ.get( + 'PXEBOOT_WAIT_INSTALL_COMMAND', + textwrap.dedent('''\ + while true; do + echo Please wait for the system to install, then \\ + enter \\"installed\\" + read + if [ "$REPLY" = installed ]; then + break + fi + done + ''')) + subprocess.check_call(['sh', '-euc', command, '-']) + + def ipmi_unset_target_vlan(self): + if any(env_var.startswith('IPMI_') for env_var in os.environ): + # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST + default = textwrap.dedent('''\ + ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\ + lan set 1 vlan id off + ''') + else: + default = textwrap.dedent('''\ + while true; do + echo Please reset the target\\'s vlan, \\ + then enter \\"unvlanned\\" + read + if [ "$REPLY" = unvlanned ]; then + break + fi + done + ''') + command = os.environ.get('PXEBOOT_UNSET_VLAN_COMMAND', default) + subprocess.check_call(['sh', '-euc', command, '-']) + + def ipmi_reboot_target(self): + if any(env_var.startswith('IPMI_') for env_var in os.environ): + # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST + default = textwrap.dedent('''\ + ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\ + chassis power reset + ''') + else: + default = textwrap.dedent('''\ + while true; do + echo Please reboot the target, then\\ + enter \\"rebooted\\" + read + if [ "$REPLY" = rebooted ]; then + break + fi + done + ''') + command = os.environ.get('PXEBOOT_REBOOT_COMMAND', default) + subprocess.check_call(['sh', '-euc', command, '-']) + + def process_args(self, (temp_root, macaddr)): + interface = os.environ.get('PXEBOOT_DEPLOYER_INTERFACE', None) + target_interface = os.environ.get('PXEBOOT_TARGET_INTERFACE', None) + vlan = os.environ.get('PXEBOOT_VLAN') + if vlan is not None: vlan = int(vlan) + mode = os.environ.get('PXEBOOT_MODE') + if mode is None: + if interface: + if vlan is not None: + mode = 'spawn-vlan' + else: + if 'PXEBOOT_CONFIG_TFTP_ADDRESS' in os.environ: + mode = 'existing-dhcp' + else: + mode = 'spawn-novlan' + else: + mode = 'existing-server' + assert mode in ('spawn-vlan', 'spawn-novlan', 'existing-dhcp', + 'existing-server') + if mode == 'spawn-vlan': + with self.vlan(interface=interface, vlan=vlan) \ + as (host_ip, target_ip, broadcast_ip), \ + self.tftp_server(host_ip=host_ip, interface=interface) \ + as (tftp_port, tftproot), \ + self.local_pxelinux(tftproot=tftproot), \ + self.local_kernel(rootfs=temp_root, tftproot=tftproot), \ + self.local_nfsroot(rootfs=temp_root, target_ip=target_ip), \ + self.local_pxeboot_config(tftproot=tftproot, macaddr=macaddr, + device=target_interface, + ip=host_ip, tftp_port=tftp_port, + nfsroot_dir=temp_root), \ + self.dhcp_server(interface=interface, host_ip=host_ip, + target_ip=target_ip, + broadcast_ip=broadcast_ip): + self.ipmi_set_target_vlan() + self.ipmi_pxe_reboot_target() + self.wait_for_target_to_install() + self.ipmi_unset_target_vlan() + self.ipmi_reboot_target() + elif mode == 'spawn-novlan': + with self.static_ip(interface=interface) as (host_ip, target_ip, + broadcast_ip), \ + 
self.tftp_server(host_ip=host_ip, interface=interface,
+                                 tftp_port=69) \
+                as (tftp_port, tftproot), \
+                self.local_pxelinux(tftproot=tftproot), \
+                self.local_kernel(rootfs=temp_root, tftproot=tftproot), \
+                self.local_nfsroot(rootfs=temp_root, target_ip=target_ip), \
+                self.local_pxeboot_config(tftproot=tftproot, macaddr=macaddr,
+                                          device=target_interface,
+                                          ip=host_ip, tftp_port=tftp_port,
+                                          nfsroot_dir=temp_root), \
+                self.dhcp_server(interface=interface, host_ip=host_ip,
+                                 target_ip=target_ip,
+                                 broadcast_ip=broadcast_ip):
+                self.ipmi_pxe_reboot_target()
+                self.wait_for_target_to_install()
+                self.ipmi_reboot_target()
+        elif mode == 'existing-dhcp':
+            ip = self.get_interface_ip(interface)
+            config_tftpaddr = os.environ['PXEBOOT_CONFIG_TFTP_ADDRESS']
+            with self.tftp_server(host_ip=ip, interface=interface,
+                                  tftp_port=69) \
+                as (tftp_port, tftproot), \
+                self.local_kernel(rootfs=temp_root, tftproot=tftproot), \
+                self.local_nfsroot(rootfs=temp_root, target_ip=''):
+                kernel_tftproot = 'tftp://{}:{}/'.format(ip, tftp_port)
+                rootfs_nfsroot = '{}:{}'.format(ip, temp_root)
+                with self.remote_pxeboot_config(
+                        tftproot=config_tftpaddr,
+                        kernel_tftproot=kernel_tftproot,
+                        kernel_subpath='kernel',
+                        fdt_subpath=None,
+                        rootfs_nfsroot=rootfs_nfsroot,
+                        rootfs_subpath='',
+                        macaddr=macaddr):
+                    self.ipmi_pxe_reboot_target()
+                    self.wait_for_target_to_install()
+                    self.ipmi_reboot_target()
+        elif mode == 'existing-server':
+            config_tftpaddr = os.environ['PXEBOOT_CONFIG_TFTP_ADDRESS']
+            kernel_tftpaddr = os.environ.get('PXEBOOT_KERNEL_TFTP_ADDRESS',
+                                             config_tftpaddr)
+            url = urlparse.urlsplit(kernel_tftpaddr)
+            kernel_tftproot = os.environ.get('PXEBOOT_KERNEL_TFTP_ROOT',
+                                             'tftp://%s/%s' % (url.hostname,
+                                                               url.path))
+            rootfs_rsync = os.environ['PXEBOOT_ROOTFS_RSYNC_ADDRESS']
+            url = urlparse.urlsplit(rootfs_rsync)
+            nfsroot = os.environ.get('PXEBOOT_ROOTFS_NFSROOT',
+                                     '%s:%s' % (url.hostname, url.path))
+            with self.remote_kernel(rootfs=temp_root, tftp_url=kernel_tftpaddr,
+                                    macaddr=macaddr) as kernel_subpath, \
+                self.remote_fdt(rootfs=temp_root, tftp_url=kernel_tftpaddr,
+                                macaddr=macaddr) as fdt_subpath, \
+                self.remote_nfsroot(rootfs=temp_root, rsync_url=rootfs_rsync,
+                                    macaddr=macaddr) as rootfs_subpath, \
+                self.remote_pxeboot_config(tftproot=config_tftpaddr,
+                                           kernel_tftproot=kernel_tftproot,
+                                           kernel_subpath=kernel_subpath,
+                                           fdt_subpath=fdt_subpath,
+                                           rootfs_nfsroot=nfsroot,
+                                           rootfs_subpath=rootfs_subpath,
+                                           macaddr=macaddr):
+                persist = os.environ.get('PXE_INSTALLER') in ('no', 'False')
+                if not persist:
+                    self.ipmi_pxe_reboot_target()
+                    self.wait_for_target_to_install()
+                    self.ipmi_reboot_target()
+        else:
+            raise cliapp.AppException('Invalid PXEBOOT_MODE: %s' % mode)
+
+PXEBoot().run()
diff --git a/extensions/pxeboot.write.help b/extensions/pxeboot.write.help
new file mode 100644
index 00000000..7cb78bce
--- /dev/null
+++ b/extensions/pxeboot.write.help
@@ -0,0 +1,166 @@
+help: >
+    pxeboot.write extension.
+
+
+    This write extension will serve your generated system over NFS to
+    the target system.
+
+    In all modes `location` is the MAC address of the interface that
+    the target will PXE boot from. This is used so that the target will
+    load the configuration file appropriate to it.
+
+
+    # `PXEBOOT_MODE`
+
+
+    The extension has four modes, which can be specified with PXEBOOT_MODE
+    or inferred from which parameters are passed:
+
+
+    ## spawn-vlan
+
+
+    Specify PXEBOOT_DEPLOYER_INTERFACE and PXEBOOT_VLAN to configure
+    the target to pxeboot on a vlan and spawn a dhcp, nfs and tftp
+    server (an example deployment follows).
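+
+    For example (illustrative values only, set in the cluster morphology
+    or on the `morph deploy` command line), a spawn-vlan deployment might
+    use:
+
+        location: 00:11:22:33:44:55
+        PXEBOOT_MODE: spawn-vlan
+        PXEBOOT_DEPLOYER_INTERFACE: eth0
+        PXEBOOT_VLAN: "2"
+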
This mode is potentially the fastest, since it doesn't need to
+    copy data to other servers.
+
+    This will create a vlan interface for the interface specified in
+    PXEBOOT_DEPLOYER_INTERFACE and spawn a dhcp server which serves
+    pxelinux.0, a configuration file and a kernel image from itself.
+
+    The configuration file informs the target to boot with a kernel
+    command line that uses an NFS root served from the deployment host.
+
+
+    ## spawn-novlan
+
+
+    Specify PXEBOOT_DEPLOYER_INTERFACE without PXEBOOT_VLAN to configure
+    like `spawn-vlan`, but without creating the vlan interface.
+
+    This assumes that you have exclusive access to the interface, such
+    as if you're plugged in to the device directly, or your interface
+    is vlanned by your infrastructure team.
+
+    This is required if you are serving from a VM and bridging it to the
+    correct network via macvtap. For this to work, you need to macvtap
+    bridge to a pre-vlanned interface on your host machine.
+
+
+    ## existing-dhcp
+
+
+    Specify PXEBOOT_DEPLOYER_INTERFACE and PXEBOOT_CONFIG_TFTP_ADDRESS
+    to put config on an existing tftp server, already configured by the
+    dhcp server.
+
+    This spawns a tftp server and configures the local nfs server, but
+    doesn't spawn a dhcp server. This is useful if you already have a
+    dhcp server that serves PXE images.
+
+    PXEBOOT_CONFIG_TFTP_ADDRESS is a URL in the form `sftp://$HOST/$PATH`.
+    The configuration file is copied to `$PATH/pxelinux.cfg/` on the
+    target identified by `$HOST`.
+
+
+    ## existing-server
+
+
+    Specify at least PXEBOOT_CONFIG_TFTP_ADDRESS and
+    PXEBOOT_ROOTFS_RSYNC_ADDRESS to specify existing servers to copy
+    config, kernels and the rootfs to.
+
+    Configuration is copied to the target as in `existing-dhcp`.
+
+    Specify PXEBOOT_KERNEL_TFTP_ADDRESS if the tftp server that the
+    kernel must be downloaded from is different from that of the pxelinux
+    configuration file.
+
+    PXEBOOT_ROOTFS_RSYNC_ADDRESS is an rsync URL describing where to copy
+    nfsroots to, from where they will be exported by the NFS server.
+
+    Specify PXEBOOT_ROOTFS_NFSROOT if the nfsroot appears as a different
+    address from the target's perspective.
+
+
+    # IPMI commands
+
+
+    After the PXE boot has been set up, the target needs to be rebooted
+    in PXE mode.
+
+    If the target is IPMI-enabled, you can set `IPMI_USER`, `IPMI_HOST`
+    and `IPMI_PASSWORD` to make it reboot the target into netboot mode
+    automatically.
+
+    If they are not specified, then instructions will be displayed, and
+    `pxeboot.write` will wait for you to finish.
+
+    If there are command-line automation tools for rebooting the target
+    in netboot mode, then appropriate commands can be defined in the
+    following variables.
+
+
+    ## PXEBOOT_PXE_REBOOT_COMMAND
+
+
+    This command will be used to reboot the target device with its boot
+    device set to PXE boot.
+
+
+    ## PXEBOOT_REBOOT_COMMAND
+
+
+    This command will be used to reboot the target device in its default
+    boot mode.
+
+
+    ## PXEBOOT_WAIT_INSTALL_COMMAND
+
+
+    If it is possible for the target to notify you that it has finished
+    installing, you can put a command in here to wait for the event.
+
+
+    # Misc
+
+
+    ## KERNEL_ARGS
+
+
+    Additional kernel command line options. Note that the following
+    options
+
+        root=/dev/nfs ip=dhcp nfsroot=$NFSROOT
+
+    are implicitly added by the extension.
+
+
+    ## DTB_PATH
+
+
+    Location in the deployed root filesystem of the Flattened Device
+    Tree blob (FDT) to use.
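+
+    For illustration, with these variables set, the pxelinux configuration
+    that the extension generates looks roughly like this (addresses and
+    paths are invented for the example):
+
+        DEFAULT default
+        LABEL default
+        LINUX tftp://192.168.32.1:69/kernel
+        APPEND root=/dev/nfs ip=dhcp nfsroot=192.168.32.1:/srv/nfs/system $KERNEL_ARGS
+        FDT 00-11-22-33-44-55-fdt
+
+    where the FDT line is only present when DTB_PATH is given.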
+
+
+    ## PXE_INSTALLER
+
+
+    If set to `no`, `False` or any other YAML value for false, the
+    remotely installed rootfs, kernel, bootloader config file and
+    device tree blob, if specified, will not be removed after the
+    deployment finishes. This variable is only meaningful in
+    `existing-server` mode.
+
+
+    ## PXEBOOT_TARGET_INTERFACE
+
+    Name of the interface of the target to pxeboot from. Some targets
+    with more than one interface try to fetch the rootfs over a different
+    interface than the one from which the pxeboot server is reachable.
+    If this variable is set, the kernel command line is extended to name
+    that device explicitly.
+
+    Note that the name of this interface is the kernel's default name,
+    usually ethX, and is non-deterministic.
diff --git a/extensions/rawdisk.check b/extensions/rawdisk.check
new file mode 100755
index 00000000..9be0ce91
--- /dev/null
+++ b/extensions/rawdisk.check
@@ -0,0 +1,53 @@
+#!/usr/bin/python
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+'''Preparatory checks for Morph 'rawdisk' write extension'''
+
+import cliapp
+
+import morphlib.writeexts
+
+import os
+
+
+class RawdiskCheckExtension(morphlib.writeexts.WriteExtension):
+    def process_args(self, args):
+        if len(args) != 1:
+            raise cliapp.AppException('Wrong number of command line args')
+
+        self.require_btrfs_in_deployment_host_kernel()
+
+        location = args[0]
+        upgrade = self.get_environment_boolean('UPGRADE')
+        if upgrade:
+            if not self.is_device(location):
+                if not os.path.isfile(location):
+                    raise cliapp.AppException(
+                        'Cannot upgrade %s: it is not an existing disk image' %
+                        location)
+
+            version_label = os.environ.get('VERSION_LABEL')
+            if version_label is None:
+                raise cliapp.AppException(
+                    'VERSION_LABEL was not given. It is required when '
+                    'upgrading an existing system.')
+        else:
+            if not self.is_device(location):
+                if os.path.exists(location):
+                    raise cliapp.AppException(
+                        'Target %s already exists. Use `morph upgrade` if you '
+                        'want to update an existing image.' % location)
+
+RawdiskCheckExtension().run()
diff --git a/extensions/rawdisk.write b/extensions/rawdisk.write
new file mode 100755
index 00000000..6f2d45ba
--- /dev/null
+++ b/extensions/rawdisk.write
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# Copyright (C) 2012-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program.  If not, see <http://www.gnu.org/licenses/>.
+ + +'''A Morph deployment write extension for raw disk images.''' + + +import cliapp +import os +import sys +import time +import tempfile + +import morphlib.writeexts + + +class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): + + '''See rawdisk.write.help for documentation''' + + def process_args(self, args): + if len(args) != 2: + raise cliapp.AppException('Wrong number of command line args') + + temp_root, location = args + upgrade = self.get_environment_boolean('UPGRADE') + + if upgrade: + self.upgrade_local_system(location, temp_root) + else: + try: + if not self.is_device(location): + with self.created_disk_image(location): + self.format_btrfs(location) + self.create_system(temp_root, location) + self.status(msg='Disk image has been created at %s' % + location) + else: + self.format_btrfs(location) + self.create_system(temp_root, location) + self.status(msg='System deployed to %s' % location) + except Exception: + self.status(msg='Failure to deploy system to %s' % + location) + raise + + def upgrade_local_system(self, raw_disk, temp_root): + self.complete_fstab_for_btrfs_layout(temp_root) + + with self.mount(raw_disk) as mp: + version_label = self.get_version_label(mp) + self.status(msg='Updating image to a new version with label %s' % + version_label) + + version_root = os.path.join(mp, 'systems', version_label) + os.mkdir(version_root) + + old_orig = os.path.join(mp, 'systems', 'default', 'orig') + new_orig = os.path.join(version_root, 'orig') + cliapp.runcmd( + ['btrfs', 'subvolume', 'snapshot', old_orig, new_orig]) + + cliapp.runcmd( + ['rsync', '-a', '--checksum', '--numeric-ids', '--delete', + temp_root + os.path.sep, new_orig]) + + self.create_run(version_root) + + default_path = os.path.join(mp, 'systems', 'default') + if os.path.exists(default_path): + os.remove(default_path) + else: + # we are upgrading and old system that does + # not have an updated extlinux config file + if self.bootloader_config_is_wanted(): + self.generate_bootloader_config(mp) + self.install_bootloader(mp) + os.symlink(version_label, default_path) + + if self.bootloader_config_is_wanted(): + self.install_kernel(version_root, temp_root) + + def get_version_label(self, mp): + version_label = os.environ.get('VERSION_LABEL') + + if version_label is None: + raise cliapp.AppException('VERSION_LABEL was not given') + + if os.path.exists(os.path.join(mp, 'systems', version_label)): + raise cliapp.AppException('VERSION_LABEL %s already exists' + % version_label) + + return version_label + + +RawDiskWriteExtension().run() diff --git a/extensions/rawdisk.write.help b/extensions/rawdisk.write.help new file mode 100644 index 00000000..52ed73fb --- /dev/null +++ b/extensions/rawdisk.write.help @@ -0,0 +1,82 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, see . + +help: | + + Write a system produced by Morph to a physical disk, or to a file that can + be used as a virtual disk. 
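+
+    For example, `location` might be a file path such as
+    /src/tmp/testdev.img when creating a disk image, or a device path
+    such as /dev/sdb when writing to a physical disk (both values are
+    purely illustrative).
+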
The target will be formatted as a single Btrfs + partition, with the system image written to a subvolume in /systems, and + other subvolumes created for /home, /opt, /root, /srv and /var. + + When written to a physical drive, the drive can be used as the boot device + for a 'real' machine. + + When written to a file, the file can be used independently of `morph` to + create virtual machines with KVM / libvirt, OpenStack or, after converting + it to VDI format, VirtualBox. + + `morph deploy` will fail if the file specified by `location` already + exists. + + If used in `morph upgrade`, the rootfs produced by 'morph build' is added + to the existing raw disk image or device as an additional btrfs sub-volume. + `morph upgrade` will fail if the file specified by `location` does not + exist, or is not a Baserock raw disk image. (Most users are unlikely to + need or use this functionality: it is useful mainly for developers working + on the Baserock tools.) + + Parameters: + + * location: the pathname of the disk image to be created/upgraded, or the + path to the physical device. + + * VERSION_LABEL=label - should contain only alpha-numeric + characters and the '-' (hyphen) character. Mandatory if being used with + `morph update` + + * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to + tell Linux to use, rather than booting the rootfs directly. + + * DTB_PATH=path: **(MANDATORY)** for systems that require a device tree + binary - Give the full path (without a leading /) to the location of the + DTB in the built system image . The deployment will fail if `path` does + not exist. + + * BOOTLOADER_INSTALL=value: the bootloader to be installed + **(MANDATORY)** for non-x86 systems + + allowed values = + - 'extlinux' (default) - the extlinux bootloader will + be installed + - 'none' - no bootloader will be installed by `morph deploy`. A + bootloader must be installed manually. This value must be used when + deploying non-x86 systems such as ARM. + + * BOOTLOADER_CONFIG_FORMAT=value: the bootloader format to be used. + If not specified for x86-32 and x86-64 systems, 'extlinux' will be used + + allowed values = + - 'extlinux' + + * KERNEL_ARGS=args: optional additional kernel command-line parameters to + be appended to the default set. The default set is: + + 'rw init=/sbin/init rootfstype=btrfs \ + rootflags=subvol=systems/default/run \ + root=[name or UUID of root filesystem]' + + (See https://www.kernel.org/doc/Documentation/kernel-parameters.txt) + + (See `morph help deploy` for details of how to pass parameters to write + extensions) diff --git a/extensions/sdk.write b/extensions/sdk.write new file mode 100755 index 00000000..8d3d2a63 --- /dev/null +++ b/extensions/sdk.write @@ -0,0 +1,284 @@ +#!/bin/sh +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# =*= License: GPL-2 =*= + +set -eu + +die(){ + echo "$@" >&2 + exit 1 +} + +shellescape(){ + echo "'$(echo "$1" | sed -e "s/'/'\\''/g")'" +} + +########################## END OF COMMON HEADER ############################### +# +# The above lines, as well as being part of this script, are copied into the +# self-installing SDK blob's header script, as a means of re-using content. +# + +help(){ + cat <>"$OUTPUT_SCRIPT" <>"$OUTPUT_SCRIPT" <<'EOF' +########################### START OF HEADER SCRIPT ############################ + +usage(){ + cat <&2 + usage >&2 + exit 1 +fi + +TOOLCHAIN_PATH="$(readlink -f \"$1\")" + +sedescape(){ + # Escape the passed in string so it can be safely interpolated into + # a sed expression as a literal value. + echo "$1" | sed -e 's/[\/&]/\\&/g' +} + +prepend_to_path_elements(){ + # Prepend $1 to every entry in the : separated list specified as $2. + local prefix="$1" + ( + # Split path into components + IFS=: + set -- $2 + # Print path back out with new prefix + printf %s "$prefix/$1" + shift + for arg in "$@"; do + printf ":%s" "$prefix/$arg" + done + ) +} + +extract_rootfs(){ + # Extract the bzipped tarball at the end of the script passed as $1 + # to the path specified as $2 + local selfextractor="$1" + local target="$2" + local script_end="$(($(\ + grep -aEn -m1 -e '^#+ END OF HEADER SCRIPT #+$' "$selfextractor" | + cut -d: -f1) + 1 ))" + mkdir -p "$target" + tail -n +"$script_end" "$selfextractor" | tar -xj -C "$target" . +} + +amend_text_file_paths(){ + # Replace all instances of $3 with $4 in the directory specified by $1 + # excluding the subdirectory $2 + local root="$1" + local inner_sysroot="$2" + local old_prefix="$3" + local new_prefix="$4" + find "$root" \( -path "$inner_sysroot" -prune \) -o -type f \ + -exec sh -c 'file "$1" | grep -q text' - {} \; \ + -exec sed -i -e \ + "s/$(sedescape "$old_prefix")/$(sedescape "$new_prefix")/g" {} + +} + +filter_patchelf_errors(){ + # Filter out warnings from patchelf that are acceptable + # The warning that it's making a file bigger is just noise + # The warning about not being an ELF executable just means we got a + # false positive from file that it was an ELF binary + # Failing to find .interp is because for convenience, we set the + # interpreter in the same command as setting the rpath, even though + # we give it both executables and libraries. + grep -v -e 'warning: working around a Linux kernel bug' \ + -e 'not an ELF executable' \ + -e 'cannot find section .interp' +} + +patch_elves(){ + # Set the interpreter and library paths of ELF binaries in $1, + # except for the $2 subdirectory, using the patchelf command in the + # toolchain specified as $3, so that it uses the linker specified + # as $4 as the interpreter, and the runtime path specified by $5. + # + # The patchelf inside the toolchain is used to ensure that it works + # independently of the availability of patchelf on the host. + # + # This is possible by invoking the linker directly and specifying + # --linker-path as the RPATH we want to set the binaries to use. 
local root="$1"
+    local inner_sysroot="$2"
+    local patchelf="$3"
+    local linker="$4"
+    local lib_path="$5"
+    find "$root" \( -path "$inner_sysroot" -prune \) -o -type f \
+         -type f -perm +u=x \
+         -exec sh -c 'file "$1" | grep -q "ELF"' - {} \; \
+         -exec "$linker" --library-path "$lib_path" \
+               "$patchelf" --set-interpreter "$linker" \
+                           --set-rpath "$lib_path" {} \; 2>&1 \
+    | filter_patchelf_errors
+}
+
+generate_environment_setup(){
+    local target="$1"
+    install -m 644 -D /dev/stdin "$target" <>"$OUTPUT_SCRIPT" .
diff --git a/extensions/set-hostname.configure b/extensions/set-hostname.configure
new file mode 100755
index 00000000..4b2424d8
--- /dev/null
+++ b/extensions/set-hostname.configure
@@ -0,0 +1,26 @@
+#!/bin/sh
+# Copyright (C) 2013,2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+
+# Set hostname on system from HOSTNAME.
+
+
+set -e
+
+if [ -n "$HOSTNAME" ]
+then
+    echo "$HOSTNAME" > "$1/etc/hostname"
+fi
+
diff --git a/extensions/simple-network.configure b/extensions/simple-network.configure
new file mode 100755
index 00000000..4a70f311
--- /dev/null
+++ b/extensions/simple-network.configure
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+# Copyright (C) 2013,2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+'''A Morph deployment configuration extension to handle network configuration
+
+This extension prepares /etc/network/interfaces and networkd .network files
+in /etc/systemd/network/ with the interfaces specified during deployment.
+
+If no network configuration is provided, eth0 will be configured for DHCP
+with the hostname of the system in the case of /etc/network/interfaces.
+In the case of networkd, any interface whose name starts with 'e' will be
+configured for DHCP.
+'''
+
+
+import os
+import sys
+import errno
+import cliapp
+
+import morphlib
+
+
+class SimpleNetworkError(morphlib.Error):
+    '''Errors associated with simple network setup'''
+    pass
+
+
+class SimpleNetworkConfigurationExtension(cliapp.Application):
+    '''Configure /etc/network/interfaces and generate networkd .network files
+
+    Reading NETWORK_CONFIG, this extension sets up /etc/network/interfaces
+    and .network files in /etc/systemd/network/.
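+
+    For example (illustrative addresses):
+
+        NETWORK_CONFIG="lo:loopback;eth0:static,address=192.168.1.2,netmask=255.255.255.0,gateway=192.168.1.1"
+
+    would configure "lo" as a loopback interface and give "eth0" a static
+    address.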
+    '''
+
+    def process_args(self, args):
+        network_config = os.environ.get("NETWORK_CONFIG")
+
+        self.rename_networkd_chunk_file(args)
+
+        if network_config is None:
+            self.generate_default_network_config(args)
+        else:
+            self.status(msg="Processing NETWORK_CONFIG=%(nc)s",
+                        nc=network_config)
+
+            stanzas = self.parse_network_stanzas(network_config)
+
+            self.generate_interfaces_file(args, stanzas)
+            self.generate_networkd_files(args, stanzas)
+
+    def rename_networkd_chunk_file(self, args):
+        """Rename the 10-dhcp.network file generated in the systemd chunk
+
+        The systemd chunk will place something in 10-dhcp.network, which will
+        have higher precedence than anything added in this extension (we
+        start at 50-*).
+
+        We check for that file and rename it rather than removing it, in
+        case the user is relying on its contents.
+
+        Until both of the following happen, we should continue to rename
+        that default config file:
+
+        1. simple-network.configure is always run when systemd is included
+        2. We've been building systems whose systemd chunk omits that
+           default networkd config for long enough that nobody should still
+           be relying on it.
+        """
+        file_path = os.path.join(args[0], "etc", "systemd", "network",
+                                 "10-dhcp.network")
+
+        if os.path.isfile(file_path):
+            try:
+                os.rename(file_path, file_path + ".morph")
+                self.status(msg="Renaming networkd file from systemd chunk: "
+                                "%(f)s to %(f)s.morph", f=file_path)
+            except OSError:
+                pass
+
+    def generate_default_network_config(self, args):
+        """Generate default network config: DHCP on all the interfaces"""
+
+        default_network_config_interfaces = "lo:loopback;" \
+                                            "eth0:dhcp,hostname=$(hostname)"
+        default_network_config_networkd = "e*:dhcp"
+
+        stanzas_interfaces = self.parse_network_stanzas(
+            default_network_config_interfaces)
+        stanzas_networkd = self.parse_network_stanzas(
+            default_network_config_networkd)
+
+        self.generate_interfaces_file(args, stanzas_interfaces)
+        self.generate_networkd_files(args, stanzas_networkd)
+
+    def generate_interfaces_file(self, args, stanzas):
+        """Generate /etc/network/interfaces file"""
+
+        iface_file = self.generate_iface_file(stanzas)
+
+        directory_path = os.path.join(args[0], "etc", "network")
+        self.make_sure_path_exists(directory_path)
+        file_path = os.path.join(directory_path, "interfaces")
+        with open(file_path, "w") as f:
+            f.write(iface_file)
+
+    def generate_iface_file(self, stanzas):
+        """Generate an interfaces file from the provided stanzas.
+
+        The interfaces will be sorted by name, with loopback sorted first.
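+
+        For example, a stanza {'name': 'eth0', 'type': 'dhcp', 'args': {}}
+        is rendered as:
+
+            auto eth0
+            iface eth0 inet dhcp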
+ """ + + def cmp_iface_names(a, b): + a = a['name'] + b = b['name'] + if a == "lo": + return -1 + elif b == "lo": + return 1 + else: + return cmp(a,b) + + return "\n".join(self.generate_iface_stanza(stanza) + for stanza in sorted(stanzas, cmp=cmp_iface_names)) + + def generate_iface_stanza(self, stanza): + """Generate an interfaces stanza from the provided data.""" + + name = stanza['name'] + itype = stanza['type'] + lines = ["auto %s" % name, "iface %s inet %s" % (name, itype)] + lines += [" %s %s" % elem for elem in stanza['args'].items()] + lines += [""] + return "\n".join(lines) + + def generate_networkd_files(self, args, stanzas): + """Generate .network files""" + + for i, stanza in enumerate(stanzas, 50): + iface_file = self.generate_networkd_file(stanza) + + if iface_file is None: + continue + + directory_path = os.path.join(args[0], "etc", "systemd", "network") + self.make_sure_path_exists(directory_path) + file_path = os.path.join(directory_path, + "%s-%s.network" % (i, stanza['name'])) + + with open(file_path, "w") as f: + f.write(iface_file) + + def generate_networkd_file(self, stanza): + """Generate an .network file from the provided data.""" + + name = stanza['name'] + itype = stanza['type'] + pairs = stanza['args'].items() + + if itype == "loopback": + return + + lines = ["[Match]"] + lines += ["Name=%s\n" % name] + lines += ["[Network]"] + if itype == "dhcp": + lines += ["DHCP=yes"] + else: + lines += self.generate_networkd_entries(pairs) + + return "\n".join(lines) + + def generate_networkd_entries(self, pairs): + """Generate networkd configuration entries with the other parameters""" + + address = None + netmask = None + gateway = None + dns = None + lines = [] + + for pair in pairs: + if pair[0] == 'address': + address = pair[1] + elif pair[0] == 'netmask': + netmask = pair[1] + elif pair[0] == 'gateway': + gateway = pair[1] + elif pair[0] == 'dns': + dns = pair[1] + + if address and netmask: + network_suffix = self.convert_net_mask_to_cidr_suffix (netmask); + address_line = address + '/' + str(network_suffix) + lines += ["Address=%s" % address_line] + elif address or netmask: + raise Exception('address and netmask must be specified together') + + if gateway: + lines += ["Gateway=%s" % gateway] + + if dns: + lines += ["DNS=%s" % dns] + + return lines + + def convert_net_mask_to_cidr_suffix(self, mask): + """Convert dotted decimal form of a subnet mask to CIDR suffix notation + + For example: 255.255.255.0 -> 24 + """ + return sum(bin(int(x)).count('1') for x in mask.split('.')) + + def parse_network_stanzas(self, config): + """Parse a network config environment variable into stanzas. + + Network config stanzas are semi-colon separated. + """ + + return [self.parse_network_stanza(s) for s in config.split(";")] + + def parse_network_stanza(self, stanza): + """Parse a network config stanza into name, type and arguments. + + Each stanza is of the form name:type[,arg=value]... 
+ + For example: + lo:loopback + eth0:dhcp + eth1:static,address=10.0.0.1,netmask=255.255.0.0 + """ + elements = stanza.split(",") + lead = elements.pop(0).split(":") + if len(lead) != 2: + raise SimpleNetworkError("Stanza '%s' is missing its type" % + stanza) + iface = lead[0] + iface_type = lead[1] + + if iface_type not in ['loopback', 'static', 'dhcp']: + raise SimpleNetworkError("Stanza '%s' has unknown interface type" + " '%s'" % (stanza, iface_type)) + + argpairs = [element.split("=", 1) for element in elements] + output_stanza = { "name": iface, + "type": iface_type, + "args": {} } + for argpair in argpairs: + if len(argpair) != 2: + raise SimpleNetworkError("Stanza '%s' has bad argument '%r'" + % (stanza, argpair.pop(0))) + if argpair[0] in output_stanza["args"]: + raise SimpleNetworkError("Stanza '%s' has repeated argument" + " %s" % (stanza, argpair[0])) + output_stanza["args"][argpair[0]] = argpair[1] + + return output_stanza + + def make_sure_path_exists(self, path): + try: + os.makedirs(path) + except OSError as e: + if e.errno == errno.EEXIST and os.path.isdir(path): + pass + else: + raise SimpleNetworkError("Unable to create directory '%s'" + % path) + + def status(self, **kwargs): + '''Provide status output. + + The ``msg`` keyword argument is the actual message, + the rest are values for fields in the message as interpolated + by %. + + ''' + + self.output.write('%s\n' % (kwargs['msg'] % kwargs)) + +SimpleNetworkConfigurationExtension().run() diff --git a/extensions/ssh-rsync.check b/extensions/ssh-rsync.check new file mode 100755 index 00000000..c3bdfd29 --- /dev/null +++ b/extensions/ssh-rsync.check @@ -0,0 +1,64 @@ +#!/usr/bin/python +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +'''Preparatory checks for Morph 'ssh-rsync' write extension''' + +import cliapp + +import os + +import morphlib.writeexts + +class SshRsyncCheckExtension(morphlib.writeexts.WriteExtension): + def process_args(self, args): + if len(args) != 1: + raise cliapp.AppException('Wrong number of command line args') + + upgrade = self.get_environment_boolean('UPGRADE') + if not upgrade: + raise cliapp.AppException( + 'The ssh-rsync write is for upgrading existing remote ' + 'Baserock machines. It cannot be used for an initial ' + 'deployment.') + + if os.environ.get('VERSION_LABEL', '') == '': + raise cliapp.AppException( + 'A VERSION_LABEL must be set when deploying an upgrade.') + + location = args[0] + self.check_ssh_connectivity(location) + self.check_is_baserock_system(location) + + # The new system that being deployed as an upgrade must contain + # baserock-system-config-sync and system-version-manager. However, the + # old system simply needs to have SSH and rsync. 
+ self.check_command_exists(location, 'rsync') + + def check_is_baserock_system(self, location): + output = cliapp.ssh_runcmd(location, ['sh', '-c', + 'test -d /baserock || echo -n dirnotfound']) + if output == 'dirnotfound': + raise cliapp.AppException('%s is not a baserock system' + % location) + + def check_command_exists(self, location, command): + test = 'type %s > /dev/null 2>&1 || echo -n cmdnotfound' % command + output = cliapp.ssh_runcmd(location, ['sh', '-c', test]) + if output == 'cmdnotfound': + raise cliapp.AppException( + "%s does not have %s" % (location, command)) + + +SshRsyncCheckExtension().run() diff --git a/extensions/ssh-rsync.write b/extensions/ssh-rsync.write new file mode 100755 index 00000000..6d596500 --- /dev/null +++ b/extensions/ssh-rsync.write @@ -0,0 +1,172 @@ +#!/usr/bin/python +# Copyright (C) 2013-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + + +'''A Morph deployment write extension for upgrading systems over ssh.''' + + +import contextlib +import cliapp +import os +import sys +import time +import tempfile + +import morphlib.writeexts + + +def ssh_runcmd_ignore_failure(location, command, **kwargs): + try: + return cliapp.ssh_runcmd(location, command, **kwargs) + except cliapp.AppException: + pass + + +class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): + + '''See ssh-rsync.write.help for documentation''' + + + def find_root_disk(self, location): + '''Read /proc/mounts on location to find which device contains "/"''' + + self.status(msg='Finding device that contains "/"') + contents = cliapp.ssh_runcmd(location, ['cat', '/proc/mounts']) + for line in contents.splitlines(): + line_words = line.split() + if (line_words[1] == '/' and line_words[0] != 'rootfs'): + return line_words[0] + + @contextlib.contextmanager + def _remote_mount_point(self, location): + self.status(msg='Creating remote mount point') + remote_mnt = cliapp.ssh_runcmd(location, ['mktemp', '-d']).strip() + try: + yield remote_mnt + finally: + self.status(msg='Removing remote mount point') + cliapp.ssh_runcmd(location, ['rmdir', remote_mnt]) + + @contextlib.contextmanager + def _remote_mount(self, location, root_disk, mountpoint): + self.status(msg='Mounting root disk') + cliapp.ssh_runcmd(location, ['mount', root_disk, mountpoint]) + try: + yield + finally: + self.status(msg='Unmounting root disk') + cliapp.ssh_runcmd(location, ['umount', mountpoint]) + + @contextlib.contextmanager + def _created_version_root(self, location, remote_mnt, version_label): + version_root = os.path.join(remote_mnt, 'systems', version_label) + self.status(msg='Creating %(root)s', root=version_root) + cliapp.ssh_runcmd(location, ['mkdir', version_root]) + try: + yield version_root + except BaseException as e: + # catch all, we always want to clean up + self.status(msg='Cleaning up %(root)s', root=version_root) + ssh_runcmd_ignore_failure(location, ['rmdir', version_root]) + raise + + def get_old_orig(self, location, remote_mnt): + '''Identify which subvolume to 
snapshot from''' + + # rawdisk upgrades use 'default' + return os.path.join(remote_mnt, 'systems', 'default', 'orig') + + @contextlib.contextmanager + def _created_orig_subvolume(self, location, remote_mnt, version_root): + self.status(msg='Creating "orig" subvolume') + old_orig = self.get_old_orig(location, remote_mnt) + new_orig = os.path.join(version_root, 'orig') + cliapp.ssh_runcmd(location, ['btrfs', 'subvolume', 'snapshot', + old_orig, new_orig]) + try: + yield new_orig + except BaseException as e: + ssh_runcmd_ignore_failure( + location, ['btrfs', 'subvolume', 'delete', new_orig]) + raise + + def populate_remote_orig(self, location, new_orig, temp_root): + '''Populate the subvolume version_root/orig on location''' + + self.status(msg='Populating "orig" subvolume') + cliapp.runcmd(['rsync', '-as', '--checksum', '--numeric-ids', + '--delete', temp_root + os.path.sep, + '%s:%s' % (location, new_orig)]) + + @contextlib.contextmanager + def _deployed_version(self, location, version_label, + system_config_sync, system_version_manager): + self.status(msg='Calling system-version-manager to deploy upgrade') + deployment = os.path.join('/systems', version_label, 'orig') + cliapp.ssh_runcmd(location, + ['env', 'BASEROCK_SYSTEM_CONFIG_SYNC='+system_config_sync, + system_version_manager, 'deploy', deployment]) + try: + yield deployment + except BaseException as e: + self.status(msg='Cleaning up failed version installation') + cliapp.ssh_runcmd(location, + [system_version_manager, 'remove', version_label]) + raise + + def upgrade_remote_system(self, location, temp_root): + root_disk = self.find_root_disk(location) + uuid = cliapp.ssh_runcmd(location, ['blkid', '-s', 'UUID', '-o', + 'value', root_disk]).strip() + + self.complete_fstab_for_btrfs_layout(temp_root, uuid) + + version_label = os.environ['VERSION_LABEL'] + autostart = self.get_environment_boolean('AUTOSTART') + + with self._remote_mount_point(location) as remote_mnt, \ + self._remote_mount(location, root_disk, remote_mnt), \ + self._created_version_root(location, remote_mnt, + version_label) as version_root, \ + self._created_orig_subvolume(location, remote_mnt, + version_root) as orig: + self.populate_remote_orig(location, orig, temp_root) + system_root = os.path.join(remote_mnt, 'systems', + version_label, 'orig') + config_sync = os.path.join(system_root, 'usr', 'bin', + 'baserock-system-config-sync') + version_manager = os.path.join(system_root, 'usr', 'bin', + 'system-version-manager') + with self._deployed_version(location, version_label, + config_sync, version_manager): + self.status(msg='Setting %(v)s as the new default system', + v=version_label) + cliapp.ssh_runcmd(location, [version_manager, + 'set-default', version_label]) + + if autostart: + self.status(msg="Rebooting into new system ...") + ssh_runcmd_ignore_failure(location, ['reboot']) + + def process_args(self, args): + if len(args) != 2: + raise cliapp.AppException('Wrong number of command line args') + + temp_root, location = args + + self.upgrade_remote_system(location, temp_root) + + +SshRsyncWriteExtension().run() diff --git a/extensions/ssh-rsync.write.help b/extensions/ssh-rsync.write.help new file mode 100644 index 00000000..f3f79ed5 --- /dev/null +++ b/extensions/ssh-rsync.write.help @@ -0,0 +1,50 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. 
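
The write extension above stacks context managers so that every step of a partially-failed upgrade is undone in reverse order: the remote mount point, the mount itself, the version root and the "orig" snapshot each clean themselves up when a later step raises. A minimal, self-contained sketch of that pattern (step names and the failure are invented for illustration):

    import contextlib

    @contextlib.contextmanager
    def step(name, undo):
        # Run one deployment step; undo it if any later step fails.
        print('start: ' + name)
        try:
            yield
        except BaseException:
            print('undo: ' + name)
            undo()
            raise

    def noop():
        pass

    try:
        with step('mount root disk', noop), \
                step('snapshot orig', noop):
            raise RuntimeError('rsync failed')
    except RuntimeError:
        print('failure propagated after cleanup')

The inner step's cleanup runs first, then the outer one's, and the original exception still reaches the caller.
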
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+    Upgrade a Baserock system which is already deployed:
+        - as a KVM/LibVirt, OpenStack or vbox-ssh virtual machine;
+        - on a Jetson board.
+
+    Copies a binary delta over to the target system and arranges for it
+    to be bootable.
+
+    The recommended way to use this extension is by calling `morph upgrade`.
+    Using `morph deploy --upgrade` is deprecated.
+
+    The upgrade will fail if:
+        - no VM is deployed and running at `location`;
+        - the target system is not a Baserock system;
+        - the target's filesystem and its layout are not compatible with that
+          created by `morph deploy`.
+
+    See also the 'Upgrading a Baserock installation' section of the 'Using
+    Baserock' page at wiki.baserock.org
+    http://wiki.baserock.org/devel-with/#index8h2
+
+    Parameters:
+
+    * location: the 'user@hostname' string that will be used by ssh and rsync.
+      'user' will always be `root` and `hostname` the hostname or address of
+      the system being upgraded.
+
+    * VERSION_LABEL=label - **(MANDATORY)** should contain only alpha-numeric
+      characters and the '-' (hyphen) character.
+
+    * AUTOSTART=<VALUE> - boolean. If it is set, the VM will be started when
+      it has been deployed.
+
+    (See `morph help deploy` for details of how to pass parameters to write
+    extensions)
diff --git a/extensions/sshkeys.configure b/extensions/sshkeys.configure
new file mode 100755
index 00000000..7a5a8379
--- /dev/null
+++ b/extensions/sshkeys.configure
@@ -0,0 +1,25 @@
+#!/bin/sh
+#
+# Copyright 2014 Codethink Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+if [ "$SSHKEYS" ]
+then
+    install -d -m 700 "$1/root/.ssh"
+    echo "Adding keys in $SSHKEYS to authorized_keys file"
+    cat $SSHKEYS >> "$1/root/.ssh/authorized_keys"
+fi
diff --git a/extensions/strip-gplv3.configure b/extensions/strip-gplv3.configure
new file mode 100755
index 00000000..c08061ad
--- /dev/null
+++ b/extensions/strip-gplv3.configure
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+# Copyright (C) 2013 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
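
Configure extensions such as sshkeys.configure above are plain executables: the deployment tooling runs them with the unpacked system root as the first argument and passes settings through the environment. A hypothetical stand-alone invocation (both paths are invented for the example):

    import os
    import subprocess

    env = dict(os.environ, SSHKEYS='keys/deploy.pub')  # hypothetical key path
    subprocess.check_call(
        ['extensions/sshkeys.configure', '/tmp/system-root'], env=env)
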
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+''' A Morph configuration extension for removing GPLv3 chunks from a system
+
+Using a hard-coded list of chunks, it will read the system's /baserock
+metadata to find the files created by each of those chunks, then remove them.
+
+'''
+
+import cliapp
+import re
+import os
+import json
+
+class StripGPLv3ConfigureExtension(cliapp.Application):
+    gplv3_chunks = [
+        ['autoconf', ''],
+        ['automake', ''],
+        ['bash', ''],
+        ['binutils', ''],
+        ['bison', ''],
+        ['ccache', ''],
+        ['cmake', ''],
+        ['flex', ''],
+        ['gawk', ''],
+        ['gcc', r'^.*lib.*\.so(\.\d+)*$'],
+        ['gdbm', ''],
+        ['gettext', ''],
+        ['gperf', ''],
+        ['groff', ''],
+        ['libtool', r'^.*lib.*\.so(\.\d+)*$'],
+        ['m4', ''],
+        ['make', ''],
+        ['nano', ''],
+        ['patch', ''],
+        ['rsync', ''],
+        ['texinfo-tarball', ''],
+    ]
+
+    def process_args(self, args):
+        target_root = args[0]
+        meta_dir = os.path.join(target_root, 'baserock')
+
+        for chunk in self.gplv3_chunks:
+            regex = os.path.join(meta_dir, "%s-[^-]\+\.meta" % chunk[0])
+            artifacts = self.runcmd(['find', meta_dir, '-regex', regex])
+
+            for artifact in artifacts.split():
+                self.remove_chunk(target_root, artifact, chunk[1])
+
+        os.symlink(os.path.join(os.sep, 'bin', 'busybox'),
+                   os.path.join(target_root, 'usr', 'bin', 'awk'))
+
+    def remove_chunk(self, target_root, chunk, pattern):
+        chunk_meta_path = os.path.join(target_root, 'baserock', chunk)
+
+        with open(chunk_meta_path, 'r') as f:
+            chunk_meta_data = json.load(f)
+
+        if 'contents' not in chunk_meta_data:
+            raise cliapp.AppException('Chunk %s does not have a "contents" '
+                                      'list' % chunk)
+        # An empty pattern removes every file in the chunk; otherwise,
+        # entries matching the pattern (e.g. shared libraries) are kept
+        # and everything else is removed.
+        pat = re.compile(pattern)
+        for content_entry in reversed(chunk_meta_data['contents']):
+            if len(pattern) == 0 or not pat.match(content_entry):
+                self.remove_content_entry(target_root, content_entry)
+
+    def remove_content_entry(self, target_root, content_entry):
+        entry_path = os.path.join(target_root, './' + content_entry)
+        if not entry_path.startswith(target_root):
+            raise cliapp.AppException('%s is not in %s'
+                                      % (entry_path, target_root))
+        if os.path.exists(entry_path):
+            if os.path.islink(entry_path):
+                os.unlink(entry_path)
+            elif os.path.isfile(entry_path):
+                os.remove(entry_path)
+            elif os.path.isdir(entry_path):
+                if not os.listdir(entry_path):
+                    os.rmdir(entry_path)
+            else:
+                raise cliapp.AppException('%s is not a link, file or '
+                                          'directory' % entry_path)
+StripGPLv3ConfigureExtension().run()
diff --git a/extensions/swift-build-rings.yml b/extensions/swift-build-rings.yml
new file mode 100644
index 00000000..1ffe9c37
--- /dev/null
+++ b/extensions/swift-build-rings.yml
@@ -0,0 +1,34 @@
+---
+- hosts: localhost
+  vars:
+  - rings:
+    - { name: account, port: 6002 }
+    - { name: container, port: 6001 }
+    - { name: object, port: 6000 }
+  remote_user: root
+  tasks:
+  - file: path={{ ansible_env.ROOT }}/etc/swift owner=root group=root state=directory
+
+  - name: Create ring
+    shell: swift-ring-builder {{ item.name }}.builder create {{ ansible_env.SWIFT_PART_POWER }}
+           {{ ansible_env.SWIFT_REPLICAS }} {{ ansible_env.SWIFT_MIN_PART_HOURS }}
+    with_items: rings
+
+  - name: Add each storage node to the ring
+    shell: swift-ring-builder {{ item[0].name }}.builder
+           add r1z1-{{ item[1].ip }}:{{ item[0].port }}/{{ item[1].device }} {{ item[1].weight }}
+    with_nested:
+      - rings
+ - ansible_env.SWIFT_STORAGE_DEVICES + + - name: Rebalance the ring + shell: swift-ring-builder {{ item.name }}.builder rebalance {{ ansible_env.SWIFT_REBALANCE_SEED }} + with_items: rings + + - name: Copy ring configuration files into place + copy: src={{ item.name }}.ring.gz dest={{ ansible_env.ROOT }}/etc/swift + with_items: rings + + - name: Copy ring builder files into place + copy: src={{ item.name }}.builder dest={{ ansible_env.ROOT }}/etc/swift + with_items: rings diff --git a/extensions/swift-storage-devices-validate.py b/extensions/swift-storage-devices-validate.py new file mode 100755 index 00000000..57ab23d0 --- /dev/null +++ b/extensions/swift-storage-devices-validate.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright © 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# This is used by the openstack-swift.configure extension +# to validate any provided storage device specifiers +# under SWIFT_STORAGE_DEVICES +# + + +''' + This is used by the swift-storage.configure extension + to validate any storage device specifiers specified + in the SWIFT_STORAGE_DEVICES environment variable +''' + +from __future__ import print_function + +import yaml +import sys + +EXAMPLE_DEVSPEC = '{device: sdb1, ip: 127.0.0.1, weight: 100}' +REQUIRED_KEYS = ['ip', 'device', 'weight'] + +def err(msg): + print(msg, file=sys.stderr) + sys.exit(1) + +if len(sys.argv) != 2: + err('usage: %s STRING_TO_BE_VALIDATED' % sys.argv[0]) + +swift_storage_devices = yaml.load(sys.argv[1]) + +if not isinstance(swift_storage_devices, list): + err('Expected list of device specifiers\n' + 'Example: [%s]' % EXAMPLE_DEVSPEC) + +for d in swift_storage_devices: + if not isinstance(d, dict): + err("Invalid device specifier: `%s'\n" + 'Device specifier must be a dictionary\n' + 'Example: %s' % (d, EXAMPLE_DEVSPEC)) + + if set(d.keys()) != set(REQUIRED_KEYS): + err("Invalid device specifier: `%s'\n" + 'Specifier should contain: %s\n' + 'Example: %s' % (d, str(REQUIRED_KEYS)[1:-1], EXAMPLE_DEVSPEC)) diff --git a/extensions/swift-storage.configure b/extensions/swift-storage.configure new file mode 100644 index 00000000..391b392a --- /dev/null +++ b/extensions/swift-storage.configure @@ -0,0 +1,107 @@ +#!/bin/bash +# +# Copyright © 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . 
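
The validator above accepts a YAML list of mappings carrying exactly the keys `ip`, `device` and `weight`. The same rules can be checked in a few lines; this sketch uses `yaml.safe_load`, although the script itself calls `yaml.load`:

    import yaml

    spec = '[{device: sdb1, ip: 127.0.0.1, weight: 100}]'
    devices = yaml.safe_load(spec)
    assert isinstance(devices, list)
    for d in devices:
        assert set(d.keys()) == {'ip', 'device', 'weight'}
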
+ +set -e + +# The ansible script needs to know where the rootfs is, so we export it here +export ROOT="$1" + +validate_number() { + local name="$1" + local value="$2" + + local pattern='^[0-9]+$' + if ! [[ $value =~ $pattern ]] + then + echo "'$name' must be a number" >&2 + exit 1 + fi +} + +validate_non_empty() { + local name="$1" + local value="$2" + + if [[ $value = None ]] + then + echo "'$name' cannot be empty" >&2 + exit 1 + fi +} + +MANDATORY_OPTIONS="SWIFT_HASH_PATH_PREFIX \ + SWIFT_HASH_PATH_SUFFIX \ + SWIFT_REBALANCE_SEED \ + SWIFT_PART_POWER \ + SWIFT_REPLICAS \ + SWIFT_MIN_PART_HOURS \ + SWIFT_STORAGE_DEVICES \ + CONTROLLER_HOST_ADDRESS \ + MANAGEMENT_INTERFACE_IP_ADDRESS" + +for option in $MANDATORY_OPTIONS +do + if ! [[ -v $option ]] + then + missing_option=True + echo "Required option $option isn't set!" >&2 + fi +done + +if [[ $missing_option = True ]]; then exit 1; fi + +./swift-storage-devices-validate.py "$SWIFT_STORAGE_DEVICES" + +# Validate SWIFT_PART_POWER, SWIFT_REPLICAS, SWIFT_MIN_PART_HOURS +# just make sure they're numbers + +validate_number "SWIFT_PART_POWER" "$SWIFT_PART_POWER" +validate_number "SWIFT_REPLICAS" "$SWIFT_REPLICAS" +validate_number "SWIFT_MIN_PART_HOURS" "$SWIFT_MIN_PART_HOURS" + +# Make sure these aren't empty +validate_non_empty "SWIFT_HASH_PATH_PREFIX" "$SWIFT_HASH_PATH_PREFIX" +validate_non_empty "SWIFT_HASH_PATH_SUFFIX" "$SWIFT_HASH_PATH_SUFFIX" +validate_non_empty "SWIFT_REBALANCE_SEED" "$SWIFT_REBALANCE_SEED" +validate_non_empty "CONTROLLER_HOST_ADDRESS" "$CONTROLLER_HOST_ADDRESS" +validate_non_empty "MANAGEMENT_INTERFACE_IP_ADDRESS" "$MANAGEMENT_INTERFACE_IP_ADDRESS" + +mkdir -p "$ROOT/usr/lib/systemd/system/multi-user.target.wants" # ensure this exists before we make symlinks + +# A swift controller needs the storage setup service +# but does not want any of the other storage services enabled +ln -s "/usr/lib/systemd/system/swift-storage-setup.service" \ + "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-storage-setup.service" + +SWIFT_CONTROLLER=${SWIFT_CONTROLLER:-False} + +if [[ $SWIFT_CONTROLLER = False ]] +then + ln -s "/usr/lib/systemd/system/rsync.service" \ + "$ROOT/usr/lib/systemd/system/multi-user.target.wants/rsync.service" + ln -s "/usr/lib/systemd/system/swift-storage.service" \ + "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-storage.service" +fi + +# Build swift data structures (the rings) +/usr/bin/ansible-playbook -i hosts swift-build-rings.yml + +cat << EOF > "$ROOT"/usr/share/swift/swift-storage-vars.yml +--- +MANAGEMENT_INTERFACE_IP_ADDRESS: $MANAGEMENT_INTERFACE_IP_ADDRESS +SWIFT_HASH_PATH_PREFIX: $SWIFT_HASH_PATH_PREFIX +SWIFT_HASH_PATH_SUFFIX: $SWIFT_HASH_PATH_SUFFIX +EOF diff --git a/extensions/sysroot.check b/extensions/sysroot.check new file mode 100755 index 00000000..71b35175 --- /dev/null +++ b/extensions/sysroot.check @@ -0,0 +1,23 @@ +#!/bin/sh +# Copyright (C) 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . 
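
The mandatory-option loop in swift-storage.configure above relies on bash's `[[ -v NAME ]]`, which tests whether a variable is set at all rather than whether it is non-empty. A rough Python equivalent of the same guard, with an invented subset of the option list:

    import os
    import sys

    REQUIRED = ['SWIFT_PART_POWER', 'SWIFT_REPLICAS', 'SWIFT_MIN_PART_HOURS']
    missing = [name for name in REQUIRED if name not in os.environ]
    if missing:
        sys.exit('Required options not set: %s' % ', '.join(missing))
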
+ +# Preparatory checks for Morph 'sysroot' write extension + +set -eu + +if [ "$UPGRADE" == "yes" ]; then + echo >&2 "ERROR: Cannot upgrade a sysroot deployment" + exit 1 +fi diff --git a/extensions/sysroot.write b/extensions/sysroot.write new file mode 100755 index 00000000..46f1a780 --- /dev/null +++ b/extensions/sysroot.write @@ -0,0 +1,22 @@ +#!/bin/sh +# Copyright (C) 2014,2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +# A Morph write extension to deploy to another directory + +set -eu + +mkdir -p "$2" + +cp -a "$1"/* "$2" diff --git a/extensions/tar.check b/extensions/tar.check new file mode 100755 index 00000000..f2304d46 --- /dev/null +++ b/extensions/tar.check @@ -0,0 +1,23 @@ +#!/bin/sh +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +# Preparatory checks for Morph 'tar' write extension + +set -eu + +if [ "$UPGRADE" == "yes" ]; then + echo >&2 "ERROR: Cannot upgrade a tar file deployment." + exit 1 +fi diff --git a/extensions/tar.write b/extensions/tar.write new file mode 100755 index 00000000..01b545b4 --- /dev/null +++ b/extensions/tar.write @@ -0,0 +1,20 @@ +#!/bin/sh +# Copyright (C) 2013,2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +# A Morph write extension to deploy to a .tar file + +set -eu + +tar -C "$1" -cf "$2" . diff --git a/extensions/tar.write.help b/extensions/tar.write.help new file mode 100644 index 00000000..b45c61fa --- /dev/null +++ b/extensions/tar.write.help @@ -0,0 +1,19 @@ +# Copyright (C) 2014, 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
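
The tar.write extension above is a single `tar -C "$1" -cf "$2" .` invocation. The same effect from the Python standard library, with invented paths, would be roughly:

    import tarfile

    # Equivalent of `tar -C /tmp/system-root -cf /tmp/system.tar .`
    with tarfile.open('/tmp/system.tar', 'w') as tar:
        tar.add('/tmp/system-root', arcname='.')
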
See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+    Create a .tar file of the deployed system.
+
+    The `location` argument is a pathname to the .tar file to be
+    created.
diff --git a/extensions/trove.configure b/extensions/trove.configure
new file mode 100755
index 00000000..f823762c
--- /dev/null
+++ b/extensions/trove.configure
@@ -0,0 +1,148 @@
+#!/bin/sh
+#
+# Copyright (C) 2013 - 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# This is a "morph deploy" configuration extension to fully configure
+# a Trove instance at deployment time. It uses the following variables
+# from the environment (run `morph help trove.configure` to see a description
+# of them):
+#
+# * TROVE_ID
+# * TROVE_HOSTNAME (optional, defaults to TROVE_ID)
+# * TROVE_COMPANY
+# * LORRY_SSH_KEY
+# * UPSTREAM_TROVE
+# * UPSTREAM_TROVE_PROTOCOL
+# * TROVE_ADMIN_USER
+# * TROVE_ADMIN_EMAIL
+# * TROVE_ADMIN_NAME
+# * TROVE_ADMIN_SSH_PUBKEY
+# * LORRY_CONTROLLER_MINIONS (optional, defaults to 4)
+# * TROVE_BACKUP_KEYS - a space-separated list of paths to SSH keys.
+#                       (optional)
+# * TROVE_GENERIC (optional)
+#
+# The configuration of a Trove is slightly tricky: part of it has to
+# be run on the configured system after it has booted. We accomplish
+# this by copying in all the relevant data to the target system
+# (in /var/lib/trove-setup), and creating a systemd unit file that
+# runs on the first boot. The first boot will be detected by the
+# existence of the /var/lib/trove-setup/needed file.

+set -e
+
+if [ "$TROVE_GENERIC" ]
+then
+    echo "Not configuring the trove, it will be generic"
+    exit 0
+fi
+
+
+# Check that all the variables needed are present:
+
+error_vars=false
+if test "x$TROVE_ID" = "x"; then
+    echo "ERROR: TROVE_ID needs to be defined."
+    error_vars=true
+fi
+
+if test "x$TROVE_COMPANY" = "x"; then
+    echo "ERROR: TROVE_COMPANY needs to be defined."
+    error_vars=true
+fi
+
+if test "x$TROVE_ADMIN_USER" = "x"; then
+    echo "ERROR: TROVE_ADMIN_USER needs to be defined."
+    error_vars=true
+fi
+
+if test "x$TROVE_ADMIN_NAME" = "x"; then
+    echo "ERROR: TROVE_ADMIN_NAME needs to be defined."
+    error_vars=true
+fi
+
+if test "x$TROVE_ADMIN_EMAIL" = "x"; then
+    echo "ERROR: TROVE_ADMIN_EMAIL needs to be defined."
+    error_vars=true
+fi
+
+if ! ssh-keygen -lf "$LORRY_SSH_KEY" > /dev/null 2>&1
+then
+    echo "ERROR: LORRY_SSH_KEY is not a valid ssh key."
+    error_vars=true
+fi
+
+if ! ssh-keygen -lf "$WORKER_SSH_PUBKEY" > /dev/null 2>&1
+then
+    echo "ERROR: WORKER_SSH_PUBKEY is not a valid ssh key."
+    error_vars=true
+fi
+
+if ! ssh-keygen -lf "$TROVE_ADMIN_SSH_PUBKEY" > /dev/null 2>&1
+then
+    echo "ERROR: TROVE_ADMIN_SSH_PUBKEY is not a valid ssh key."
+ error_vars=true +fi + +if "$error_vars"; then + exit 1 +fi + +ROOT="$1" + + +TROVE_DATA="$ROOT/etc/trove" +mkdir -p "$TROVE_DATA" + +install -m 0600 "$LORRY_SSH_KEY" "$TROVE_DATA/lorry.key" +install -m 0644 "${LORRY_SSH_KEY}.pub" "$TROVE_DATA/lorry.key.pub" +install -m 0644 "$TROVE_ADMIN_SSH_PUBKEY" "$TROVE_DATA/admin.key.pub" +install -m 0644 "$WORKER_SSH_PUBKEY" "$TROVE_DATA/worker.key.pub" + + +python <<'EOF' >"$TROVE_DATA/trove.conf" +import os, sys, yaml + +trove_configuration={ + 'TROVE_ID': os.environ['TROVE_ID'], + 'TROVE_COMPANY': os.environ['TROVE_COMPANY'], + 'TROVE_ADMIN_USER': os.environ['TROVE_ADMIN_USER'], + 'TROVE_ADMIN_EMAIL': os.environ['TROVE_ADMIN_EMAIL'], + 'TROVE_ADMIN_NAME': os.environ['TROVE_ADMIN_NAME'], + 'LORRY_SSH_KEY': '/etc/trove/lorry.key', + 'LORRY_SSH_PUBKEY': '/etc/trove/lorry.key.pub', + 'TROVE_ADMIN_SSH_PUBKEY': '/etc/trove/admin.key.pub', + 'WORKER_SSH_PUBKEY': '/etc/trove/worker.key.pub', +} + + + +optional_keys = ('MASON_ID', 'HOSTNAME', 'TROVE_HOSTNAME', + 'LORRY_CONTROLLER_MINIONS', 'TROVE_BACKUP_KEYS', + 'UPSTREAM_TROVE', 'UPSTREAM_TROVE_PROTOCOL') + +for key in optional_keys: + if key in os.environ: + trove_configuration[key]=os.environ[key] + +yaml.dump(trove_configuration, sys.stdout, default_flow_style=False) +EOF + +if [ -n "$TROVE_BACKUP_KEYS" ]; then + mkdir -p "$TROVE_DATA/backup-keys" + cp -- $TROVE_BACKUP_KEYS "$TROVE_DATA/backup-keys" + echo "TROVE_BACKUP_KEYS: /etc/trove/backup-keys/*" >> "$TROVE_DATA/trove.conf" +fi diff --git a/extensions/trove.configure.help b/extensions/trove.configure.help new file mode 100644 index 00000000..c96bdf74 --- /dev/null +++ b/extensions/trove.configure.help @@ -0,0 +1,126 @@ +help: | + This is a "morph deploy" configuration extension to fully configure + a Trove instance at deployment time. It uses the following + configuration variables: + + * `TROVE_ID` + * `TROVE_HOSTNAME` (optional, defaults to `TROVE_ID`) + * `TROVE_COMPANY` + * `LORRY_SSH_KEY` + * `UPSTREAM_TROVE` + * `TROVE_ADMIN_USER` + * `TROVE_ADMIN_EMAIL` + * `TROVE_ADMIN_NAME` + * `TROVE_ADMIN_SSH_PUBKEY` + * `LORRY_CONTROLLER_MINIONS` (optional, defaults to 4) + * `TROVE_BACKUP_KEYS` - a space-separated list of paths to SSH keys. + (optional) + + The variables are described in more detail below. + + A Trove deployment needs to know the following things: + + * The Trove's ID and public name. + * The Trove's administrator name and access details. + * Private and public SSH keys for the Lorry user on the Trove. + * Which upstream Trove it should be set to mirror upon initial deploy. + + These are specified with the configuration variables described in this + help. + + * `TROVE_GENERIC` -- boolean. If it's true the trove will be generic + and it won't be configured with any of the other variables listed + here. + + * `TROVE_ID` -- the identifier of the Trove. This separates it from + other Troves, and allows mirroring of Troves to happen without local + changes getting overwritten. + + The Trove ID is used in several ways. Any local repositories (those not + mirrored from elsewhere) get created under a prefix that is the ID. + Thus, the local repositories on the `git.baserock.org` Trove, whose + Trove ID is `baserock`, are named + `baserock/baserock/definitions.git` and similar. The ID is used + there twice: first as a prefix and then as a "project name" within + that prefix. There can be more projects under the prefix. 
For
+    example, there is a `baserock/local-config/lorries.git` repository,
+    where `local-config` is a separate project from `baserock`. Projects
+    here are a concept for the Trove's git access control language.
+
+    The Trove ID is also used as the prefix for any branch and tag names
+    created locally for repositories that are not local. Thus, in the
+    `delta/linux.git` repository, any local branches would be called
+    something like `baserock/morph`, instead of just `morph`. The
+    Trove's git access control prevents normal users from pushing
+    branches and tags that do not have the Trove ID as the prefix.
+
+    * `TROVE_HOSTNAME` -- the public name of the Trove. This is an
+      optional setting, and defaults to `TROVE_ID`. The public name is
+      typically the domain name of the server (e.g., `git.baserock.org`),
+      but can also be an IP address. This setting is used when the Trove
+      needs to generate URLs that point to itself, such as the `git://` and
+      `http://` URLs for each git repository that is viewed via the web
+      interface.
+
+      Note that this is _not_ the system hostname. That is set separately,
+      with the `HOSTNAME` configuration setting (see the
+      `set-hostname.configure` extension).
+
+    * `TROVE_COMPANY` -- a description of the organisation that owns the
+      Trove. This is shown in various parts of the web interface of the
+      Trove. It is for descriptive purposes only.
+
+    * `LORRY_SSH_KEY` -- ssh key pair that the Trove's Lorry will use to
+      access an upstream Trove, and to push updates to the Trove's git
+      server.
+
+      The value is a filename on the system doing the deployment (where
+      `morph deploy` is run). The file contains the _private_ key, and the
+      public key is in a file with the `.pub` suffix added to the name.
+
+      The upstream Trove needs to be configured to allow this key to
+      access it. This configuration does not do that automatically.
+
+    * `UPSTREAM_TROVE` -- public name of the upstream Trove (domain
+      name or IP address). This is an optional setting. If it's set,
+      the new Trove will be configured to mirror that Trove.
+
+    * `TROVE_ADMIN_USER`, `TROVE_ADMIN_EMAIL`, `TROVE_ADMIN_NAME`,
+      `TROVE_ADMIN_SSH_PUBKEY` -- details of the Trove's (initial)
+      administrator.
+
+      Each Trove needs at least one administrator user, and one is created
+      upon initial deployment. `TROVE_ADMIN_USER` is the username of the
+      account to be created, `TROVE_ADMIN_EMAIL` should be the e-mail of
+      the user, and `TROVE_ADMIN_NAME` is their name. If more
+      administrators are needed, the initial person should create them
+      using the usual Gitano commands.
+
+    * `LORRY_CONTROLLER_MINIONS` -- the number of Lorry Controller worker
+      processes to start. This is an optional setting and defaults to 4.
+      The more workers are running, the more Lorry jobs can run at the same
+      time, but the more resources they require.
+
+    * `TROVE_BACKUP_KEYS` -- a space-separated list of paths to SSH keys.
+      If this is set, the Trove will have a backup user that can be accessed
+      with rsync using the SSH keys provided.
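
`LORRY_SSH_KEY` must name a private key whose public half sits beside it with a `.pub` suffix, which is exactly the layout `ssh-keygen` produces. One way to generate such a pair for the example that follows (empty passphrase; the path matches the example below):

    import os
    import subprocess

    os.makedirs('my-trove', exist_ok=True)
    # Writes my-trove/lorry.key and my-trove/lorry.key.pub.
    subprocess.check_call(
        ['ssh-keygen', '-t', 'rsa', '-N', '', '-f', 'my-trove/lorry.key'])
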
+
+    Example
+    -------
+
+    The following set of variables could be used to deploy a Trove instance:
+
+        TROVE_ID: my-trove
+        TROVE_HOSTNAME: my-trove.example.com
+        TROVE_COMPANY: My Personal Trove for Me, Myself and I
+        LORRY_SSH_KEY: my-trove/lorry.key
+        UPSTREAM_TROVE: git.baserock.org
+        UPSTREAM_TROVE_USER: my-trove
+        UPSTREAM_TROVE_EMAIL: my-trove@example.com
+        TROVE_ADMIN_USER: tomjon
+        TROVE_ADMIN_EMAIL: tomjon@example.com
+        TROVE_ADMIN_NAME: Tomjon of Lancre
+        TROVE_ADMIN_SSH_PUBKEY: my-trove/tomjon.key.pub
+
+    These would be put into the cluster morphology used to do the
+    deployment.
diff --git a/extensions/vagrant.configure b/extensions/vagrant.configure
new file mode 100644
index 00000000..abc3ea0c
--- /dev/null
+++ b/extensions/vagrant.configure
@@ -0,0 +1,55 @@
+#!/bin/sh
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+if test "x$VAGRANT" = "x"; then
+    exit 0
+fi
+
+for needed in etc/ssh/sshd_config etc/sudoers; do
+    if ! test -e "$ROOT/$needed"; then
+        echo >&2 "Unable to find $needed"
+        echo >&2 "Cannot continue configuring as Vagrant basebox"
+        exit 1
+    fi
+done
+
+# SSH daemon needs to be configured to not use DNS...
+sed -i -e 's/^\(.*[Uu][Ss][Ee][Dd][Nn][Ss].*\)$/#\1/' "$ROOT/etc/ssh/sshd_config"
+echo "UseDNS no" >> "$ROOT/etc/ssh/sshd_config"
+
+# We need to add a vagrant user with "vagrant" as the password. We're doing
+# this manually because chrooting in to run adduser is not really allowed at
+# deployment time, since we can't rely on being able to run adduser in the
+# target. In practice we could get away with it for now, because we can't
+# deploy raw disks cross-platform and expect extlinux to install anyway, but
+# we won't, for good practice and to highlight this deficiency.
+echo 'vagrant:x:1000:1000:Vagrant User:/home/vagrant:/bin/bash' >> "$ROOT/etc/passwd"
+echo 'vagrant:/6PTOoWylhw3w:16198:0:99999:7:::' >> "$ROOT/etc/shadow"
+echo 'vagrant:x:1000:' >> "$ROOT/etc/group"
+mkdir -p "$ROOT/home/vagrant"
+chown -R 1000:1000 "$ROOT/home/vagrant"
+
+# Next, the vagrant user is meant to have sudo access
+echo 'vagrant ALL=(ALL) NOPASSWD: ALL' >> "$ROOT/etc/sudoers"
+
+# And ensure that we get sbin in our path
+echo 'PATH="$PATH:/sbin:/usr/sbin"' >> "$ROOT/etc/profile"
+echo 'export PATH' >> "$ROOT/etc/profile"
+
diff --git a/extensions/vdaboot.configure b/extensions/vdaboot.configure
new file mode 100755
index 00000000..60de925b
--- /dev/null
+++ b/extensions/vdaboot.configure
@@ -0,0 +1,33 @@
+#!/bin/sh
+# Copyright (C) 2013,2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
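
The /etc/shadow line written above carries a traditional DES crypt(3) hash, whose first two characters are the salt. Assuming the intended password really is "vagrant", the hash can be checked with Python's crypt module (Unix-only):

    import crypt

    # '/6' is the salt taken from the shadow entry above; if the password
    # is 'vagrant', this should print '/6PTOoWylhw3w'.
    print(crypt.crypt('vagrant', '/6'))
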
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + + +# Change the "/" mount point to /dev/vda to use virtio disks. + +set -e + +if [ "$OPENSTACK_USER" ] +then + # Modifying fstab + if [ -f "$1/etc/fstab" ] + then + mv "$1/etc/fstab" "$1/etc/fstab.old" + awk 'BEGIN {print "/dev/vda / btrfs defaults,rw,noatime 0 1"}; + $2 != "/" {print $0 };' "$1/etc/fstab.old" > "$1/etc/fstab" + rm "$1/etc/fstab.old" + else + echo "/dev/vda / btrfs defaults,rw,noatime 0 1"> "$1/etc/fstab" + fi +fi diff --git a/extensions/virtualbox-ssh.check b/extensions/virtualbox-ssh.check new file mode 100755 index 00000000..a97f3294 --- /dev/null +++ b/extensions/virtualbox-ssh.check @@ -0,0 +1,36 @@ +#!/usr/bin/python +# Copyright (C) 2014-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + +'''Preparatory checks for Morph 'virtualbox-ssh' write extension''' + +import cliapp + +import morphlib.writeexts + + +class VirtualBoxPlusSshCheckExtension(morphlib.writeexts.WriteExtension): + def process_args(self, args): + if len(args) != 1: + raise cliapp.AppException('Wrong number of command line args') + + self.require_btrfs_in_deployment_host_kernel() + + upgrade = self.get_environment_boolean('UPGRADE') + if upgrade: + raise cliapp.AppException( + 'Use the `ssh-rsync` write extension to deploy upgrades to an ' + 'existing remote system.') + +VirtualBoxPlusSshCheckExtension().run() diff --git a/extensions/virtualbox-ssh.write b/extensions/virtualbox-ssh.write new file mode 100755 index 00000000..774f2b4f --- /dev/null +++ b/extensions/virtualbox-ssh.write @@ -0,0 +1,211 @@ +#!/usr/bin/python +# Copyright (C) 2012-2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . + + +'''A Morph deployment write extension for deploying to VirtualBox via ssh. + +VirtualBox is assumed to be running on a remote machine, which is +accessed over ssh. The machine gets created, but not started. 
+ +See file virtualbox-ssh.write.help for documentation + +''' + + +import cliapp +import os +import re +import sys +import time +import tempfile +import urlparse + +import morphlib.writeexts + + +class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension): + + def process_args(self, args): + if len(args) != 2: + raise cliapp.AppException('Wrong number of command line args') + + temp_root, location = args + ssh_host, vm_name, vdi_path = self.parse_location(location) + autostart = self.get_environment_boolean('AUTOSTART') + + vagrant = self.get_environment_boolean('VAGRANT') + + fd, raw_disk = tempfile.mkstemp() + os.close(fd) + self.create_local_system(temp_root, raw_disk) + + try: + self.transfer_and_convert_to_vdi( + raw_disk, ssh_host, vdi_path) + self.create_virtualbox_guest(ssh_host, vm_name, vdi_path, + autostart, vagrant) + except BaseException: + sys.stderr.write('Error deploying to VirtualBox') + os.remove(raw_disk) + cliapp.ssh_runcmd(ssh_host, ['rm', '-f', vdi_path]) + raise + else: + os.remove(raw_disk) + self.status( + msg='Virtual machine %(vm_name)s has been created', + vm_name=vm_name) + + def parse_location(self, location): + '''Parse the location argument to get relevant data.''' + + x = urlparse.urlparse(location) + if x.scheme != 'vbox+ssh': + raise cliapp.AppException( + 'URL schema must be vbox+ssh in %s' % location) + m = re.match('^/(?P[^/]+)(?P/.+)$', x.path) + if not m: + raise cliapp.AppException('Cannot parse location %s' % location) + return x.netloc, m.group('guest'), m.group('path') + + def transfer_and_convert_to_vdi(self, raw_disk, ssh_host, vdi_path): + '''Transfer raw disk image to VirtualBox host, and convert to VDI.''' + + self.status(msg='Transfer disk and convert to VDI') + + st = os.lstat(raw_disk) + xfer_hole_path = morphlib.util.get_data_path('xfer-hole') + recv_hole = morphlib.util.get_data('recv-hole') + + ssh_remote_cmd = [ + 'sh', '-c', recv_hole, + 'dummy-argv0', 'vbox', vdi_path, str(st.st_size), + ] + + cliapp.runcmd( + ['python', xfer_hole_path, raw_disk], + ['ssh', ssh_host] + map(cliapp.shell_quote, ssh_remote_cmd), + stdout=None, stderr=None) + + def virtualbox_version(self, ssh_host): + 'Get the version number of the VirtualBox running on the remote host.' 
+ + # --version gives a build id, which looks something like + # 1.2.3r456789, so we need to strip the suffix off and get a tuple + # of the (major, minor, patch) version, since comparing with a + # tuple is more reliable than a string and more convenient than + # comparing against the major, minor and patch numbers directly + self.status(msg='Checking version of remote VirtualBox') + build_id = cliapp.ssh_runcmd(ssh_host, ['VBoxManage', '--version']) + version_string = re.match(r"^([0-9\.]+).*$", build_id.strip()).group(1) + return tuple(int(s or '0') for s in version_string.split('.')) + + def create_virtualbox_guest(self, ssh_host, vm_name, vdi_path, autostart, + vagrant): + '''Create the VirtualBox virtual machine.''' + + self.status(msg='Create VirtualBox virtual machine') + + ram_mebibytes = str(self.get_ram_size() / (1024**2)) + + vcpu_count = str(self.get_vcpu_count()) + + if not vagrant: + hostonly_iface = self.get_host_interface(ssh_host) + + if self.virtualbox_version(ssh_host) < (4, 3, 0): + sataportcount_option = '--sataportcount' + else: + sataportcount_option = '--portcount' + + commands = [ + ['createvm', '--name', vm_name, '--ostype', 'Linux26_64', + '--register'], + ['modifyvm', vm_name, '--ioapic', 'on', + '--memory', ram_mebibytes, '--cpus', vcpu_count], + ['storagectl', vm_name, '--name', 'SATA Controller', + '--add', 'sata', '--bootable', 'on', sataportcount_option, '2'], + ['storageattach', vm_name, '--storagectl', 'SATA Controller', + '--port', '0', '--device', '0', '--type', 'hdd', '--medium', + vdi_path], + ] + if vagrant: + commands[1].extend(['--nic1', 'nat', + '--natnet1', 'default']) + else: + commands[1].extend(['--nic1', 'hostonly', + '--hostonlyadapter1', hostonly_iface, + '--nic2', 'nat', '--natnet2', 'default']) + + attach_disks = self.parse_attach_disks() + for device_no, disk in enumerate(attach_disks, 1): + cmd = ['storageattach', vm_name, + '--storagectl', 'SATA Controller', + '--port', str(device_no), + '--device', '0', + '--type', 'hdd', + '--medium', disk] + commands.append(cmd) + + if autostart: + commands.append(['startvm', vm_name]) + + for command in commands: + argv = ['VBoxManage'] + command + cliapp.ssh_runcmd(ssh_host, argv) + + def get_host_interface(self, ssh_host): + host_ipaddr = os.environ.get('HOST_IPADDR') + netmask = os.environ.get('NETMASK') + + if host_ipaddr is None: + raise cliapp.AppException('HOST_IPADDR was not given') + + if netmask is None: + raise cliapp.AppException('NETMASK was not given') + + # 'VBoxManage list hostonlyifs' retrieves a list with the hostonly + # interfaces on the host. For each interface, the following lines + # are shown on top: + # + # Name: vboxnet0 + # GUID: 786f6276-656e-4074-8000-0a0027000000 + # Dhcp: Disabled + # IPAddress: 192.168.100.1 + # + # The following command tries to retrieve the hostonly interface + # name (e.g. vboxnet0) associated with the given ip address. 
+        iface = None
+        lines = cliapp.ssh_runcmd(ssh_host,
+            ['VBoxManage', 'list', 'hostonlyifs']).splitlines()
+        for i, v in enumerate(lines):
+            if host_ipaddr in v:
+                iface = lines[i-3].split()[1]
+                break
+
+        if iface is None:
+            iface = cliapp.ssh_runcmd(ssh_host,
+                ['VBoxManage', 'hostonlyif', 'create'])
+            # 'VBoxManage hostonlyif create' shows the name of the
+            # created hostonly interface inside single quotes
+            iface = iface[iface.find("'") + 1 : iface.rfind("'")]
+            cliapp.ssh_runcmd(ssh_host,
+                ['VBoxManage', 'hostonlyif',
+                 'ipconfig', iface,
+                 '--ip', host_ipaddr,
+                 '--netmask', netmask])
+
+        return iface
+
+VirtualBoxPlusSshWriteExtension().run()
diff --git a/extensions/virtualbox-ssh.write.help b/extensions/virtualbox-ssh.write.help
new file mode 100644
index 00000000..2dbf988c
--- /dev/null
+++ b/extensions/virtualbox-ssh.write.help
@@ -0,0 +1,135 @@
+# Copyright (C) 2014, 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+help: |
+
+    Deploy a Baserock system as a *new* VirtualBox virtual machine.
+    (Use the `ssh-rsync` write extension to deploy upgrades to an *existing*
+    VM.)
+
+    Connects to HOST via ssh to run VirtualBox's command line management
+    tools.
+
+    Parameters:
+
+    * location: a custom URL scheme of the form `vbox+ssh://HOST/GUEST/PATH`,
+      where:
+        * HOST is the name of the host on which VirtualBox is running
+        * GUEST is the name of the guest VM on that host
+        * PATH is the path to the disk image that should be created,
+          on that host. For example,
+          `vbox+ssh://alice@192.168.122.1/testsys/home/alice/testys.img`
+          where:
+          * `alice@192.168.122.1` is the target host as given to ssh,
+            **from within the development host** (which may be
+            different from the target host's normal address);
+          * `testsys` is the name of the new guest VM;
+          * `/home/alice/testys.img` is the pathname of the disk image file
+            on the target host.
+
+    * HOSTNAME=name: the hostname of the **guest** VM within the network into
+      which it is being deployed.
+
+    * DISK_SIZE=X: **(MANDATORY)** the size of the VM's primary virtual hard
+      disk. `X` should use a suffix of `K`, `M`, or `G` (in upper or lower
+      case) to indicate kilo-, mega-, or gigabytes. For example,
+      `DISK_SIZE=100G` would create a 100 gigabyte virtual hard disk.
+
+    * RAM_SIZE=X: the amount of RAM that the virtual machine should allocate
+      for itself from the host. `X` is interpreted in the same way as for
+      DISK_SIZE, and defaults to `1G`.
+
+    * VCPUS=n: the number of virtual CPUs for the VM. Allowed values 1-32. Do
+      not use more CPU cores than you have available physically (real cores,
+      no hyperthreads).
+
+    * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to
+      tell Linux to use, rather than booting the rootfs directly.
+
+    * DTB_PATH=path: **(MANDATORY)** for systems that require a device tree
+      binary - give the full path (without a leading /) to the location of the
+      DTB in the built system image. The deployment will fail if `path` does
+      not exist.
+
+    * BOOTLOADER_INSTALL=value: **(MANDATORY)** for non-x86 systems, the
+      bootloader to be installed.
+
+      allowed values =
+      - 'extlinux' (default) - the extlinux bootloader will
+         be installed
+      - 'none' - no bootloader will be installed by `morph deploy`. A
+         bootloader must be installed manually. This value must be used when
+         deploying non-x86 systems such as ARM.
+
+    * BOOTLOADER_CONFIG_FORMAT=value: the bootloader format to be used.
+      If not specified for x86-32 and x86-64 systems, 'extlinux' will be used.
+
+      allowed values =
+      - 'extlinux'
+
+    * KERNEL_ARGS=args: optional additional kernel command-line parameters to
+      be appended to the default set. The default set is:
+
+       'rw init=/sbin/init rootfstype=btrfs \
+        rootflags=subvol=systems/default/run \
+        root=[name or UUID of root filesystem]'
+
+      (See https://www.kernel.org/doc/Documentation/kernel-parameters.txt)
+
+    * AUTOSTART=<VALUE> - boolean. If it is set, the VM will be started when
+      it has been deployed.
+
+    * VAGRANT=<VALUE> - boolean. If it is set, then networking is configured
+      so that the VM will work with Vagrant. Otherwise networking is
+      configured to run directly in VirtualBox.
+
+    * HOST_IPADDR=<ip> - the IP address of the VM host.
+
+    * NETMASK=<netmask> - the netmask of the VM host.
+
+    * NETWORK_CONFIG=<net_config> - `net_config` is used to set up the VM's
+      network interfaces. It is a string containing semi-colon separated
+      'stanzas' where each stanza provides information about a network
+      interface. Each stanza is of the form name:type[,arg=value] e.g.
+
+          lo:loopback
+          eth0:dhcp
+          eth1:static,address=10.0.0.1,netmask=255.255.0.0
+
+      An example of the NETWORK_CONFIG parameter (it should be on one line):
+
+          `"lo:loopback;eth0:static,address=192.168.100.2,netmask=255.255.255.0;
+          eth1:dhcp,hostname=$(hostname)"`
+
+      It is useful to configure one interface to use NAT to give the VM access
+      to the outside world and another interface to use the VirtualBox host
+      adapter to allow you to access the Trove from the host machine.
+
+      The NAT interface eth1 is set up to use dhcp; the host-only adapter
+      interface is configured statically.
+
+      Note: you must give the host-only adapter interface an address that lies
+      **on the same network** as the host adapter. So if the host adapter has
+      an IP of 192.168.100.1, eth0 should have an address such as
+      192.168.100.42.
+
+      The settings of the host adapter, including its IP, can be changed
+      either in the VirtualBox manager UI
+      (https://www.virtualbox.org/manual/ch03.html#settings-network)
+      or via the VBoxManage command line
+      (https://www.virtualbox.org/manual/ch08.html#idp57572192)
+
+      See Chapter 6 of the VirtualBox User Manual for more information about
+      virtual networking (https://www.virtualbox.org/manual/ch06.html)
+
+    (See `morph help deploy` for details of how to pass parameters to write
+    extensions)
diff --git a/fstab.configure b/fstab.configure
deleted file mode 100755
index b9154eee..00000000
--- a/fstab.configure
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright © 2013-2015 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
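
The stanza syntax described under NETWORK_CONFIG above is the same one that simple-network.configure parses earlier in this series: a `name:type` head followed by comma-separated `key=value` arguments. Its parsing logic, reduced to a few lines:

    stanza = 'eth1:static,address=10.0.0.1,netmask=255.255.0.0'
    elements = stanza.split(',')
    name, iface_type = elements.pop(0).split(':')
    args = dict(element.split('=', 1) for element in elements)
    print(name, iface_type, args)
    # eth1 static {'address': '10.0.0.1', 'netmask': '255.255.0.0'}
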
-# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . -# -# =*= License: GPL-2 =*= - - -import os -import sys - -import morphlib - -envvars = {k: v for (k, v) in os.environ.iteritems() if k.startswith('FSTAB_')} - -conf_file = os.path.join(sys.argv[1], 'etc/fstab') -morphlib.util.write_from_dict(conf_file, envvars) diff --git a/hosts b/hosts deleted file mode 100644 index 5b97818d..00000000 --- a/hosts +++ /dev/null @@ -1 +0,0 @@ -localhost ansible_connection=local diff --git a/hosts.configure b/hosts.configure deleted file mode 100755 index 6b068d04..00000000 --- a/hosts.configure +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Copyright © 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# =*= License: GPL-2 =*= - - -import os -import sys -import socket - -import morphlib - -def validate(var, line): - xs = line.split() - if len(xs) == 0: - raise morphlib.Error("`%s: %s': line is empty" % (var, line)) - - ip = xs[0] - hostnames = xs[1:] - - if len(hostnames) == 0: - raise morphlib.Error("`%s: %s': missing hostname" % (var, line)) - - family = socket.AF_INET6 if ':' in ip else socket.AF_INET - - try: - socket.inet_pton(family, ip) - except socket.error: - raise morphlib.Error("`%s: %s' invalid ip" % (var, ip)) - -envvars = {k: v for (k, v) in os.environ.iteritems() if k.startswith('HOSTS_')} - -conf_file = os.path.join(sys.argv[1], 'etc/hosts') -morphlib.util.write_from_dict(conf_file, envvars, validate) diff --git a/image-package-example/README b/image-package-example/README deleted file mode 100644 index c1322f25..00000000 --- a/image-package-example/README +++ /dev/null @@ -1,9 +0,0 @@ -Image package example scripts -============================= - -These are scripts used to create disk images or install the system onto -an existing disk. - -This is also implemented independently for the rawdisk.write write -extension; see morphlib.writeexts.WriteExtension.create_local_system() -for a similar, python implementation. diff --git a/image-package-example/common.sh.in b/image-package-example/common.sh.in deleted file mode 100644 index 9a7389a7..00000000 --- a/image-package-example/common.sh.in +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/false -# Script library to be used by disk-install.sh and make-disk-image.sh - -status(){ - echo "$@" -} - -info(){ - echo "$@" >&2 -} - -warn(){ - echo "$@" >&2 -} - -extract_rootfs(){ - tar -C "$1" -xf @@ROOTFS_TAR_PATH@@ . 
-} - -make_disk_image(){ - truncate --size "$1" "$2" -} - -format_disk(){ - local disk="$1" - mkfs.ext4 -F -L rootfs "$disk" -} - -install_fs_config(){ - local mountpoint="$1" - local rootdisk="${2-/dev/vda}" - cat >>"$mountpoint/etc/fstab" <&2 - exit 1 -} - -warn(){ - echo "$@" >&2 -} - -info(){ - echo "$@" >&2 -} - -shellescape(){ - echo "'$(echo "$1" | sed -e "s/'/'\\''/g")'" -} - -sedescape(){ - # Escape the passed in string so it can be safely interpolated into - # a sed expression as a literal value. - echo "$1" | sed -e 's/[\/&]/\\&/g' -} - -ROOTDIR="$1" -OUTPUT_TAR="$2" -td="$(mktemp -d)" -IMAGE_SUBDIR="${IMAGE_SUBDIR-image_files}" -SCRIPT_SUBDIR="${SCRIPT_SUBDIR-tools}" -ROOTFS_TAR="${ROOTFS_TAR-rootfs.tar}" - -# Generate shell snippets that will expand to paths to various resources -# needed by the scripts. -# They expand to a single shell word, so constructs like the following work -# SCRIPT_DIR=@@SCRIPT_DIR@@ -# dd if="$SCRIPT_DIR/mbr" of="$disk" count=1 -# tar -C "$mountpoint" -xf @@ROOTFS_TAR_PATH@@ . -find_script_dir='"$(readlink -f "$(dirname "$0")")"' -image_dir="$find_script_dir/../$(shellescape "$IMAGE_SUBDIR")" -rootfs_tar_path="$image_dir/$(shellescape "$ROOTFS_TAR")" - -install_script(){ - local source_file="$1" - local output_dir="$2" - local target_file="$output_dir/$SCRIPT_SUBDIR/$(basename "$source_file" .in)" - sed -e "s/@@SCRIPT_DIR@@/$(sedescape "$find_script_dir")/g" \ - -e "s/@@IMAGE_DIR@@/$(sedescape "$image_dir")/g" \ - -e "s/@@ROOTFS_TAR_PATH@@/$(sedescape "$rootfs_tar_path")/g" \ - "$source_file" \ - | install -D -m 755 /proc/self/fd/0 "$target_file" -} - -install_scripts(){ - local output_dir="$1" - ( - IFS="${INCLUDE_SCRIPTS_SEPARATOR-:}" - for script in $INCLUDE_SCRIPTS; do - local script_path="$(pwd)/$script" - if [ ! -e "$script_path" ]; then - warn Script "$script" not found, ignoring - continue - fi - install_script "$script" "$output_dir" - done - ) -} - -install_bootloader_blobs(){ - local output_dir="$1" - local image_dir="$output_dir/$IMAGE_SUBDIR" - ( - IFS="${BOOTLOADER_BLOBS_SEPARATOR-:}" - for blob in $BOOTLOADER_BLOBS; do - local blob_path="$ROOTDIR/$blob" - if [ ! -e "$blob_path" ]; then - warn Bootloader blob "$blob" not found, ignoring - continue - fi - install -D -m644 "$blob_path" "$image_dir/$(basename "$blob_path")" - done - ) -} - -# Determine a basename for our directory as the same as our tarball with -# extensions removed. This is needed, since tarball packages usually -# have a base directory of its contents, rather then extracting into the -# current directory. -output_dir="$(basename "$OUTPUT_TAR")" -for ext in .xz .bz2 .gzip .gz .tgz .tar; do - output_dir="${output_dir%$ext}" -done - -info Installing scripts -install_scripts "$td/$output_dir" - -info Installing bootloader blobs -install_bootloader_blobs "$td/$output_dir" - -info Writing rootfs tar to "$IMAGE_SUBDIR/$ROOTFS_TAR" -tar -C "$ROOTDIR" -c . \ -| sh -c "${ROOTFS_COMPRESS-cat}" >"$td/$output_dir/$IMAGE_SUBDIR/$ROOTFS_TAR" - -info Writing image package tar to "$OUTPUT_TAR" -tar -C "$td" -c "$output_dir" | sh -c "${OUTPUT_COMPRESS-cat}" >"$OUTPUT_TAR" diff --git a/initramfs.write b/initramfs.write deleted file mode 100755 index 1059defa..00000000 --- a/initramfs.write +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/sh -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . -# -# =*= License: GPL-2 =*= - -set -e - -ROOTDIR="$1" -INITRAMFS_PATH="$2" - -(cd "$ROOTDIR" && - find . -print0 | - cpio -0 -H newc -o) | - gzip -c | install -D -m644 /dev/stdin "$INITRAMFS_PATH" diff --git a/initramfs.write.help b/initramfs.write.help deleted file mode 100644 index 54d3ae8c..00000000 --- a/initramfs.write.help +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (C) 2014, 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, see . - -help: | - - Create an initramfs for a system by taking an existing system and - converting it to the appropriate format. - - The system must have a `/init` executable as the userland entry-point. - This can have a different path, if `rdinit=$path` is added to - the kernel command line. This can be added to the `rawdisk`, - `virtualbox-ssh` and `kvm` write extensions with the `KERNEL_CMDLINE` - option. - - It is possible to use a ramfs as the final rootfs without a `/init` - executable, by setting `root=/dev/mem`, or `rdinit=/sbin/init`, - but this is beyond the scope for the `initramfs.write` extension. - - The intended use of initramfs.write is to be part of a nested - deployment, so the parent system has an initramfs stored as - `/boot/initramfs.gz`. See the following example: - - name: initramfs-test - kind: cluster - systems: - - morph: minimal-system-x86_64-generic - deploy: - system: - type: rawdisk - location: initramfs-system-x86_64.img - DISK_SIZE: 1G - HOSTNAME: initramfs-system - INITRAMFS_PATH: boot/initramfs.gz - subsystems: - - morph: initramfs-x86_64 - deploy: - initramfs: - type: initramfs - location: boot/initramfs.gz - - Parameters: - - * location: the path where the initramfs will be installed (e.g. - `boot/initramfs.gz`) in the above example diff --git a/install-essential-files.configure b/install-essential-files.configure deleted file mode 100755 index 2779b0d4..00000000 --- a/install-essential-files.configure +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python2 -# Copyright (C) 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . 
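
The removed initramfs.write reduces to one pipeline: a newc-format cpio archive of the root directory, gzipped. The same pipeline driven from Python, with invented paths and without the final `install -D -m644` step:

    import subprocess

    pipeline = ('cd /tmp/system-root && find . -print0 '
                '| cpio -0 -H newc -o | gzip -c > /tmp/initramfs.gz')
    subprocess.check_call(['sh', '-c', pipeline])
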
diff --git a/install-essential-files.configure b/install-essential-files.configure
deleted file mode 100755
index 2779b0d4..00000000
--- a/install-essential-files.configure
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env python2
-# Copyright (C) 2015 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-'''A Morph configuration extension for adding essential files to a system.
-
-It reads the manifest file located at essential-files/manifest, then uses
-the contents of that file to determine which files to install into the
-target system.
-
-'''
-
-import subprocess
-import os
-
-import cliapp
-
-
-class InstallEssentialFilesConfigureExtension(cliapp.Application):
-
-    def process_args(self, args):
-        target_root = args[0]
-        os.environ["INSTALL_FILES"] = "essential-files/manifest"
-        self.install_essential_files(target_root)
-
-    def install_essential_files(self, target_root):
-        command = os.path.join(os.path.dirname(__file__),
-                               "install-files.configure")
-        subprocess.check_call([command, target_root])
-
-InstallEssentialFilesConfigureExtension().run()
diff --git a/install-essential-files.configure.help b/install-essential-files.configure.help
deleted file mode 100644
index 9148aeff..00000000
--- a/install-essential-files.configure.help
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (C) 2015 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, see <http://www.gnu.org/licenses/>.
-
-help: |
-    This installs files from the essential-files/ folder in your
-    definitions.git repo, according to essential-files/manifest.
-
-    It wraps the install-files.configure extension. Take a look at that
-    extension's help to learn more about the format of the manifest file.
diff --git a/install-files.configure b/install-files.configure
deleted file mode 100755
index 341cce61..00000000
--- a/install-files.configure
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/python
-# Copyright (C) 2013-2015 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-'''A Morph configuration extension for adding arbitrary files to a system.
-
-It reads the manifest files specified in the environment variable
-INSTALL_FILES, then uses the contents of those files to determine which
-files to install into the target system.
-
-'''
-
-import cliapp
-import os
-import errno
-import re
-import sys
-import shlex
-import shutil
-import stat
-
-try:
-    import jinja2
-    jinja_available = True
-except ImportError:
-    jinja_available = False
-
-
-class InstallFilesConfigureExtension(cliapp.Application):
-
-    def process_args(self, args):
-        if 'INSTALL_FILES' not in os.environ:
-            return
-        target_root = args[0]
-        manifests = shlex.split(os.environ['INSTALL_FILES'])
-        for manifest in manifests:
-            self.install_manifest(manifest, target_root)
-
-    def install_manifest(self, manifest, target_root):
-        manifest_dir = os.path.dirname(manifest)
-        with open(manifest) as f:
-            entries = f.readlines()
-            for entry in entries:
-                self.install_entry(entry, manifest_dir, target_root)
-
-    def force_symlink(self, source, link_name):
-        try:
-            os.symlink(source, link_name)
-        except OSError as e:
-            if e.errno == errno.EEXIST:
-                os.remove(link_name)
-                os.symlink(source, link_name)
-
-    def install_entry(self, entry, manifest_root, target_root):
-        m = re.match('(template )?(overwrite )?'
-                     '([0-7]+) ([0-9]+) ([0-9]+) (\S+)', entry)
-
-        if m:
-            template = m.group(1)
-            overwrite = m.group(2)
-            mode = int(m.group(3), 8)   # mode is octal
-            uid = int(m.group(4))
-            gid = int(m.group(5))
-            path = m.group(6)
-        else:
-            raise cliapp.AppException('Invalid manifest entry, '
-                                      'format: [template] [overwrite] '
-                                      '<octal mode> <uid decimal> '
-                                      '<gid decimal> <filename>')
-
-        dest_path = os.path.join(target_root, './' + path)
-        if stat.S_ISDIR(mode):
-            if os.path.exists(dest_path) and not overwrite:
-                dest_stat = os.stat(dest_path)
-                if (mode != dest_stat.st_mode
-                        or uid != dest_stat.st_uid
-                        or gid != dest_stat.st_gid):
-                    raise cliapp.AppException('"%s" exists and is not '
-                                              'identical to directory '
-                                              '"%s"' % (dest_path, entry))
-            else:
-                os.mkdir(dest_path, mode)
-                os.chown(dest_path, uid, gid)
-                os.chmod(dest_path, mode)
-
-        elif stat.S_ISLNK(mode):
-            if os.path.lexists(dest_path) and not overwrite:
-                raise cliapp.AppException('Symlink already exists at %s'
-                                          % dest_path)
-            else:
-                linkdest = os.readlink(os.path.join(manifest_root,
-                                                    './' + path))
-                self.force_symlink(linkdest, dest_path)
-                os.lchown(dest_path, uid, gid)
-
-        elif stat.S_ISREG(mode):
-            if os.path.lexists(dest_path) and not overwrite:
-                raise cliapp.AppException('File already exists at %s'
-                                          % dest_path)
-            else:
-                if template:
-                    if not jinja_available:
-                        raise cliapp.AppException(
-                            "Failed to install template file `%s': "
                            'install-files templates require jinja2'
-                            % path)
-
-                    loader = jinja2.FileSystemLoader(manifest_root)
-                    env = jinja2.Environment(loader=loader,
-                                             keep_trailing_newline=True)
-
-                    env.get_template(path).stream(os.environ).dump(dest_path)
-                else:
-                    shutil.copyfile(os.path.join(manifest_root, './' + path),
-                                    dest_path)
-
-                os.chown(dest_path, uid, gid)
-                os.chmod(dest_path, mode)
-
-        else:
-            raise cliapp.AppException('Mode given in "%s" is not a file,'
-                                      ' symlink or directory' % entry)
-
-InstallFilesConfigureExtension().run()
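The mode field in each manifest entry does double duty: its file-type bits select the install_entry branch, and its permission bits become the chmod value. A quick illustration, separate from the extension itself:

    # Hedged sketch: how install_entry classifies manifest modes.
    import stat

    for field in ('0100644', '0120000', '0040755'):
        mode = int(field, 8)          # manifest modes are octal
        if stat.S_ISREG(mode):
            kind = 'regular file'
        elif stat.S_ISLNK(mode):
            kind = 'symlink'
        elif stat.S_ISDIR(mode):
            kind = 'directory'
        else:
            kind = 'unsupported'
        print('%s -> %s, permissions %o' % (field, kind, stat.S_IMODE(mode)))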
diff --git a/install-files.configure.help b/install-files.configure.help
deleted file mode 100644
index 991c26c8..00000000
--- a/install-files.configure.help
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright (C) 2014, 2015 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, see <http://www.gnu.org/licenses/>.
-
-help: |
-    Install a set of files onto a system.
-
-    To use this extension you create a directory of files you want to
-    install onto the target system.
-
-    In this example we want to copy some ssh keys onto a system:
-
-        % mkdir sshkeyfiles
-        % mkdir -p sshkeyfiles/root/.ssh
-        % cp id_rsa sshkeyfiles/root/.ssh
-        % cp id_rsa.pub sshkeyfiles/root/.ssh
-
-    Now we need to create a manifest file to set the file modes
-    and permissions. The manifest file should be created inside the
-    directory that contains the files we're trying to install.
-
-        cat << EOF > sshkeyfiles/manifest
-        0040755 0 0 /root/.ssh
-        0100600 0 0 /root/.ssh/id_rsa
-        0100644 0 0 /root/.ssh/id_rsa.pub
-        EOF
-
-    Then we add the path to our manifest to our cluster morph; this path
-    should be relative to the system definitions repository.
-
-        INSTALL_FILES: sshkeyfiles/manifest
-
-    More generally, entries in the manifest are formatted as:
-
-        [overwrite] <octal mode> <uid decimal> <gid decimal> <filename>
-
-    NOTE: Directories on the target must be created if they do not exist.
-
-    The extension supports files, symlinks and directories.
-
-    For example,
-
-        0100644 0 0 /etc/issue
-
-    creates a regular file at /etc/issue with 644 permissions,
-    uid 0 and gid 0, if the file doesn't already exist.
-
-        overwrite 0100644 0 0 /etc/issue
-
-    creates a regular file at /etc/issue with 644 permissions,
-    uid 0 and gid 0; if the file already exists it is overwritten.
-
-        0100755 0 0 /usr/bin/foo
-
-    creates an executable file at /usr/bin/foo
-
-        0040755 0 0 /etc/foodir
-
-    creates a directory with 755 permissions
-
-        0120000 0 0 /usr/bin/bar
-
-    creates a symlink at /usr/bin/bar
-
-    NOTE: You will still need to make a symlink in the manifest directory.
diff --git a/installer.configure b/installer.configure
deleted file mode 100755
index a77dc851..00000000
--- a/installer.configure
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (C) 2014 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# This is a "morph deploy" configuration extension to configure an installer
-# system. It will create the configuration needed in the installer system
-# to perform an installation.
It uses the following variables from the -# environment: -# -# * INSTALLER_TARGET_STORAGE_DEVICE -# * INSTALLER_ROOTFS_TO_INSTALL -# * INSTALLER_POST_INSTALL_COMMAND (optional, defaults to `reboot -f`) - -import os -import sys -import yaml - -install_config_file = os.path.join(sys.argv[1], 'etc', 'install.conf') - -try: - installer_configuration = { - 'INSTALLER_TARGET_STORAGE_DEVICE': os.environ['INSTALLER_TARGET_STORAGE_DEVICE'], - 'INSTALLER_ROOTFS_TO_INSTALL': os.environ['INSTALLER_ROOTFS_TO_INSTALL'], - } -except KeyError as e: - print "Not configuring as an installer system" - sys.exit(0) - -postinstkey = 'INSTALLER_POST_INSTALL_COMMAND' -installer_configuration[postinstkey] = os.environ.get(postinstkey, 'reboot -f') - -with open(install_config_file, 'w') as f: - f.write( yaml.dump(installer_configuration, default_flow_style=False) ) - -print "Configuration of the installer system in %s" % install_config_file diff --git a/jffs2.write b/jffs2.write deleted file mode 100644 index 46b69a53..00000000 --- a/jffs2.write +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/python -#-*- coding: utf-8 -*- -# Copyright © 2015 Codethink Limited - -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - - -'''A Morph deployment write extension for creating images with jffs2 - as the root filesystem.''' - - -import cliapp -import os - -import morphlib.writeexts - - -class Jffs2WriteExtension(morphlib.writeexts.WriteExtension): - - '''See jffs2.write.help for documentation.''' - - def process_args(self, args): - if len(args) != 2: - raise cliapp.AppException('Wrong number of command line args') - - temp_root, location = args - - try: - self.create_jffs2_system(temp_root, location) - self.status(msg='Disk image has been created at %(location)s', - location = location) - except Exception: - self.status(msg='Failure to deploy system to %(location)s', - location = location) - raise - - def create_jffs2_system(self, temp_root, location): - erase_block = self.get_erase_block_size() - cliapp.runcmd( - ['mkfs.jffs2', '--pad', '--no-cleanmarkers', - '--eraseblock='+erase_block, '-d', temp_root, '-o', location]) - - def get_erase_block_size(self): - erase_block = os.environ.get('ERASE_BLOCK', '') - - if erase_block == '': - raise cliapp.AppException('ERASE_BLOCK was not given') - - if not erase_block.isdigit(): - raise cliapp.AppException('ERASE_BLOCK must be a whole number') - - return erase_block - -Jffs2WriteExtension().run() diff --git a/jffs2.write.help b/jffs2.write.help deleted file mode 100644 index 059a354b..00000000 --- a/jffs2.write.help +++ /dev/null @@ -1,28 +0,0 @@ -#-*- coding: utf-8 -*- -# Copyright © 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. 
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, see <http://www.gnu.org/licenses/>.
-
-help: |
-
-    Creates an image with a jffs2 root filesystem from a system produced
-    by a Morph build, and writes it out. To use this extension, the host
-    system must have access to mkfs.jffs2, which is provided in the
-    mtd-utilities.morph stratum.
-
-    Parameters:
-
-    * location: the pathname of the disk image to be created/upgraded, or
-      the path to the physical device.
-
-    * ERASE_BLOCK: the erase block size of the target system, which can be
-      found in '/sys/class/mtd/mtdX/erasesize'
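The ERASE_BLOCK value the help text mentions can be read straight out of sysfs on the target-like host; the mtd device name below is illustrative:

    # Hedged sketch: obtain ERASE_BLOCK for jffs2.write from sysfs.
    with open('/sys/class/mtd/mtd0/erasesize') as f:
        erase_block = f.read().strip()    # bytes, as a decimal string
    print('ERASE_BLOCK=%s' % erase_block)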
diff --git a/kvm.check b/kvm.check
deleted file mode 100755
index 67cb3d38..00000000
--- a/kvm.check
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/usr/bin/python
-# Copyright (C) 2014-2015 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <http://www.gnu.org/licenses/>.
-
-'''Preparatory checks for Morph 'kvm' write extension'''
-
-import cliapp
-import os
-import re
-import urlparse
-
-import morphlib.writeexts
-
-
-class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension):
-
-    location_pattern = '^/(?P<guest>[^/]+)(?P<path>/.+)$'
-
-    def process_args(self, args):
-        if len(args) != 1:
-            raise cliapp.AppException('Wrong number of command line args')
-
-        self.require_btrfs_in_deployment_host_kernel()
-
-        upgrade = self.get_environment_boolean('UPGRADE')
-        if upgrade:
-            raise cliapp.AppException(
-                'Use the `ssh-rsync` write extension to deploy upgrades to an '
-                'existing remote system.')
-
-        location = args[0]
-        ssh_host, vm_name, vm_path = self.check_and_parse_location(location)
-
-        self.check_ssh_connectivity(ssh_host)
-        self.check_can_create_file_at_given_path(ssh_host, vm_path)
-        self.check_no_existing_libvirt_vm(ssh_host, vm_name)
-        self.check_extra_disks_exist(ssh_host, self.parse_attach_disks())
-        self.check_virtual_networks_are_started(ssh_host)
-        self.check_host_has_virtinstall(ssh_host)
-
-    def check_and_parse_location(self, location):
-        '''Check and parse the location argument to get relevant data.'''
-
-        x = urlparse.urlparse(location)
-
-        if x.scheme != 'kvm+ssh':
-            raise cliapp.AppException(
-                'URL scheme must be kvm+ssh in %s' % location)
-
-        m = re.match(self.location_pattern, x.path)
-        if not m:
-            raise cliapp.AppException('Cannot parse location %s' % location)
-
-        return x.netloc, m.group('guest'), m.group('path')
-
-    def check_no_existing_libvirt_vm(self, ssh_host, vm_name):
-        try:
-            cliapp.ssh_runcmd(ssh_host,
-                ['virsh', '--connect', 'qemu:///system', 'domstate', vm_name])
-        except cliapp.AppException as e:
-            pass
-        else:
-            raise cliapp.AppException(
-                'Host %s already has a VM named %s. You can use the ssh-rsync '
-                'write extension to deploy upgrades to existing machines.'
-                % (ssh_host, vm_name))
-
-    def check_can_create_file_at_given_path(self, ssh_host, vm_path):
-
-        def check_can_write_to_given_path():
-            try:
-                cliapp.ssh_runcmd(ssh_host, ['touch', vm_path])
-            except cliapp.AppException as e:
-                raise cliapp.AppException("Can't write to location %s on %s"
                                          % (vm_path, ssh_host))
-            else:
-                cliapp.ssh_runcmd(ssh_host, ['rm', vm_path])
-
-        try:
-            cliapp.ssh_runcmd(ssh_host, ['test', '-e', vm_path])
-        except cliapp.AppException as e:
-            # vm_path doesn't already exist, so let's test we can write
-            check_can_write_to_given_path()
-        else:
-            raise cliapp.AppException('%s already exists on %s'
-                                      % (vm_path, ssh_host))
-
-    def check_extra_disks_exist(self, ssh_host, filename_list):
-        for filename in filename_list:
-            try:
-                cliapp.ssh_runcmd(ssh_host, ['ls', filename])
-            except cliapp.AppException as e:
-                raise cliapp.AppException('Did not find file %s on host %s' %
-                                          (filename, ssh_host))
-
-    def check_virtual_networks_are_started(self, ssh_host):
-
-        def check_virtual_network_is_started(network_name):
-            cmd = ['virsh', '-c', 'qemu:///system', 'net-info', network_name]
-            net_info = cliapp.ssh_runcmd(ssh_host, cmd).split('\n')
-
-            def pretty_concat(lines):
-                return '\n'.join(['\t%s' % line for line in lines])
-
-            for line in net_info:
-                m = re.match('^Active:\W*(\w+)\W*', line)
-                if m:
-                    break
-            else:
-                raise cliapp.AppException(
-                    "Got unexpected output parsing output of `%s':\n%s"
-                    % (' '.join(cmd), pretty_concat(net_info)))
-
-            network_active = m.group(1) == 'yes'
-
-            if not network_active:
-                raise cliapp.AppException("Network '%s' is not started"
-                                          % network_name)
-
-        def name(nic_entry):
-            # Remove the 'network=' prefix exactly; str.lstrip would strip
-            # a character set and mangle names starting with those letters.
-            if ',' in nic_entry:
-                # network=NETWORK_NAME,mac=12:34,model=e1000...
-                return nic_entry[:nic_entry.find(',')][len('network='):]
-            else:
-                return nic_entry[len('network='):]  # network=NETWORK_NAME
-
-        if 'NIC_CONFIG' in os.environ:
-            nics = os.environ['NIC_CONFIG'].split()
-
-            for n in nics:
-                if not (n.startswith('network=')
-                        or n.startswith('bridge=')
-                        or n == 'user'):
-                    raise cliapp.AppException('malformed NIC_CONFIG: %s\n'
-                                              " (expected 'bridge=BRIDGE'"
-                                              " 'network=NAME' or 'user')" % n)
-
-            # --network bridge= is used to specify a bridge
-            # --network user is used to specify a form of NAT
-            # (see the virt-install(1) man page)
-            networks = [name(n) for n in nics if not n.startswith('bridge=')
-                        and not n.startswith('user')]
-        else:
-            networks = ['default']
-
-        for network in networks:
-            check_virtual_network_is_started(network)
-
-    def check_host_has_virtinstall(self, ssh_host):
-        try:
-            cliapp.ssh_runcmd(ssh_host, ['which', 'virt-install'])
-        except cliapp.AppException:
-            raise cliapp.AppException(
-                'virt-install does not seem to be installed on host %s'
-                % ssh_host)
-
-
-KvmPlusSshCheckExtension().run()
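check_virtual_networks_are_started only needs the libvirt network names out of NIC_CONFIG; bridge= and user entries are left to virt-install. A condensed, illustrative sketch of that filtering, mirroring the checks above:

    # Hedged sketch of NIC_CONFIG filtering as in kvm.check.
    def network_names(nic_config):
        names = []
        for entry in nic_config.split():
            if entry.startswith('bridge=') or entry == 'user':
                continue                         # no network to check
            entry = entry[len('network='):]      # network=NAME[,options]
            names.append(entry.split(',')[0])
        return names or ['default']

    # network_names('network=default,mac=52:54:00:12:34:56 user')
    # -> ['default']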
diff --git a/kvm.write b/kvm.write
deleted file mode 100755
index 0d0c095b..00000000
--- a/kvm.write
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/python
-# Copyright (C) 2012-2015 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-'''A Morph deployment write extension for deploying to KVM+libvirt.
-
-See file kvm.write.help for documentation.
-
-'''
-
-
-import cliapp
-import os
-import re
-import sys
-import tempfile
-import urlparse
-
-import morphlib.writeexts
-
-
-class KvmPlusSshWriteExtension(morphlib.writeexts.WriteExtension):
-
-    location_pattern = '^/(?P<guest>[^/]+)(?P<path>/.+)$'
-
-    def process_args(self, args):
-        if len(args) != 2:
-            raise cliapp.AppException('Wrong number of command line args')
-
-        temp_root, location = args
-        ssh_host, vm_name, vm_path = self.parse_location(location)
-        autostart = self.get_environment_boolean('AUTOSTART')
-
-        fd, raw_disk = tempfile.mkstemp()
-        os.close(fd)
-        self.create_local_system(temp_root, raw_disk)
-
-        try:
-            self.transfer(raw_disk, ssh_host, vm_path)
-            self.create_libvirt_guest(ssh_host, vm_name, vm_path, autostart)
-        except BaseException:
-            sys.stderr.write('Error deploying to libvirt\n')
-            os.remove(raw_disk)
-            cliapp.ssh_runcmd(ssh_host, ['rm', '-f', vm_path])
-            raise
-        else:
-            os.remove(raw_disk)
-
-        self.status(
-            msg='Virtual machine %(vm_name)s has been created',
-            vm_name=vm_name)
-
-    def parse_location(self, location):
-        '''Parse the location argument to get relevant data.'''
-
-        x = urlparse.urlparse(location)
-        m = re.match(self.location_pattern, x.path)
-        return x.netloc, m.group('guest'), m.group('path')
-
-    def transfer(self, raw_disk, ssh_host, vm_path):
-        '''Transfer raw disk image to libvirt host.'''
-
-        self.status(msg='Transferring disk image')
-
-        xfer_hole_path = morphlib.util.get_data_path('xfer-hole')
-        recv_hole = morphlib.util.get_data('recv-hole')
-
-        ssh_remote_cmd = [
-            'sh', '-c', recv_hole, 'dummy-argv0', 'file', vm_path
-        ]
-
-        cliapp.runcmd(
-            ['python', xfer_hole_path, raw_disk],
-            ['ssh', ssh_host] + map(cliapp.shell_quote, ssh_remote_cmd),
-            stdout=None, stderr=None)
-
-    def create_libvirt_guest(self, ssh_host, vm_name, vm_path, autostart):
-        '''Create the libvirt virtual machine.'''
-
-        self.status(msg='Creating libvirt/kvm virtual machine')
-
-        attach_disks = self.parse_attach_disks()
-        attach_opts = []
-        for disk in attach_disks:
-            attach_opts.extend(['--disk', 'path=%s' % disk])
-
-        if 'NIC_CONFIG' in os.environ:
-            nics = os.environ['NIC_CONFIG'].split()
-            for nic in nics:
-                attach_opts.extend(['--network', nic])
-
-        ram_mebibytes = str(self.get_ram_size() / (1024**2))
-
-        vcpu_count = str(self.get_vcpu_count())
-
-        cmdline = ['virt-install', '--connect', 'qemu:///system',
-                   '--import', '--name', vm_name, '--vnc',
-                   '--ram', ram_mebibytes, '--vcpus', vcpu_count,
-                   '--disk', 'path=%s,bus=ide' % vm_path] + attach_opts
-        if not autostart:
-            cmdline += ['--noreboot']
-        cliapp.ssh_runcmd(ssh_host, cmdline)
-
-        if autostart:
-            cliapp.ssh_runcmd(ssh_host,
-                ['virsh', '--connect', 'qemu:///system', 'autostart', vm_name])
-
-KvmPlusSshWriteExtension().run()
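The transfer step above goes through xfer-hole/recv-hole because a freshly created raw disk image is sparse; copying it naively would ship gigabytes of zeros over ssh. The effect is easy to see (illustrative path and size):

    # Hedged sketch: apparent vs allocated size of a sparse disk image
    # (POSIX; st_blocks is counted in 512-byte units).
    import os

    path = '/tmp/disk.img'                # illustrative
    with open(path, 'wb') as f:
        f.truncate(4 * 1024 ** 3)         # 4 GiB apparent, no data blocks
    st = os.stat(path)
    print('apparent %d bytes, allocated %d bytes'
          % (st.st_size, st.st_blocks * 512))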
diff --git a/kvm.write.help b/kvm.write.help
deleted file mode 100644
index 812a5309..00000000
--- a/kvm.write.help
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright (C) 2014, 2015 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, see <http://www.gnu.org/licenses/>.
-
-help: |
-
-    Deploy a Baserock system as a *new* KVM/LibVirt virtual machine.
-
-    Use the `ssh-rsync` write extension to deploy upgrades to an
-    *existing* VM.
-
-    Parameters:
-
-    * location: a custom URL scheme of the form `kvm+ssh://HOST/GUEST/PATH`,
-      where:
-        * HOST is the name of the host on which KVM/LibVirt is running
-        * GUEST is the name of the guest VM on that host
-        * PATH is the path to the disk image that should be created,
-          on that host. For example,
-          `kvm+ssh://alice@192.168.122.1/testsys/home/alice/testys.img` where
-            * `alice@192.168.122.1` is the target host as given to ssh,
-              **from within the development host** (which may be
-              different from the target host's normal address);
-            * `testsys` is the name of the new guest VM;
-            * `/home/alice/testys.img` is the pathname of the disk image
-              file on the target host.
-
-    * HOSTNAME=name: the hostname of the **guest** VM within the network
-      into which it is being deployed
-
-    * DISK_SIZE=X: the size of the VM's primary virtual hard disk. `X`
-      should use a suffix of `K`, `M`, or `G` (in upper or lower case) to
-      indicate kilo-, mega-, or gigabytes. For example, `DISK_SIZE=100G`
-      would create a 100 gigabyte disk image. **This parameter is
-      mandatory**.
-
-    * RAM_SIZE=X: the amount of RAM that the virtual machine should allocate
-      for itself from the host. `X` is interpreted in the same way as for
-      `DISK_SIZE`, and defaults to `1G`.
-
-    * VCPUS=n: the number of virtual CPUs for the VM. Allowed values 1-32.
-      Do not use more CPU cores than you have available physically (real
-      cores, no hyperthreads).
-
-    * INITRAMFS_PATH=path: the location of an initramfs for the bootloader
-      to tell Linux to use, rather than booting the rootfs directly.
-
-    * AUTOSTART=<VALUE> - boolean. If it is set, the VM will be started
-      when it has been deployed.
-
-    * DTB_PATH=path: **(MANDATORY)** for systems that require a device tree
-      binary - Give the full path (without a leading /) to the location of
-      the DTB in the built system image. The deployment will fail if `path`
-      does not exist.
-
-    * BOOTLOADER_INSTALL=value: the bootloader to be installed
-      **(MANDATORY)** for non-x86 systems
-
-      allowed values =
-      - 'extlinux' (default) - the extlinux bootloader will
-         be installed
-      - 'none' - no bootloader will be installed by `morph deploy`. A
-         bootloader must be installed manually. This value must be used
-         when deploying non-x86 systems such as ARM.
-
-    * BOOTLOADER_CONFIG_FORMAT=value: the bootloader format to be used.
-      If not specified for x86-32 and x86-64 systems, 'extlinux' will be
-      used
-
-      allowed values =
-      - 'extlinux'
-
-    * KERNEL_ARGS=args: optional additional kernel command-line parameters
-      to be appended to the default set. The default set is:
-
-      'rw init=/sbin/init rootfstype=btrfs \
-       rootflags=subvol=systems/default/run \
-       root=[name or UUID of root filesystem]'
-
-      (See https://www.kernel.org/doc/Documentation/kernel-parameters.txt)
-
-    (See `morph help deploy` for details of how to pass parameters to write
-    extensions)
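The `kvm+ssh://HOST/GUEST/PATH` scheme described above is taken apart with urlparse plus a regex, as in kvm.check and kvm.write; a condensed, illustrative version:

    # Hedged sketch: parsing a kvm+ssh location (Python 2 urlparse, as
    # used by these extensions).
    import re
    import urlparse

    def parse_kvm_location(location):
        x = urlparse.urlparse(location)
        if x.scheme != 'kvm+ssh':
            raise ValueError('URL scheme must be kvm+ssh: %s' % location)
        m = re.match(r'^/(?P<guest>[^/]+)(?P<path>/.+)$', x.path)
        if m is None:
            raise ValueError('cannot parse location %s' % location)
        return x.netloc, m.group('guest'), m.group('path')

    # parse_kvm_location(
    #     'kvm+ssh://alice@192.168.122.1/testsys/home/alice/testys.img')
    # -> ('alice@192.168.122.1', 'testsys', '/home/alice/testys.img')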
diff --git a/mason.configure b/mason.configure
deleted file mode 100644
index 1198ebd0..00000000
--- a/mason.configure
+++ /dev/null
@@ -1,153 +0,0 @@
-#!/bin/sh
-#
-# Copyright (C) 2014 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# This is a "morph deploy" configuration extension to fully configure
-# a Mason instance at deployment time. It uses the following variables
-# from the environment:
-#
-#   * ARTIFACT_CACHE_SERVER
-#   * MASON_CLUSTER_MORPHOLOGY
-#   * MASON_DEFINITIONS_REF
-#   * MASON_DISTBUILD_ARCH
-#   * MASON_TEST_HOST
-#   * OPENSTACK_NETWORK_ID
-#   * TEST_INFRASTRUCTURE_TYPE
-#   * TROVE_HOST
-#   * TROVE_ID
-#   * CONTROLLERHOST
-
-set -e
-
-##########################################################################
-# Copy Mason files into root filesystem
-##########################################################################
-
-ROOT="$1"
-
-mkdir -p "$ROOT"/usr/lib/mason
-cp mason/mason.sh "$ROOT"/usr/lib/mason/mason.sh
-cp mason/mason-report.sh "$ROOT"/usr/lib/mason/mason-report.sh
-cp mason/os-init-script "$ROOT"/usr/lib/mason/os-init-script
-
-cp mason/mason.timer "$ROOT"/etc/systemd/system/mason.timer
-
-cp mason/mason.service "$ROOT"/etc/systemd/system/mason.service
-
-##########################################################################
-# Set up httpd web server
-##########################################################################
-
-cp mason/httpd.service "$ROOT"/etc/systemd/system/httpd.service
-
-mkdir -p "$ROOT"/srv/mason
-
-cat >>"$ROOT"/etc/httpd.conf <<EOF
-.log:text/plain
-EOF
-
-##########################################################################
-# Create the Mason configuration file
-##########################################################################
-
-MASON_DATA="$ROOT/etc/mason"
-mkdir -p "$MASON_DATA"
-
-python <<'EOF' >"$MASON_DATA/mason.conf"
-import os, sys, yaml
-
-mason_configuration={
-    'ARTIFACT_CACHE_SERVER': os.environ['ARTIFACT_CACHE_SERVER'],
-    'MASON_CLUSTER_MORPHOLOGY': os.environ['MASON_CLUSTER_MORPHOLOGY'],
-    'MASON_DEFINITIONS_REF': os.environ['MASON_DEFINITIONS_REF'],
-    'MASON_DISTBUILD_ARCH': os.environ['MASON_DISTBUILD_ARCH'],
-    'MASON_TEST_HOST': os.environ['MASON_TEST_HOST'],
-    'OPENSTACK_NETWORK_ID': os.environ['OPENSTACK_NETWORK_ID'],
-    'TEST_INFRASTRUCTURE_TYPE': os.environ['TEST_INFRASTRUCTURE_TYPE'],
-    'TROVE_ID': os.environ['TROVE_ID'],
-    'TROVE_HOST': os.environ['TROVE_HOST'],
-    'CONTROLLERHOST': os.environ['CONTROLLERHOST'],
-}
-
-yaml.dump(mason_configuration, sys.stdout, default_flow_style=False)
-EOF
-
-if [ "$TEST_INFRASTRUCTURE_TYPE" = "openstack" ]; then
-    python <<'EOF' >>"$MASON_DATA/mason.conf"
-import os, sys, yaml
-
-openstack_credentials={
-    'OS_USERNAME': os.environ['OPENSTACK_USER'],
-    'OS_TENANT_NAME': os.environ['OPENSTACK_TENANT'],
-    'OS_TENANT_ID': os.environ['OPENSTACK_TENANT_ID'],
-    'OS_AUTH_URL': os.environ['OPENSTACK_AUTH_URL'],
-
'OS_PASSWORD': os.environ['OPENSTACK_PASSWORD'], -} - -yaml.dump(openstack_credentials, sys.stdout, default_flow_style=False) -EOF -fi - -########################################################################## -# Enable services -########################################################################## - -ln -s ../mason.timer "$ROOT"/etc/systemd/system/multi-user.target.wants/mason.timer -ln -s ../httpd.service "$ROOT"/etc/systemd/system/multi-user.target.wants/httpd.service diff --git a/mason/ansible/hosts b/mason/ansible/hosts deleted file mode 100644 index 5b97818d..00000000 --- a/mason/ansible/hosts +++ /dev/null @@ -1 +0,0 @@ -localhost ansible_connection=local diff --git a/mason/ansible/mason-setup.yml b/mason/ansible/mason-setup.yml deleted file mode 100644 index d1528dbb..00000000 --- a/mason/ansible/mason-setup.yml +++ /dev/null @@ -1,83 +0,0 @@ ---- -- hosts: localhost - vars_files: - - "/etc/mason/mason.conf" - tasks: - - - - fail: msg='TROVE_ID is mandatory' - when: TROVE_ID is not defined - - - fail: msg='TROVE_HOST is mandatory' - when: TROVE_HOST is not defined - - - fail: msg='ARTIFACT_CACHE_SERVER is mandatory' - when: ARTIFACT_CACHE_SERVER is not defined - - - fail: msg='MASON_CLUSTER_MORPHOLOGY is mandatory' - when: MASON_CLUSTER_MORPHOLOGY is not defined - - - fail: msg='MASON_DEFINITIONS_REF is mandatory' - when: MASON_DEFINITIONS_REF is not defined - - - fail: msg='MASON_DISTBUILD_ARCH is mandatory' - when: MASON_DISTBUILD_ARCH is not defined - - - fail: msg='MASON_TEST_HOST is mandatory' - when: MASON_TEST_HOST is not defined - - - fail: msg='CONTROLLERHOST is mandatory' - when: CONTROLLERHOST is not defined - - - fail: msg='TEST_INFRASTRUCTURE_TYPE is mandatory' - when: TEST_INFRASTRUCTURE_TYPE is not defined - - - fail: msg='OPENSTACK_NETWORK_ID is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack' - when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OPENSTACK_NETWORK_ID is not defined - - - fail: msg='OS_USERNAME is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack' - when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_USERNAME is not defined - - - fail: msg='OS_PASSWORD is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack' - when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_PASSWORD is not defined - - - fail: msg='OS_TENANT_ID is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack' - when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_TENANT_ID is not defined - - - fail: msg='OS_TENANT_NAME is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack' - when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_TENANT_NAME is not defined - - - fail: msg='OS_AUTH_URL is mandatory when TEST_INFRASTRUCTURE_TYPE=openstack' - when: TEST_INFRASTRUCTURE_TYPE == "openstack" and OS_AUTH_URL is not defined - - - name: Create the Mason configuration file - template: src=/usr/share/mason-setup/{{ item }} dest=/etc/{{ item }} - with_items: - - mason.conf - - - name: Create the OpenStack credentials file - template: src=/usr/share/mason-setup/{{ item }} dest=/etc/{{ item }} - with_items: - - os.conf - when: TEST_INFRASTRUCTURE_TYPE == "openstack" - - - name: Enable the mason service - service: name=mason.service enabled=yes - register: mason_service - - name: Restart the mason service - service: name=mason.service state=restarted - when: mason_service|changed - - - name: Enable the mason timer - service: name=mason.timer enabled=yes - register: mason_timer - - name: Restart the mason timer - service: name=mason.timer state=restarted - when: mason_timer|changed - - - name: Enable the 
httpd service - service: name=httpd.service enabled=yes - register: httpd_service - - name: Restart the httpd service - service: name=httpd state=restarted - when: httpd_service|changed diff --git a/mason/httpd.service b/mason/httpd.service deleted file mode 100644 index 7572b732..00000000 --- a/mason/httpd.service +++ /dev/null @@ -1,10 +0,0 @@ -[Unit] -Description=HTTP server for Mason -After=network.target - -[Service] -User=root -ExecStart=/usr/sbin/httpd -f -p 80 -h /srv/mason - -[Install] -WantedBy=multi-user.target diff --git a/mason/mason-generator.sh b/mason/mason-generator.sh deleted file mode 100755 index 187db72c..00000000 --- a/mason/mason-generator.sh +++ /dev/null @@ -1,101 +0,0 @@ -#!/bin/sh - -set -e - -if [ "$#" -lt 5 -o "$#" -gt 6 -o "$1" == "-h" -o "$1" == "--help" ]; then - cat < $REPORT_PATH <<'EOF' - - - - - - - -

-<html>
-<head>
-<meta charset="utf-8">
-<meta http-equiv="refresh" content="60">
-<title>Mason</title>
-</head>
-<body>
-<h1>Mason</h1>
-<h2>Baserock: Continuous Delivery</h2>
-<p>Build log of changes to BRANCH from TROVE. Most recent first.</p>
-<p>Last checked for updates at <code>....-..-.. ..:..:..</code></p>
-<table>
-<tr><th>Started</th><th>Ref</th><th>Duration</th><th>Result</th></tr>
-<!--INSERTION POINT-->
-</table>
-</body>
-</html>
-EOF
-
-    sed -i 's/BRANCH/'"$(sed_escape "$1")"'/' $REPORT_PATH
-    sed -i 's/TROVE/'"$(sed_escape "$2")"'/' $REPORT_PATH
-}
-
-update_report() {
-    # Give function params sensible names
-    build_start_time="$1"
-    build_trove_host="$2"
-    build_ref="$3"
-    build_sha1="$4"
-    build_duration="$5"
-    build_result="$6"
-
-    # Generate template if report file is not there
-    if [ ! -f $REPORT_PATH ]; then
-        create_report $build_ref $build_trove_host
-    fi
-
-    # Build table row for insertion into report file
-    if [ "$build_result" = nonet ]; then
-        msg='<tr><td>'"${build_start_time}"'</td><td>Failed to contact '"${build_trove_host}"'</td><td>'"${build_duration}s"'</td><td>'"${build_result}"'</td></tr>'
-    else
-        msg='<tr><td>'"${build_start_time}"'</td><td>'"${build_sha1}"'</td><td>'"${build_duration}s"'</td><td>'"${build_result}"'</td></tr>'
-    fi
-
-    # Insert report line, newest at top
-    sed -i 's/<!--INSERTION POINT-->/<!--INSERTION POINT-->\n'"$(sed_escape "$msg")"'/' $REPORT_PATH
-}
-
-update_report_time() {
-    # Give function params sensible names
-    build_start_time="$1"
-
-    # If the report file exists, update the last-checked-for-updates time
-    if [ -f $REPORT_PATH ]; then
-        sed -i 's/....-..-.. ..:..:..<\/code>/'"$(sed_escape "$build_start_time")"'<\/code>/' $REPORT_PATH
-    fi
-}
-
-START_TIME=`date +%Y-%m-%d\ %T`
-
-update_report_time "$START_TIME"
-cp "$REPORT_PATH" "$SERVER_PATH/index.html"
-
-logfile="$(mktemp)"
-/usr/lib/mason/mason.sh 2>&1 | tee "$logfile"
-case "${PIPESTATUS[0]}" in
-0)
-    RESULT=pass
-    ;;
-33)
-    RESULT=skip
-    ;;
-42)
-    RESULT=nonet
-    ;;
-*)
-    RESULT=fail
-    ;;
-esac
-
-# TODO: Update page with last executed time
-if [ "$RESULT" = skip ]; then
-    rm "$logfile"
-    exit 0
-fi
-
-DURATION=$(( $(date +%s) - $(date --date="$START_TIME" +%s) ))
-SHA1="$(cd "ws/$DEFINITIONS_REF/$UPSTREAM_TROVE_ADDRESS/baserock/baserock/definitions" && git rev-parse HEAD)"
-
-update_report "$START_TIME" \
-    "$UPSTREAM_TROVE_ADDRESS" \
-    "$DEFINITIONS_REF" \
-    "$SHA1" \
-    "$DURATION" \
-    "$RESULT"
-
-
-#
-# Copy report into server directory
-#
-
-cp "$REPORT_PATH" "$SERVER_PATH/index.html"
-mkdir "$SERVER_PATH/log"
-mv "$logfile" "$SERVER_PATH/log/$SHA1--$START_TIME.log"
diff --git a/mason/mason-setup.service b/mason/mason-setup.service
deleted file mode 100644
index 60403bde..00000000
--- a/mason/mason-setup.service
+++ /dev/null
@@ -1,16 +0,0 @@
-[Unit]
-Description=Run mason-setup Ansible scripts
-Requires=network.target
-After=network.target
-Requires=opensshd.service
-After=opensshd.service
-
-# If there's a shared /var subvolume, it must be mounted before this
-# unit runs.
-Requires=local-fs.target
-After=local-fs.target
-
-ConditionPathExists=/etc/mason/mason.conf
-
-[Service]
-ExecStart=/usr/bin/ansible-playbook -v -i /usr/lib/mason-setup/ansible/hosts /usr/lib/mason-setup/ansible/mason-setup.yml
diff --git a/mason/mason.service b/mason/mason.service
deleted file mode 100644
index d5c99498..00000000
--- a/mason/mason.service
+++ /dev/null
@@ -1,12 +0,0 @@
-[Unit]
-Description=Mason: Continuous Delivery Service
-After=mason-setup.service
-ConditionPathIsDirectory=/srv/distbuild
-
-[Service]
-User=root
-ExecStart=/usr/lib/mason/mason-report.sh
-WorkingDirectory=/srv/distbuild
-
-[Install]
-WantedBy=multi-user.target
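The case statement in mason-report.sh above defines the exit-code protocol with mason.sh: 0 is a pass, 33 means nothing changed, 42 means the trove was unreachable, anything else is a failure. Schematically (hypothetical wrapper, not shipped code):

    # Hedged sketch of the mason.sh exit-code protocol.
    import subprocess

    RESULTS = {0: 'pass', 33: 'skip', 42: 'nonet'}
    rc = subprocess.call(['/usr/lib/mason/mason.sh'])
    print('result: %s' % RESULTS.get(rc, 'fail'))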
diff --git a/mason/mason.sh b/mason/mason.sh
deleted file mode 100755
index dba99dfa..00000000
--- a/mason/mason.sh
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/bin/sh
-
-# Load OpenStack credentials
-if [ -f "/etc/os.conf" ]; then
-    . /etc/os.conf
-fi
-
-set -e
-set -x
-
-# Load our deployment config
-. /etc/mason.conf
-
-if [ ! -e ws ]; then
-    morph init ws
-fi
-cd ws
-
-definitions_repo="$DEFINITIONS_REF"/"$UPSTREAM_TROVE_ADDRESS"/baserock/baserock/definitions
-if [ ! -e "$definitions_repo" ]; then
-    morph checkout git://"$UPSTREAM_TROVE_ADDRESS"/baserock/baserock/definitions "$DEFINITIONS_REF"
-    cd "$definitions_repo"
-    git config user.name "$TROVE_ID"-mason
-    git config user.email "$TROVE_ID"-mason@$(hostname)
-else
-    cd "$definitions_repo"
-    SHA1_PREV="$(git rev-parse HEAD)"
-fi
-
-if ! git remote update origin; then
-    echo ERROR: Unable to contact trove
-    exit 42
-fi
-git clean -fxd
-git reset --hard origin/"$DEFINITIONS_REF"
-
-SHA1="$(git rev-parse HEAD)"
-
-if [ -f "$HOME/success" ] && [ "$SHA1" = "$SHA1_PREV" ]; then
-    echo INFO: No changes to "$DEFINITIONS_REF", nothing to do
-    exit 33
-fi
-
-rm -f "$HOME/success"
-
-echo INFO: Mason building: $DEFINITIONS_REF at $SHA1
-
-if ! "scripts/release-build" --no-default-configs \
-        --trove-host "$UPSTREAM_TROVE_ADDRESS" \
-        --artifact-cache-server "http://$ARTIFACT_CACHE_SERVER:8080/" \
-        --controllers "$DISTBUILD_ARCH:$DISTBUILD_CONTROLLER_ADDRESS" \
-        "$BUILD_CLUSTER_MORPHOLOGY"; then
-    echo ERROR: Failed to build release images
-    echo Build logs for chunks:
-    find builds -type f -exec echo {} \; -exec cat {} \;
-    exit 1
-fi
-
-releases_made="$(cd release && ls | wc -l)"
-if [ "$releases_made" = 0 ]; then
-    echo ERROR: No release images created
-    exit 1
-else
-    echo INFO: Created "$releases_made" release images
-fi
-
-if [ "$TEST_INFRASTRUCTURE_TYPE" = "openstack" ]; then
-    "scripts/release-test-os" \
-        --deployment-host "$DISTBUILD_ARCH":"$MASON_TEST_HOST" \
-        --trove-host "$UPSTREAM_TROVE_ADDRESS" \
-        --trove-id "$TROVE_ID" \
-        --net-id "$OPENSTACK_NETWORK_ID" \
-        "$BUILD_CLUSTER_MORPHOLOGY"
-elif [ "$TEST_INFRASTRUCTURE_TYPE" = "kvmhost" ]; then
-    "scripts/release-test" \
-        --deployment-host "$DISTBUILD_ARCH":"$MASON_TEST_HOST" \
-        --trove-host "$UPSTREAM_TROVE_ADDRESS" \
-        --trove-id "$TROVE_ID" \
-        "$BUILD_CLUSTER_MORPHOLOGY"
-fi
-
-"scripts/release-upload" --build-trove-host "$ARTIFACT_CACHE_SERVER" \
-    --arch "$DISTBUILD_ARCH" \
-    --log-level=debug --log="$HOME"/release-upload.log \
-    --public-trove-host "$UPSTREAM_TROVE_ADDRESS" \
-    --public-trove-username root \
-    --public-trove-artifact-dir /home/cache/artifacts \
-    --no-upload-release-artifacts \
-    "$BUILD_CLUSTER_MORPHOLOGY"
-
-echo INFO: Artifact upload complete for $DEFINITIONS_REF at $SHA1
-
-touch "$HOME/success"
diff --git a/mason/mason.timer b/mason/mason.timer
deleted file mode 100644
index 107dff97..00000000
--- a/mason/mason.timer
+++ /dev/null
@@ -1,10 +0,0 @@
-[Unit]
-Description=Runs Mason continually with 1 min between calls
-
-[Timer]
-# Time between Mason finishing and calling it again
-OnUnitActiveSec=1min
-Unit=mason.service
-
-[Install]
-WantedBy=multi-user.target
diff --git a/mason/os-init-script b/mason/os-init-script
deleted file mode 100644
index 77afb926..00000000
--- a/mason/os-init-script
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-# This allows the test runner to know that cloud-init has completed the
-# disc resizing, and there is enough free space to continue.
-touch /root/cloud-init-finished
-
diff --git a/mason/share/mason.conf b/mason/share/mason.conf
deleted file mode 100644
index 1295ce84..00000000
--- a/mason/share/mason.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-# This file is generated by the mason-setup systemd unit.
-# If you want to change the configuration, edit
-# /etc/mason/mason.conf and restart the service.
- -ARTIFACT_CACHE_SERVER={{ ARTIFACT_CACHE_SERVER|quote }} -UPSTREAM_TROVE_ADDRESS={{ TROVE_HOST|quote }} -DEFINITIONS_REF={{ MASON_DEFINITIONS_REF|quote }} -DISTBUILD_ARCH={{ MASON_DISTBUILD_ARCH|quote }} -DISTBUILD_CONTROLLER_ADDRESS={{ CONTROLLERHOST|quote }} -TROVE_ID={{ TROVE_ID|quote }} -BUILD_CLUSTER_MORPHOLOGY={{ MASON_CLUSTER_MORPHOLOGY|quote }} -MASON_TEST_HOST={{ MASON_TEST_HOST|quote }} -TEST_INFRASTRUCTURE_TYPE={{ TEST_INFRASTRUCTURE_TYPE|quote }} -{% if OPENSTACK_NETWORK_ID is defined %}OPENSTACK_NETWORK_ID={{ OPENSTACK_NETWORK_ID|quote }}{% endif %} diff --git a/mason/share/os.conf b/mason/share/os.conf deleted file mode 100644 index 21ef398c..00000000 --- a/mason/share/os.conf +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -# A version of this file with the relevant information included can be -# obtained by navigating to 'Access & Security' -> 'API Access' -> -# 'Download OpenStack RC file' in The Horizon web interface of your -# OpenStack. However, the file obtained from there sets OS_PASSWORD -# such that it will ask the user for a password, so you will need to -# change that for Mason to work automatically. -# -# With the addition of Keystone, to use an openstack cloud you should -# authenticate against keystone, which returns a **Token** and **Service -# Catalog**. The catalog contains the endpoint for all services the -# user/tenant has access to - including nova, glance, keystone, swift. -# -# *NOTE*: Using the 2.0 *auth api* does not mean that compute api is 2.0. We -# will use the 1.1 *compute api* -export OS_AUTH_URL={{ OS_AUTH_URL|quote }} - -# With the addition of Keystone we have standardized on the term **tenant** -# as the entity that owns the resources. -export OS_TENANT_ID={{ OS_TENANT_ID|quote }} -export OS_TENANT_NAME={{ OS_TENANT_NAME|quote }} - -# In addition to the owning entity (tenant), openstack stores the entity -# performing the action as the **user**. -export OS_USERNAME={{ OS_USERNAME|quote }} - -# With Keystone you pass the keystone password. -export OS_PASSWORD={{ OS_PASSWORD|quote }} - diff --git a/moonshot-kernel.configure b/moonshot-kernel.configure deleted file mode 100644 index 11d01751..00000000 --- a/moonshot-kernel.configure +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/sh -# -# Copyright (C) 2014 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-# -# This is a "morph deploy" configuration extension to convert a plain -# kernel Image to uImage, for an HP Moonshot m400 cartridge - -set -eu - -case "$MOONSHOT_KERNEL" in - True|yes) - echo "Converting kernel image for Moonshot" - mkimage -A arm -O linux -C none -T kernel -a 0x00080000 \ - -e 0x00080000 -n Linux -d "$1/boot/vmlinux" "$1/boot/uImage" - ;; - *) - echo Unrecognised option "$MOONSHOT_KERNEL" to MOONSHOT_KERNEL - exit 1 - ;; -esac diff --git a/nfsboot-server.configure b/nfsboot-server.configure deleted file mode 100755 index 9fb48096..00000000 --- a/nfsboot-server.configure +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/sh -# -# Copyright (C) 2013-2014 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# -# This is a "morph deploy" configuration extension to set up a server for -# booting over nfs and tftp. -set -e - -ROOT="$1" - -########################################################################## - -nfsboot_root=/srv/nfsboot -tftp_root="$nfsboot_root"/tftp -nfs_root="$nfsboot_root"/nfs -mkdir -p "$ROOT$tftp_root" "$ROOT$nfs_root" - -install -D /dev/stdin "$ROOT/usr/lib/systemd/system/nfsboot-tftp.service" <. - -'''Preparatory checks for Morph 'nfsboot' write extension''' - -import cliapp -import os - -import morphlib.writeexts - - -class NFSBootCheckExtension(morphlib.writeexts.WriteExtension): - - _nfsboot_root = '/srv/nfsboot' - - def process_args(self, args): - if len(args) != 1: - raise cliapp.AppException('Wrong number of command line args') - - location = args[0] - - upgrade = self.get_environment_boolean('UPGRADE') - if upgrade: - raise cliapp.AppException( - 'Upgrading is not currently supported for NFS deployments.') - - hostname = os.environ.get('HOSTNAME', None) - if hostname is None: - raise cliapp.AppException('You must specify a HOSTNAME.') - if hostname == 'baserock': - raise cliapp.AppException('It is forbidden to nfsboot a system ' - 'with hostname "%s"' % hostname) - - self.test_good_server(location) - - version_label = os.getenv('VERSION_LABEL', 'factory') - versioned_root = os.path.join(self._nfsboot_root, hostname, 'systems', - version_label) - if self.version_exists(versioned_root, location): - raise cliapp.AppException( - 'Root file system for host %s (version %s) already exists on ' - 'the NFS server %s. Deployment aborted.' 
% (hostname, version_label, location))
-
-    def test_good_server(self, server):
-        self.check_ssh_connectivity(server)
-
-        # Is an NFS server
-        try:
-            cliapp.ssh_runcmd(
-                'root@%s' % server, ['test', '-e', '/etc/exports'])
-        except cliapp.AppException:
-            raise cliapp.AppException('server %s is not an nfs server'
-                                      % server)
-        try:
-            cliapp.ssh_runcmd(
-                'root@%s' % server, ['systemctl', 'is-enabled',
-                                     'nfs-server.service'])
-
-        except cliapp.AppException:
-            raise cliapp.AppException('server %s does not control its '
-                                      'nfs server by systemd' % server)
-
-        # TFTP server exports /srv/nfsboot/tftp
-        tftp_root = os.path.join(self._nfsboot_root, 'tftp')
-        try:
-            cliapp.ssh_runcmd(
-                'root@%s' % server, ['test', '-d', tftp_root])
-        except cliapp.AppException:
-            raise cliapp.AppException('server %s does not export %s' %
-                                      (server, tftp_root))
-
-    def version_exists(self, versioned_root, location):
-        try:
-            cliapp.ssh_runcmd('root@%s' % location,
-                              ['test', '-d', versioned_root])
-        except cliapp.AppException:
-            return False
-
-        return True
-
-
-NFSBootCheckExtension().run()
diff --git a/nfsboot.configure b/nfsboot.configure
deleted file mode 100755
index 6a68dc48..00000000
--- a/nfsboot.configure
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-# Copyright (C) 2013-2015 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-# Remove all networking interfaces. On nfsboot systems, eth0 is set up
-# during kernel init, and the normal ifup@eth0.service systemd unit
-# would break the NFS connection and cause the system to hang.
-
-
-set -e
-if [ "$NFSBOOT_CONFIGURE" ]; then
-    # Remove all networking interfaces but loopback
-    cat > "$1/etc/network/interfaces" <<EOF
-auto lo
-iface lo inet loopback
-EOF
-fi
diff --git a/nfsboot.write b/nfsboot.write
deleted file mode 100755
--- a/nfsboot.write
+++ /dev/null
-#!/usr/bin/python
-# Copyright (C) 2013-2015 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-'''A Morph deployment write extension for deploying to an nfsboot server
-
-*** DO NOT USE ***
-- This was written before 'proper' deployment mechanisms were in place.
-It is unlikely to work at all and will not work correctly.
-
-Use the pxeboot write extension instead
-
-***
-
-
-
-An nfsboot server is defined as a baserock system that has tftp and nfs
-servers running; the tftp server exports the contents of
-/srv/nfsboot/tftp/ and the user has sufficient permissions to create
-nfs roots in /srv/nfsboot/nfs/.
-
-'''
-
-
-import cliapp
-import os
-import glob
-
-import morphlib.writeexts
-
-
-class NFSBootWriteExtension(morphlib.writeexts.WriteExtension):
-
-    '''Create an NFS root and kernel on TFTP during Morph's deployment.
-
-    The location command line argument is the hostname of the nfsboot
-    server. The user is expected to provide the location argument
-    using the following syntax:
-
-        HOST
-
-    where:
-
-    * HOST is the host of the nfsboot server
-
-    The extension will connect to root@HOST via ssh to copy the kernel and
-    rootfs, and configure the nfs server.
-
-    It requires root because it uses systemd, and reads/writes to /etc.
- - ''' - - _nfsboot_root = '/srv/nfsboot' - - def process_args(self, args): - if len(args) != 2: - raise cliapp.AppException('Wrong number of command line args') - - temp_root, location = args - - version_label = os.getenv('VERSION_LABEL', 'factory') - hostname = os.environ['HOSTNAME'] - - versioned_root = os.path.join(self._nfsboot_root, hostname, 'systems', - version_label) - - self.copy_rootfs(temp_root, location, versioned_root, hostname) - self.copy_kernel(temp_root, location, versioned_root, version_label, - hostname) - self.configure_nfs(location, hostname) - - def create_local_state(self, location, hostname): - statedir = os.path.join(self._nfsboot_root, hostname, 'state') - subdirs = [os.path.join(statedir, 'home'), - os.path.join(statedir, 'opt'), - os.path.join(statedir, 'srv')] - cliapp.ssh_runcmd('root@%s' % location, - ['mkdir', '-p'] + subdirs) - - def copy_kernel(self, temp_root, location, versioned_root, version, - hostname): - bootdir = os.path.join(temp_root, 'boot') - image_names = ['vmlinuz', 'zImage', 'uImage'] - for name in image_names: - try_path = os.path.join(bootdir, name) - if os.path.exists(try_path): - kernel_src = try_path - break - else: - raise cliapp.AppException( - 'Could not find a kernel in the system: none of ' - '%s found' % ', '.join(image_names)) - - kernel_dest = os.path.join(versioned_root, 'orig', 'kernel') - rsync_dest = 'root@%s:%s' % (location, kernel_dest) - self.status(msg='Copying kernel') - cliapp.runcmd( - ['rsync', '-s', kernel_src, rsync_dest]) - - # Link the kernel to the right place - self.status(msg='Creating links to kernel in tftp directory') - tftp_dir = os.path.join(self._nfsboot_root , 'tftp') - versioned_kernel_name = "%s-%s" % (hostname, version) - kernel_name = hostname - try: - cliapp.ssh_runcmd('root@%s' % location, - ['ln', '-f', kernel_dest, - os.path.join(tftp_dir, versioned_kernel_name)]) - - cliapp.ssh_runcmd('root@%s' % location, - ['ln', '-sf', versioned_kernel_name, - os.path.join(tftp_dir, kernel_name)]) - except cliapp.AppException: - raise cliapp.AppException('Could not create symlinks to the ' - 'kernel at %s in %s on %s' - % (kernel_dest, tftp_dir, location)) - - def copy_rootfs(self, temp_root, location, versioned_root, hostname): - rootfs_src = temp_root + '/' - orig_path = os.path.join(versioned_root, 'orig') - run_path = os.path.join(versioned_root, 'run') - - self.status(msg='Creating destination directories') - try: - cliapp.ssh_runcmd('root@%s' % location, - ['mkdir', '-p', orig_path, run_path]) - except cliapp.AppException: - raise cliapp.AppException('Could not create dirs %s and %s on %s' - % (orig_path, run_path, location)) - - self.status(msg='Creating \'orig\' rootfs') - cliapp.runcmd( - ['rsync', '-asXSPH', '--delete', rootfs_src, - 'root@%s:%s' % (location, orig_path)]) - - self.status(msg='Creating \'run\' rootfs') - try: - cliapp.ssh_runcmd('root@%s' % location, - ['rm', '-rf', run_path]) - cliapp.ssh_runcmd('root@%s' % location, - ['cp', '-al', orig_path, run_path]) - cliapp.ssh_runcmd('root@%s' % location, - ['rm', '-rf', os.path.join(run_path, 'etc')]) - cliapp.ssh_runcmd('root@%s' % location, - ['cp', '-a', os.path.join(orig_path, 'etc'), - os.path.join(run_path, 'etc')]) - except cliapp.AppException: - raise cliapp.AppException('Could not create \'run\' rootfs' - ' from \'orig\'') - - self.status(msg='Linking \'default\' to latest system') - try: - cliapp.ssh_runcmd('root@%s' % location, - ['ln', '-sfn', versioned_root, - os.path.join(self._nfsboot_root, hostname, 'systems', - 
'default')]) - except cliapp.AppException: - raise cliapp.AppException('Could not link \'default\' to %s' - % versioned_root) - - def configure_nfs(self, location, hostname): - exported_path = os.path.join(self._nfsboot_root, hostname) - exports_path = '/etc/exports' - # If that path is not already exported: - try: - cliapp.ssh_runcmd( - 'root@%s' % location, ['grep', '-q', exported_path, - exports_path]) - except cliapp.AppException: - ip_mask = '*' - options = 'rw,no_subtree_check,no_root_squash,async' - exports_string = '%s %s(%s)\n' % (exported_path, ip_mask, options) - exports_append_sh = '''\ -set -eu -target="$1" -temp=$(mktemp) -cat "$target" > "$temp" -cat >> "$temp" -mv "$temp" "$target" -''' - cliapp.ssh_runcmd( - 'root@%s' % location, - ['sh', '-c', exports_append_sh, '--', exports_path], - feed_stdin=exports_string) - cliapp.ssh_runcmd( - 'root@%s' % location, ['systemctl', 'restart', - 'nfs-server.service']) - - -NFSBootWriteExtension().run() diff --git a/nfsboot.write.help b/nfsboot.write.help deleted file mode 100644 index 186c479a..00000000 --- a/nfsboot.write.help +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (C) 2014, 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, see . - -help: | - *** DO NOT USE *** - - This was written before 'proper' deployment mechanisms were in place. - It is unlikely to work at all, and will not work correctly. - - Use the pxeboot write extension instead - - *** - Deploy a system image and kernel to an nfsboot server. - - An nfsboot server is defined as a baserock system that has - tftp and nfs servers running, the tftp server is exporting - the contents of /srv/nfsboot/tftp/ and the user has sufficient - permissions to create nfs roots in /srv/nfsboot/nfs/. - - The `location` argument is the hostname of the nfsboot server. - - The extension will connect to root@HOST via ssh to copy the - kernel and rootfs, and configure the nfs server. diff --git a/openstack-ceilometer.configure b/openstack-ceilometer.configure deleted file mode 100644 index 9c0b7b6d..00000000 --- a/openstack-ceilometer.configure +++ /dev/null @@ -1,120 +0,0 @@ -#!/bin/sh - -# Copyright (C) 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- -set -e - -ROOT="$1" - -enable(){ - ln -sf "/usr/lib/systemd/system/$1.service" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" -} - -unnaceptable(){ - eval echo Unexpected value \$$1 for $1 >&2 - exit 1 -} - -check_bool(){ - case "$(eval echo \"\$$1\")" in - True|'') - eval "$1=true" - ;; - False) - eval "$1=false" - ;; - *) - unnaceptable "$1" - ;; - esac -} - -########################################################################## -# Check variables -########################################################################## - -check_bool CEILOMETER_ENABLE_CONTROLLER -check_bool CEILOMETER_ENABLE_COMPUTE - -if ! "$CEILOMETER_ENABLE_CONTROLLER" && \ - ! "$CEILOMETER_ENABLE_COMPUTE"; then - exit 0 -fi - -if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \ - -z "$CEILOMETER_SERVICE_USER" -o \ - -z "$CEILOMETER_SERVICE_PASSWORD" -o \ - -z "$CEILOMETER_DB_USER" -o \ - -z "$CEILOMETER_DB_PASSWORD" -o \ - -z "$METERING_SECRET" -o \ - -z "$RABBITMQ_HOST" -o \ - -z "$RABBITMQ_PORT" -o \ - -z "$RABBITMQ_USER" -o \ - -z "$RABBITMQ_PASSWORD" -o \ - -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ - -z "$CONTROLLER_HOST_ADDRESS" ]; then - echo Some options required for Ceilometer were defined, but not all. - exit 1 -fi - -###################################### -# Enable relevant openstack services # -###################################### - -if "$CEILOMETER_ENABLE_COMPUTE" || "$CEILOMETER_ENABLE_CONTROLLER"; then - enable openstack-ceilometer-config-setup -fi -if "$CEILOMETER_ENABLE_COMPUTE"; then - enable openstack-ceilometer-compute -fi -if "$CEILOMETER_ENABLE_CONTROLLER"; then - enable openstack-ceilometer-db-setup - enable openstack-ceilometer-api - enable openstack-ceilometer-collector - enable openstack-ceilometer-notification - enable openstack-ceilometer-central - enable openstack-ceilometer-alarm-evaluator - enable openstack-ceilometer-alarm-notifier -fi - -########################################################################## -# Generate configuration file -########################################################################## - -OPENSTACK_DATA="$ROOT/etc/openstack" -mkdir -p "$OPENSTACK_DATA" - -python <<'EOF' >"$OPENSTACK_DATA/ceilometer.conf" -import os, sys, yaml - -ceilometer_configuration={ - 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], - 'CEILOMETER_SERVICE_PASSWORD': os.environ['CEILOMETER_SERVICE_PASSWORD'], - 'CEILOMETER_SERVICE_USER': os.environ['CEILOMETER_SERVICE_USER'], - 'CEILOMETER_DB_USER': os.environ['CEILOMETER_DB_USER'], - 'CEILOMETER_DB_PASSWORD': os.environ['CEILOMETER_DB_PASSWORD'], - 'METERING_SECRET': os.environ['METERING_SECRET'], - 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], - 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], - 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], - 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], - 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], - 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], -} - -yaml.dump(ceilometer_configuration, sys.stdout, default_flow_style=False) -EOF diff --git a/openstack-cinder.configure b/openstack-cinder.configure deleted file mode 100644 index 4c32e11a..00000000 --- a/openstack-cinder.configure +++ /dev/null @@ -1,125 +0,0 @@ -#!/bin/sh - -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. 
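[Editor's note: every configure script in this series ends the same way: a `python <<'EOF'` heredoc that copies a fixed set of environment variables into a YAML file under /etc/openstack, which the corresponding *-config-setup service reads on first boot. Stripped of the heredoc plumbing, the pattern is just this (the key list here is an illustrative subset; each script declares its own):

    import os
    import sys

    import yaml

    # Illustrative subset; each script lists its own required variables.
    keys = ['RABBITMQ_HOST', 'RABBITMQ_PORT', 'RABBITMQ_USER']
    configuration = dict((k, os.environ[k]) for k in keys)
    yaml.dump(configuration, sys.stdout, default_flow_style=False)

default_flow_style=False gives block-style YAML, one key per line, which is what the setup services expect to parse.]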
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -set -e - -ROOT="$1" - -enable(){ - ln -sf "/usr/lib/systemd/system/$1.service" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" -} - -unnaceptable(){ - eval echo Unexpected value \$$1 for $1 >&2 - exit 1 -} - -check_bool(){ - case "$(eval echo \"\$$1\")" in - True|'') - eval "$1=true" - ;; - False) - eval "$1=false" - ;; - *) - unnaceptable "$1" - ;; - esac -} - -########################################################################## -# Check variables -########################################################################## - -check_bool CINDER_ENABLE_CONTROLLER -check_bool CINDER_ENABLE_COMPUTE -check_bool CINDER_ENABLE_STORAGE - -if ! "$CINDER_ENABLE_CONTROLLER" && \ - ! "$CINDER_ENABLE_COMPUTE" && \ - ! "$CINDER_ENABLE_STORAGE"; then - exit 0 -fi - -if [ -z "$RABBITMQ_HOST" -o \ - -z "$RABBITMQ_PORT" -o \ - -z "$RABBITMQ_USER" -o \ - -z "$RABBITMQ_PASSWORD" -o \ - -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \ - -z "$CINDER_DB_USER" -o \ - -z "$CINDER_DB_PASSWORD" -o \ - -z "$CONTROLLER_HOST_ADDRESS" -o \ - -z "$CINDER_SERVICE_USER" -o \ - -z "$CINDER_SERVICE_PASSWORD" -o \ - -z "$CINDER_DEVICE" -o \ - -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" ]; then - echo Some options required for Cinder were defined, but not all. - exit 1 -fi - -###################################### -# Enable relevant openstack services # -###################################### - -if "$CINDER_ENABLE_COMPUTE" || "$CINDER_ENABLE_STORAGE"; then - enable iscsi-setup - enable target #target.service! 
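[Editor's note: the `enable` helper used in the surrounding lines pre-enables a systemd unit inside the image being deployed by creating the same symlink `systemctl enable` would create on a running system. A rough Python equivalent, under the assumption that the target's wants directory already exists as these scripts assume (function and argument names are illustrative):

    import os

    def enable(root, unit):
        # Same effect as the shell helper: link the unit into the image's
        # multi-user.target.wants so systemd starts it on first boot.
        src = '/usr/lib/systemd/system/%s.service' % unit
        dst = os.path.join(root, 'etc/systemd/system/multi-user.target.wants',
                           '%s.service' % unit)
        if os.path.lexists(dst):
            os.unlink(dst)  # 'ln -sf' semantics: replace an existing link
        os.symlink(src, dst)

Note the link target is an absolute path inside the deployed system, not inside $ROOT; it only needs to resolve after the image boots.]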
- enable iscsid -fi -if "$CINDER_ENABLE_COMPUTE" || "$CINDER_ENABLE_CONTROLLER" || "$CINDER_ENABLE_STORAGE"; then - enable openstack-cinder-config-setup -fi -if "$CINDER_ENABLE_STORAGE"; then - enable openstack-cinder-lv-setup - enable lvm2-lvmetad - enable openstack-cinder-volume - enable openstack-cinder-backup - enable openstack-cinder-scheduler -fi -if "$CINDER_ENABLE_CONTROLLER"; then - enable openstack-cinder-db-setup - enable openstack-cinder-api -fi - -########################################################################## -# Generate configuration file -########################################################################## - -OPENSTACK_DATA="$ROOT/etc/openstack" -mkdir -p "$OPENSTACK_DATA" - -python <<'EOF' >"$OPENSTACK_DATA/cinder.conf" -import os, sys, yaml - -cinder_configuration={ - 'RABBITMQ_HOST':os.environ['RABBITMQ_HOST'], - 'RABBITMQ_PORT':os.environ['RABBITMQ_PORT'], - 'RABBITMQ_USER':os.environ['RABBITMQ_USER'], - 'RABBITMQ_PASSWORD':os.environ['RABBITMQ_PASSWORD'], - 'KEYSTONE_TEMPORARY_ADMIN_TOKEN':os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], - 'CINDER_DB_USER':os.environ['CINDER_DB_USER'], - 'CINDER_DB_PASSWORD':os.environ['CINDER_DB_PASSWORD'], - 'CONTROLLER_HOST_ADDRESS':os.environ['CONTROLLER_HOST_ADDRESS'], - 'CINDER_SERVICE_USER':os.environ['CINDER_SERVICE_USER'], - 'CINDER_SERVICE_PASSWORD':os.environ['CINDER_SERVICE_PASSWORD'], - 'CINDER_DEVICE':os.environ['CINDER_DEVICE'], - 'MANAGEMENT_INTERFACE_IP_ADDRESS':os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], -} - -yaml.dump(cinder_configuration, sys.stdout, default_flow_style=False) -EOF diff --git a/openstack-glance.configure b/openstack-glance.configure deleted file mode 100644 index 5da08895..00000000 --- a/openstack-glance.configure +++ /dev/null @@ -1,101 +0,0 @@ -#!/bin/sh - -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -set -e - -ROOT="$1" - -enable(){ - ln -sf "/usr/lib/systemd/system/$1.service" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" -} - -unnaceptable(){ - eval echo Unexpected value \$$1 for $1 >&2 - exit 1 -} - -check_bool(){ - case "$(eval echo \"\$$1\")" in - True|'') - eval "$1=true" - ;; - False) - eval "$1=false" - ;; - *) - unnaceptable "$1" - ;; - esac -} - -########################################################################## -# Check variables -########################################################################## - -check_bool GLANCE_ENABLE_SERVICE - -if ! "$GLANCE_ENABLE_SERVICE"; then - exit 0 -fi - -if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \ - -z "$GLANCE_SERVICE_USER" -o \ - -z "$GLANCE_SERVICE_PASSWORD" -o \ - -z "$GLANCE_DB_USER" -o \ - -z "$GLANCE_DB_PASSWORD" -o \ - -z "$RABBITMQ_HOST" -o \ - -z "$RABBITMQ_PORT" -o \ - -z "$RABBITMQ_USER" -o \ - -z "$RABBITMQ_PASSWORD" -o \ - -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ - -z "$CONTROLLER_HOST_ADDRESS" ]; then - echo Some options required for Glance were defined, but not all. 
- exit 1 -fi - -###################################### -# Enable relevant openstack services # -###################################### - -enable openstack-glance-setup - -########################################################################## -# Generate configuration file -########################################################################## - -OPENSTACK_DATA="$ROOT/etc/openstack" -mkdir -p "$OPENSTACK_DATA" - -python <<'EOF' >"$OPENSTACK_DATA/glance.conf" -import os, sys, yaml - -glance_configuration={ - 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], - 'GLANCE_SERVICE_PASSWORD': os.environ['GLANCE_SERVICE_PASSWORD'], - 'GLANCE_SERVICE_USER': os.environ['GLANCE_SERVICE_USER'], - 'GLANCE_DB_USER': os.environ['GLANCE_DB_USER'], - 'GLANCE_DB_PASSWORD': os.environ['GLANCE_DB_PASSWORD'], - 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], - 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], - 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], - 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], - 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], - 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], -} - -yaml.dump(glance_configuration, sys.stdout, default_flow_style=False) -EOF diff --git a/openstack-ironic.configure b/openstack-ironic.configure deleted file mode 100644 index 962bbcd1..00000000 --- a/openstack-ironic.configure +++ /dev/null @@ -1,155 +0,0 @@ -#!/bin/sh - -# Copyright (C) 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -set -e - -ROOT="$1" - -enable(){ - ln -sf "/usr/lib/systemd/system/$1.service" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" -} - -unnaceptable(){ - eval echo Unexpected value \$$1 for $1 >&2 - exit 1 -} - -check_bool(){ - case "$(eval echo \"\$$1\")" in - True|'') - eval "$1=true" - ;; - False) - eval "$1=false" - ;; - *) - unnaceptable "$1" - ;; - esac -} - -########################################################################## -# Check variables -########################################################################## - -check_bool IRONIC_ENABLE_SERVICE - -if ! "$IRONIC_ENABLE_SERVICE"; then - exit 0 -fi - -if [ -z "$IRONIC_SERVICE_USER" -o \ - -z "$IRONIC_SERVICE_PASSWORD" -o \ - -z "$IRONIC_DB_USER" -o \ - -z "$IRONIC_DB_PASSWORD" -o \ - -z "$RABBITMQ_HOST" -o \ - -z "$RABBITMQ_USER" -o \ - -z "$RABBITMQ_PASSWORD" -o \ - -z "$RABBITMQ_PORT" -o \ - -z "$CONTROLLER_HOST_ADDRESS" -o \ - -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ - -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then - echo Some options required for Ironic were defined, but not all. - exit 1 -fi - -###################################### -# Enable relevant openstack services # -###################################### - -enable openstack-ironic-setup -enable iscsi-setup -enable target #target.service! 
-enable iscsid - -########################################################################## -# Generate configuration file -########################################################################## - -OPENSTACK_DATA="$ROOT/etc/openstack" -mkdir -p "$OPENSTACK_DATA" - -python <<'EOF' >"$OPENSTACK_DATA/ironic.conf" -import os, sys, yaml - -ironic_configuration={ - 'IRONIC_SERVICE_USER': os.environ['IRONIC_SERVICE_USER'], - 'IRONIC_SERVICE_PASSWORD': os.environ['IRONIC_SERVICE_PASSWORD'], - 'IRONIC_DB_USER': os.environ['IRONIC_DB_USER'], - 'IRONIC_DB_PASSWORD': os.environ['IRONIC_DB_PASSWORD'], - 'RABBITMQ_HOST':os.environ['RABBITMQ_HOST'], - 'RABBITMQ_PORT':os.environ['RABBITMQ_PORT'], - 'RABBITMQ_USER':os.environ['RABBITMQ_USER'], - 'RABBITMQ_PASSWORD':os.environ['RABBITMQ_PASSWORD'], - 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], - 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], - 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], - -} - -yaml.dump(ironic_configuration, sys.stdout, default_flow_style=False) -EOF - -########################################################################## -# Configure the TFTP service # -########################################################################## - -tftp_root="/srv/tftp_root/" # trailing slash is essential -mkdir -p "$ROOT/$tftp_root" - -install -D /dev/stdin -m 644 "$ROOT/usr/lib/systemd/system/tftp-hpa.service" << 'EOF' -[Unit] -Description=tftp service for booting kernels -After=network-online.target -Wants=network-online.target - -[Service] -Type=simple -EnvironmentFile=/etc/tftp-hpa.conf -ExecStart=/usr/sbin/in.tftpd $TFTP_OPTIONS ${TFTP_ROOT} -StandardInput=socket -StandardOutput=inherit -StandardError=journal - -[Install] -WantedBy=multi-user.target -EOF - -install -D /dev/stdin -m 644 "$ROOT/usr/lib/systemd/system/tftp-hpa.socket" << EOF -[Unit] -Description=Tftp server activation socket - -[Socket] -ListenDatagram=$MANAGEMENT_INTERFACE_IP_ADDRESS:69 -FreeBind=yes - -[Install] -WantedBy=sockets.target -EOF - -install -D -m 644 /dev/stdin "$ROOT"/etc/tftp-hpa.conf << EOF -TFTP_ROOT=$tftp_root -TFTP_OPTIONS="-v -v -v -v -v --map-file $tftp_root/map-file" -EOF - -install -D /dev/stdin -m 644 "$ROOT/$tftp_root"/map-file << EOF -r ^([^/]) $tftp_root\1 -r ^/tftpboot/ $tftp_root\2 -EOF - -cp "$ROOT"/usr/share/syslinux/pxelinux.0 "$ROOT/$tftp_root" diff --git a/openstack-keystone.configure b/openstack-keystone.configure deleted file mode 100644 index 6b011b14..00000000 --- a/openstack-keystone.configure +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/sh - -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . 
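[Editor's note: the ironic script above writes its systemd units, tftp-hpa.conf, and the TFTP map-file with `install -D /dev/stdin -m 644 TARGET <<EOF`, which creates any missing parent directories, sets the mode, and writes the heredoc contents in a single step. A minimal Python sketch of that one idiom, assuming nothing beyond the standard library (path and contents are placeholders):

    import os

    def install_file(path, contents, mode=0o644):
        # 'install -D' creates missing parent directories before writing.
        parent = os.path.dirname(path)
        if parent and not os.path.isdir(parent):
            os.makedirs(parent)
        with open(path, 'w') as f:
            f.write(contents)
        os.chmod(path, mode)

]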
- -set -e - -ROOT="$1" - -enable(){ - ln -sf "/usr/lib/systemd/system/$1.service" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" -} - -unnaceptable(){ - eval echo Unexpected value \$$1 for $1 >&2 - exit 1 -} - -check_bool(){ - case "$(eval echo \"\$$1\")" in - True|'') - eval "$1=true" - ;; - False) - eval "$1=false" - ;; - *) - unnaceptable "$1" - ;; - esac -} - -########################################################################## -# Check variables -########################################################################## - -check_bool KEYSTONE_ENABLE_SERVICE - -if ! "$KEYSTONE_ENABLE_SERVICE"; then - exit 0 -fi - -if [ -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" -o \ - -z "$KEYSTONE_ADMIN_PASSWORD" -o \ - -z "$KEYSTONE_DB_USER" -o \ - -z "$KEYSTONE_DB_PASSWORD" -o \ - -z "$RABBITMQ_HOST" -o \ - -z "$RABBITMQ_PORT" -o \ - -z "$RABBITMQ_USER" -o \ - -z "$RABBITMQ_PASSWORD" -o \ - -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ - -z "$CONTROLLER_HOST_ADDRESS" ]; then - echo Some options required for Keystone were defined, but not all. - exit 1 -fi - -python <<'EOF' -import socket -import sys -import os - -try: - socket.inet_pton(socket.AF_INET, os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS']) -except: - print "Error: MANAGEMENT_INTERFACE_IP_ADDRESS is not a valid IP" - sys.exit(1) -EOF - -###################################### -# Enable relevant openstack services # -###################################### - -enable openstack-keystone-setup -enable openstack-horizon-setup -enable postgres-server-setup - -########################################################################## -# Generate configuration file -########################################################################## - -OPENSTACK_DATA="$ROOT/etc/openstack" -mkdir -p "$OPENSTACK_DATA" - -python <<'EOF' >"$OPENSTACK_DATA/keystone.conf" -import os, sys, yaml - -keystone_configuration={ - 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], - 'KEYSTONE_ADMIN_PASSWORD': os.environ['KEYSTONE_ADMIN_PASSWORD'], - 'KEYSTONE_DB_USER': os.environ['KEYSTONE_DB_USER'], - 'KEYSTONE_DB_PASSWORD': os.environ['KEYSTONE_DB_PASSWORD'], - 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], - 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], - 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], - 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], - 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], - 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], -} - -yaml.dump(keystone_configuration, sys.stdout, default_flow_style=False) -EOF - -python << 'EOF' > "$OPENSTACK_DATA/postgres.conf" -import os, sys, yaml - -postgres_configuration={ - 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], -} - -yaml.dump(postgres_configuration, sys.stdout, default_flow_style=False) -EOF diff --git a/openstack-network.configure b/openstack-network.configure deleted file mode 100644 index 10be5a1c..00000000 --- a/openstack-network.configure +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/sh - -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -set -e - -ROOT="$1" - -enable(){ - ln -sf "/usr/lib/systemd/system/$1.service" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/$1.service" -} - -################### -# Enable services # -################### - -enable openvswitch-setup -enable openstack-network-setup - -########################################################################## -# Generate config variable shell snippet -########################################################################## - -OPENSTACK_DATA="$ROOT/etc/openstack" -mkdir -p "$OPENSTACK_DATA" - -python <<'EOF' >"$OPENSTACK_DATA/network.conf" -import os, sys, yaml - -network_configuration = {} - -optional_keys = ('EXTERNAL_INTERFACE',) - -network_configuration.update((k, os.environ[k]) for k in optional_keys if k in os.environ) - -yaml.dump(network_configuration, sys.stdout, default_flow_style=False) -EOF diff --git a/openstack-neutron.configure b/openstack-neutron.configure deleted file mode 100644 index 210222db..00000000 --- a/openstack-neutron.configure +++ /dev/null @@ -1,138 +0,0 @@ -#!/bin/sh - -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -set -e - -ROOT="$1" - -enable(){ - ln -sf "/usr/lib/systemd/system/openstack-neutron-$1.service" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-neutron-$1.service" -} - -unnaceptable(){ - eval echo Unexpected value \$$1 for $1 >&2 - exit 1 -} - -check_bool(){ - case "$(eval echo \"\$$1\")" in - True|'') - eval "$1=true" - ;; - False) - eval "$1=false" - ;; - *) - unnaceptable "$1" - ;; - esac -} - -########################################################################## -# Check variables -########################################################################## - -check_bool NEUTRON_ENABLE_CONTROLLER -check_bool NEUTRON_ENABLE_MANAGER -check_bool NEUTRON_ENABLE_AGENT - -if ! "$NEUTRON_ENABLE_CONTROLLER" && \ - ! "$NEUTRON_ENABLE_MANAGER" && \ - ! "$NEUTRON_ENABLE_AGENT"; then - exit 0 -fi - -if [ -z "$NEUTRON_SERVICE_USER" -o \ - -z "$NEUTRON_SERVICE_PASSWORD" -o \ - -z "$NEUTRON_DB_USER" -o \ - -z "$NEUTRON_DB_PASSWORD" -o \ - -z "$METADATA_PROXY_SHARED_SECRET" -o \ - -z "$NOVA_SERVICE_USER" -o \ - -z "$NOVA_SERVICE_PASSWORD" -o \ - -z "$RABBITMQ_HOST" -o \ - -z "$RABBITMQ_USER" -o \ - -z "$RABBITMQ_PASSWORD" -o \ - -z "$RABBITMQ_PORT" -o \ - -z "$CONTROLLER_HOST_ADDRESS" -o \ - -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ - -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then - echo Some options required for Neutron were defined, but not all. - exit 1 -fi - -############################################# -# Ensure /var/run is an appropriate symlink # -############################################# - -if ! 
link="$(readlink "$ROOT/var/run")" || [ "$link" != ../run ]; then - rm -rf "$ROOT/var/run" - ln -s ../run "$ROOT/var/run" -fi - -################### -# Enable services # -################### - -if "$NEUTRON_ENABLE_CONTROLLER"; then - enable config-setup - enable db-setup - enable server -fi - -if "$NEUTRON_ENABLE_MANAGER"; then - enable config-setup - enable ovs-cleanup - enable dhcp-agent - enable l3-agent - enable plugin-openvswitch-agent - enable metadata-agent -fi - -if "$NEUTRON_ENABLE_AGENT"; then - enable config-setup - enable plugin-openvswitch-agent -fi - -########################################################################## -# Generate config variable shell snippet -########################################################################## - -OPENSTACK_DATA="$ROOT/etc/openstack" -mkdir -p "$OPENSTACK_DATA" - -python <<'EOF' >"$OPENSTACK_DATA/neutron.conf" -import os, sys, yaml - -nova_configuration={ - 'NEUTRON_SERVICE_USER': os.environ['NEUTRON_SERVICE_USER'], - 'NEUTRON_SERVICE_PASSWORD': os.environ['NEUTRON_SERVICE_PASSWORD'], - 'NEUTRON_DB_USER': os.environ['NEUTRON_DB_USER'], - 'NEUTRON_DB_PASSWORD': os.environ['NEUTRON_DB_PASSWORD'], - 'METADATA_PROXY_SHARED_SECRET': os.environ['METADATA_PROXY_SHARED_SECRET'], - 'NOVA_SERVICE_USER': os.environ['NOVA_SERVICE_USER'], - 'NOVA_SERVICE_PASSWORD': os.environ['NOVA_SERVICE_PASSWORD'], - 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], - 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], - 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], - 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], - 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], - 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], - 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], -} - -yaml.dump(nova_configuration, sys.stdout, default_flow_style=False) -EOF diff --git a/openstack-nova.configure b/openstack-nova.configure deleted file mode 100644 index 213f1852..00000000 --- a/openstack-nova.configure +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/sh - -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -set -e - -ROOT="$1" - -enable(){ - ln -sf "/usr/lib/systemd/system/openstack-nova-$1.service" \ - "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-nova-$1.service" -} - -unnaceptable(){ - eval echo Unexpected value \$$1 for $1 >&2 - exit 1 -} - -check_bool(){ - case "$(eval echo \"\$$1\")" in - True|'') - eval "$1=true" - ;; - False) - eval "$1=false" - ;; - *) - unnaceptable "$1" - ;; - esac -} - -########################################################################## -# Check variables -########################################################################## - -check_bool NOVA_ENABLE_CONTROLLER -check_bool NOVA_ENABLE_COMPUTE - -if ! "$NOVA_ENABLE_CONTROLLER" && \ - ! 
"$NOVA_ENABLE_COMPUTE"; then - exit 0 -fi - -if [ -z "$NOVA_SERVICE_USER" -o \ - -z "$NOVA_SERVICE_PASSWORD" -o \ - -z "$NOVA_DB_USER" -o \ - -z "$NOVA_DB_PASSWORD" -o \ - -z "$NOVA_VIRT_TYPE" -o \ - -z "$NEUTRON_SERVICE_USER" -o \ - -z "$NEUTRON_SERVICE_PASSWORD" -o \ - -z "$IRONIC_SERVICE_USER" -a \ - -z "$IRONIC_SERVICE_PASSWORD" -a \ - -z "$METADATA_PROXY_SHARED_SECRET" -o \ - -z "$RABBITMQ_HOST" -o \ - -z "$RABBITMQ_USER" -o \ - -z "$RABBITMQ_PASSWORD" -o \ - -z "$RABBITMQ_PORT" -o \ - -z "$CONTROLLER_HOST_ADDRESS" -o \ - -z "$MANAGEMENT_INTERFACE_IP_ADDRESS" -o \ - -z "$KEYSTONE_TEMPORARY_ADMIN_TOKEN" ]; then - echo Some options required for Nova were defined, but not all. - exit 1 -fi - -############################################### -# Enable libvirtd and libvirt-guests services # -############################################### - -wants_dir="$ROOT"/usr/lib/systemd/system/multi-user.target.wants -mkdir -p "$wants_dir" -mkdir -p "$ROOT"/var/lock/subsys -ln -sf ../libvirtd.service "$wants_dir/libvirtd.service" - -###################################### -# Enable relevant openstack services # -###################################### - -if "$NOVA_ENABLE_CONTROLLER" || "$NOVA_ENABLE_COMPUTE"; then - enable config-setup -fi -if "$NOVA_ENABLE_CONTROLLER" && ! "$NOVA_ENABLE_COMPUTE"; then - enable conductor -fi -if "$NOVA_ENABLE_COMPUTE"; then - enable compute -fi -if "$NOVA_ENABLE_CONTROLLER"; then - for service in db-setup api cert consoleauth novncproxy scheduler serialproxy; do - enable "$service" - done -fi - -########################################################################## -# Change iprange for the interal libvirt to avoid clashes -# with eth0 ip range -########################################################################## - -sed -i "s/192\.168\.122\./192\.168\.1\./g" \ - "$ROOT"/etc/libvirt/qemu/networks/default.xml - - -########################################################################## -# Generate configuration file -########################################################################## - -case "$NOVA_BAREMETAL_SCHEDULING" in - True|true|yes) - export COMPUTE_MANAGER=ironic.nova.compute.manager.ClusteredComputeManager - export RESERVED_HOST_MEMORY_MB=0 - export SCHEDULER_HOST_MANAGER=nova.scheduler.ironic_host_manager.IronicHostManager - export RAM_ALLOCATION_RATIO=1.0 - export COMPUTE_DRIVER=nova.virt.ironic.IronicDriver - ;; - *) - export COMPUTE_MANAGER=nova.compute.manager.ComputeManager - export RESERVED_HOST_MEMORY_MB=512 - export SCHEDULER_HOST_MANAGER=nova.scheduler.host_manager.HostManager - export RAM_ALLOCATION_RATIO=1.5 - export COMPUTE_DRIVER=libvirt.LibvirtDriver - ;; -esac - -OPENSTACK_DATA="$ROOT/etc/openstack" -mkdir -p "$OPENSTACK_DATA" - -python <<'EOF' >"$OPENSTACK_DATA/nova.conf" -import os, sys, yaml - -nova_configuration={ - 'NOVA_SERVICE_USER': os.environ['NOVA_SERVICE_USER'], - 'NOVA_SERVICE_PASSWORD': os.environ['NOVA_SERVICE_PASSWORD'], - 'NOVA_DB_USER': os.environ['NOVA_DB_USER'], - 'NOVA_DB_PASSWORD': os.environ['NOVA_DB_PASSWORD'], - 'NOVA_VIRT_TYPE': os.environ['NOVA_VIRT_TYPE'], - 'COMPUTE_MANAGER': os.environ['COMPUTE_MANAGER'], - 'RESERVED_HOST_MEMORY_MB': os.environ['RESERVED_HOST_MEMORY_MB'], - 'SCHEDULER_HOST_MANAGER': os.environ['SCHEDULER_HOST_MANAGER'], - 'RAM_ALLOCATION_RATIO': os.environ['RAM_ALLOCATION_RATIO'], - 'COMPUTE_DRIVER': os.environ['COMPUTE_DRIVER'], - 'NEUTRON_SERVICE_USER': os.environ['NEUTRON_SERVICE_USER'], - 'NEUTRON_SERVICE_PASSWORD': os.environ['NEUTRON_SERVICE_PASSWORD'], - 'IRONIC_SERVICE_USER': 
os.environ['IRONIC_SERVICE_USER'], - 'IRONIC_SERVICE_PASSWORD': os.environ['IRONIC_SERVICE_PASSWORD'], - 'METADATA_PROXY_SHARED_SECRET': os.environ['METADATA_PROXY_SHARED_SECRET'], - 'RABBITMQ_HOST': os.environ['RABBITMQ_HOST'], - 'RABBITMQ_USER': os.environ['RABBITMQ_USER'], - 'RABBITMQ_PASSWORD': os.environ['RABBITMQ_PASSWORD'], - 'RABBITMQ_PORT': os.environ['RABBITMQ_PORT'], - 'CONTROLLER_HOST_ADDRESS': os.environ['CONTROLLER_HOST_ADDRESS'], - 'MANAGEMENT_INTERFACE_IP_ADDRESS': os.environ['MANAGEMENT_INTERFACE_IP_ADDRESS'], - 'KEYSTONE_TEMPORARY_ADMIN_TOKEN': os.environ['KEYSTONE_TEMPORARY_ADMIN_TOKEN'], -} - -yaml.dump(nova_configuration, sys.stdout, default_flow_style=False) -EOF diff --git a/openstack-swift-controller.configure b/openstack-swift-controller.configure deleted file mode 100644 index 424ab57b..00000000 --- a/openstack-swift-controller.configure +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash -# -# Copyright © 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - - -set -e - -export ROOT="$1" - -MANDATORY_OPTIONS="SWIFT_ADMIN_PASSWORD KEYSTONE_TEMPORARY_ADMIN_TOKEN" - -for option in $MANDATORY_OPTIONS -do - if ! [[ -v $option ]] - then - missing_option=True - echo "Required option $option isn't set!" >&2 - fi -done - -if [[ $missing_option = True ]]; then exit 1; fi - -mkdir -p "$ROOT/usr/lib/systemd/system/multi-user.target.wants" # ensure this exists before we make symlinks - -ln -s "/usr/lib/systemd/system/swift-controller-setup.service" \ - "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-controller-setup.service" -ln -s "/usr/lib/systemd/system/memcached.service" \ - "$ROOT/usr/lib/systemd/system/multi-user.target.wants/memcached.service" -ln -s "/usr/lib/systemd/system/openstack-swift-proxy.service" \ - "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-proxy.service" - -cat << EOF > "$ROOT"/usr/share/openstack/swift-controller-vars.yml ---- -SWIFT_ADMIN_PASSWORD: $SWIFT_ADMIN_PASSWORD -MANAGEMENT_INTERFACE_IP_ADDRESS: $MANAGEMENT_INTERFACE_IP_ADDRESS -KEYSTONE_TEMPORARY_ADMIN_TOKEN: $KEYSTONE_TEMPORARY_ADMIN_TOKEN -EOF diff --git a/openstack.check b/openstack.check deleted file mode 100755 index a3379763..00000000 --- a/openstack.check +++ /dev/null @@ -1,90 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . 
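[Editor's note: the swift-controller script above validates its mandatory options with a bash `[[ -v ]]` loop that names every missing variable, while the sh scripts earlier use one long `[ -z ... -o ... ]` chain with a single generic message; both amount to the same gate. A sketch of the friendlier per-variable form, following the `-z` convention that a set-but-empty variable also counts as missing (the variable list is illustrative):

    import os
    import sys

    # Illustrative list; each script declares its own mandatory options.
    required = ['SWIFT_ADMIN_PASSWORD', 'KEYSTONE_TEMPORARY_ADMIN_TOKEN']
    missing = [name for name in required if not os.environ.get(name)]
    if missing:
        for name in missing:
            sys.stderr.write("Required option %s isn't set!\n" % name)
        sys.exit(1)

]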
- -'''Preparatory checks for Morph 'openstack' write extension''' - -import cliapp -import os -import urlparse -import keystoneclient - -import morphlib.writeexts - - -class OpenStackCheckExtension(morphlib.writeexts.WriteExtension): - - def process_args(self, args): - if len(args) != 1: - raise cliapp.AppException('Wrong number of command line args') - - self.require_btrfs_in_deployment_host_kernel() - - upgrade = self.get_environment_boolean('UPGRADE') - if upgrade: - raise cliapp.AppException( - 'Use the `ssh-rsync` write extension to deploy upgrades to an ' - 'existing remote system.') - - location = args[0] - self.check_location(location) - - self.check_imagename() - self.check_openstack_parameters(self._get_auth_parameters(location)) - - def _get_auth_parameters(self, location): - '''Check the environment variables needed and returns all. - - The environment variables are described in the class documentation. - ''' - - auth_keys = {'OPENSTACK_USER': 'username', - 'OPENSTACK_TENANT': 'tenant_name', - 'OPENSTACK_PASSWORD': 'password'} - - for key in auth_keys: - if os.environ.get(key, '') == '': - raise cliapp.AppException(key + ' was not given') - - auth_params = {auth_keys[key]: os.environ[key] for key in auth_keys} - auth_params['auth_url'] = location - return auth_params - - def check_imagename(self): - if os.environ.get('OPENSTACK_IMAGENAME', '') == '': - raise cliapp.AppException('OPENSTACK_IMAGENAME was not given') - - def check_location(self, location): - x = urlparse.urlparse(location) - if x.scheme not in ['http', 'https']: - raise cliapp.AppException('URL schema must be http or https in %s'\ - % location) - if (x.path != '/v2.0' and x.path != '/v2.0/'): - raise cliapp.AppException('API version must be v2.0 in %s'\ - % location) - - def check_openstack_parameters(self, auth_params): - ''' Check that we can connect to and authenticate with openstack ''' - - self.status(msg='Checking OpenStack credentials...') - - try: - keystoneclient.v2_0.Client(**auth_params) - except keystoneclient.exceptions.Unauthorized: - errmsg = ('Failed to authenticate with OpenStack ' - '(are your credentials correct?)') - raise cliapp.AppException(errmsg) - - -OpenStackCheckExtension().run() diff --git a/openstack.write b/openstack.write deleted file mode 100755 index 67e07c18..00000000 --- a/openstack.write +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2013-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . 
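[Editor's note: openstack.check above fails fast on bad credentials by attempting a real Keystone v2.0 login before any image is built or uploaded. Reduced to a standalone snippet, the core of that check looks like the following; all values are placeholders standing in for the OPENSTACK_* options and the location argument:

    from keystoneclient import exceptions
    from keystoneclient.v2_0 import client

    # Placeholder values for OPENSTACK_USER, OPENSTACK_TENANT,
    # OPENSTACK_PASSWORD and the deployment's location URL.
    auth_params = {
        'username': 'demo',
        'tenant_name': 'demo',
        'password': 'secret',
        'auth_url': 'http://openstack.example.com:5000/v2.0',
    }
    try:
        client.Client(**auth_params)
    except exceptions.Unauthorized:
        raise SystemExit('Failed to authenticate with OpenStack '
                         '(are your credentials correct?)')

Doing this in the .check extension means a typo in the credentials aborts the deployment in seconds rather than after a full image build.]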
- - -'''A Morph deployment write extension for deploying to OpenStack.''' - - -import cliapp -import os -import tempfile -import urlparse - -import morphlib.writeexts - - -class OpenStackWriteExtension(morphlib.writeexts.WriteExtension): - - '''See openstack.write.help for documentation''' - - def process_args(self, args): - if len(args) != 2: - raise cliapp.AppException('Wrong number of command line args') - - temp_root, location = args - - os_params = self.get_openstack_parameters() - - fd, raw_disk = tempfile.mkstemp() - os.close(fd) - self.create_local_system(temp_root, raw_disk) - self.status(msg='Temporary disk image has been created at %s' - % raw_disk) - - self.set_extlinux_root_to_virtio(raw_disk) - - self.configure_openstack_image(raw_disk, location, os_params) - - def set_extlinux_root_to_virtio(self, raw_disk): - '''Re-configures extlinux to use virtio disks''' - self.status(msg='Updating extlinux.conf') - with self.mount(raw_disk) as mp: - path = os.path.join(mp, 'extlinux.conf') - - with open(path) as f: - extlinux_conf = f.read() - - extlinux_conf = extlinux_conf.replace('root=/dev/sda', - 'root=/dev/vda') - with open(path, "w") as f: - f.write(extlinux_conf) - - def get_openstack_parameters(self): - '''Get the environment variables needed. - - The environment variables are described in the class documentation. - ''' - - keys = ('OPENSTACK_USER', 'OPENSTACK_TENANT', - 'OPENSTACK_IMAGENAME', 'OPENSTACK_PASSWORD') - return (os.environ[key] for key in keys) - - def configure_openstack_image(self, raw_disk, auth_url, os_params): - '''Configure the image in OpenStack using glance-client''' - self.status(msg='Configuring OpenStack image...') - - username, tenant_name, image_name, password = os_params - cmdline = ['glance', - '--os-username', username, - '--os-tenant-name', tenant_name, - '--os-password', password, - '--os-auth-url', auth_url, - 'image-create', - '--name=%s' % image_name, - '--disk-format=raw', - '--container-format', 'bare', - '--file', raw_disk] - cliapp.runcmd(cmdline) - - self.status(msg='Image configured.') - -OpenStackWriteExtension().run() diff --git a/openstack.write.help b/openstack.write.help deleted file mode 100644 index 26983060..00000000 --- a/openstack.write.help +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (C) 2014, 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, see . - -help: | - - Deploy a Baserock system as a *new* OpenStack virtual machine. - (Use the `ssh-rsync` write extension to deploy upgrades to an *existing* - VM) - - Deploys the system to the OpenStack host using python-glanceclient. - - Parameters: - - * location: the authentication url of the OpenStack server using the - following syntax: - - http://HOST:PORT/VERSION - - where - - * HOST is the host running OpenStack - * PORT is the port which is using OpenStack for authentications. - * VERSION is the authentication version of OpenStack (Only v2.0 - supported) - - * OPENSTACK_USER=username: the username to use in the `--os-username` - argument to `glance`. 
- - * OPENSTACK_TENANT=tenant: the project name to use in the - `--os-tenant-name` argument to `glance`. - - * OPENSTACK_IMAGENAME=imagename: the name of the image to use in the - `--name` argument to `glance`. - - * OPENSTACK_PASSWORD=password: the password of the OpenStack user. (We - recommend passing this on the command-line, rather than setting an - environment variable or storing it in a cluster cluster definition file.) - - (See `morph help deploy` for details of how to pass parameters to write - extensions) diff --git a/pxeboot.check b/pxeboot.check deleted file mode 100755 index 611708a9..00000000 --- a/pxeboot.check +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/python - -import itertools -import os -import subprocess -import sys -flatten = itertools.chain.from_iterable - -def powerset(iterable): - "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)" - s = list(iterable) - return flatten(itertools.combinations(s, r) for r in range(len(s)+1)) - -valid_option_sets = frozenset(( - ('spawn-novlan', frozenset(('PXEBOOT_DEPLOYER_INTERFACE',))), - ('spawn-vlan', frozenset(('PXEBOOT_DEPLOYER_INTERFACE', 'PXEBOOT_VLAN'))), - ('existing-dhcp', frozenset(('PXEBOOT_DEPLOYER_INTERFACE', - 'PXEBOOT_CONFIG_TFTP_ADDRESS'))), - ('existing-server', frozenset(('PXEBOOT_CONFIG_TFTP_ADDRESS', - 'PXEBOOT_ROOTFS_RSYNC_ADDRESS'))), -)) -valid_modes = frozenset(mode for mode, opt_set in valid_option_sets) - - -def compute_matches(env): - complete_matches = set() - for mode, opt_set in valid_option_sets: - if all(k in env for k in opt_set): - complete_matches.add(opt_set) - return complete_matches - -complete_matches = compute_matches(os.environ) - -def word_separate_options(options): - assert options - s = options.pop(-1) - if options: - s = '%s and %s' % (', '.join(options), s) - return s - - -valid_options = frozenset(flatten(opt_set for (mode, opt_set) - in valid_option_sets)) -matched_options = frozenset(o for o in valid_options - if o in os.environ) -if not complete_matches: - addable_sets = frozenset(frozenset(os) - matched_options for os in - valid_options - if frozenset(os) - matched_options) - print('Please provide %s' % ' or '.join( - word_separate_options(list(opt_set)) - for opt_set in addable_sets if opt_set)) - sys.exit(1) -elif len(complete_matches) > 1: - removable_sets = frozenset(matched_options - frozenset(os) for os in - powerset(matched_options) - if len(compute_matches(os)) == 1) - print('WARNING: Following options might not be needed: %s' % ' or '.join( - word_separate_options(list(opt_set)) - for opt_set in removable_sets if opt_set)) - -if 'PXEBOOT_MODE' in os.environ: - mode = os.environ['PXEBOOT_MODE'] -else: - try: - mode, = (mode for (mode, opt_set) in valid_option_sets - if all(o in os.environ for o in opt_set)) - - except ValueError as e: - print ('More than one candidate for PXEBOOT_MODE, please ' - 'set a value for it. Type `morph help pxeboot.write for ' - 'more info') - sys.exit(1) - -if mode not in valid_modes: - print('%s is not a valid PXEBOOT_MODE' % mode) - sys.exit(1) - -if mode != 'existing-server': - with open(os.devnull, 'w') as devnull: - if subprocess.call(['systemctl', 'is-active', 'nfs-server'], - stdout=devnull) != 0: - print ('ERROR: nfs-server.service is not running and is needed ' - 'for this deployment. 
Please, run `systemctl start nfs-server` ' - 'and try `morph deploy` again.') - sys.exit(1) diff --git a/pxeboot.write b/pxeboot.write deleted file mode 100644 index 3a12ebcc..00000000 --- a/pxeboot.write +++ /dev/null @@ -1,755 +0,0 @@ -#!/usr/bin/env python - - -import collections -import contextlib -import errno -import itertools -import logging -import os -import select -import signal -import shutil -import socket -import string -import StringIO -import subprocess -import sys -import tempfile -import textwrap -import urlparse - -import cliapp - -import morphlib - - -def _int_to_quad_dot(i): - return '.'.join(( - str(i >> 24 & 0xff), - str(i >> 16 & 0xff), - str(i >> 8 & 0xff), - str(i & 0xff))) - - -def _quad_dot_to_int(s): - i = 0 - for octet in s.split('.'): - i <<= 8 - i += int(octet, 10) - return i - - -def _netmask_to_prefixlen(mask): - bs = '{:032b}'.format(mask) - prefix = bs.rstrip('0') - if '0' in prefix: - raise ValueError('abnormal netmask: %s' % - _int_to_quad_dot(mask)) - return len(prefix) - - -def _get_routes(): - routes = [] - with open('/proc/net/route', 'r') as f: - for line in list(f)[1:]: - fields = line.split() - destination, flags, mask = fields[1], fields[3], fields[7] - flags = int(flags, 16) - if flags & 2: - # default route, ignore - continue - destination = socket.ntohl(int(destination, 16)) - mask = socket.ntohl(int(mask, 16)) - prefixlen = _netmask_to_prefixlen(mask) - routes.append((destination, prefixlen)) - return routes - - -class IPRange(object): - def __init__(self, prefix, prefixlen): - self.prefixlen = prefixlen - mask = (1 << prefixlen) - 1 - self.mask = mask << (32 - prefixlen) - self.prefix = prefix & self.mask - @property - def bitstring(self): - return ('{:08b}' * 4).format( - self.prefix >> 24 & 0xff, - self.prefix >> 16 & 0xff, - self.prefix >> 8 & 0xff, - self.prefix & 0xff - )[:self.prefixlen] - def startswith(self, other_range): - return self.bitstring.startswith(other_range.bitstring) - - -def find_subnet(valid_ranges, invalid_ranges): - for vr in valid_ranges: - known_subnets = set(ir for ir in invalid_ranges if ir.startswith(vr)) - prefixlens = set(r.prefixlen for r in known_subnets) - prefixlens.add(32 - 2) # need at least 4 addresses in subnet - prefixlen = min(prefixlens) - if prefixlen <= vr.prefixlen: - # valid subnet is full, move on to next - continue - subnetlen = prefixlen - vr.prefixlen - for prefix in (subnetid + vr.prefix - for subnetid in xrange(1 << subnetlen)): - if any(subnet.prefix == prefix for subnet in known_subnets): - continue - return prefix, prefixlen - - -def _normalise_macaddr(macaddr): - '''pxelinux.0 wants the mac address to be lowercase and - separated''' - digits = (c for c in macaddr.lower() if c in string.hexdigits) - nibble_pairs = grouper(digits, 2) - return '-'.join(''.join(byte) for byte in nibble_pairs) - - -@contextlib.contextmanager -def executor(target_pid): - 'Kills a process if its parent dies' - read_fd, write_fd = os.pipe() - helper_pid = os.fork() - if helper_pid == 0: - try: - os.close(write_fd) - while True: - rlist, _, _ = select.select([read_fd], [], []) - if read_fd in rlist: - d = os.read(read_fd, 1) - if not d: - os.kill(target_pid, signal.SIGKILL) - if d in ('', 'Q'): - os._exit(0) - else: - os._exit(1) - except BaseException as e: - import traceback - traceback.print_exc() - os._exit(1) - os.close(read_fd) - yield - os.write(write_fd, 'Q') - os.close(write_fd) - - -def grouper(iterable, n, fillvalue=None): - "Collect data into fixed-length chunks or blocks" - # grouper('ABCDEFG', 3, 
'x') --> ABC DEF Gxx" - args = [iter(iterable)] * n - return itertools.izip_longest(*args, fillvalue=fillvalue) - - -class PXEBoot(morphlib.writeexts.WriteExtension): - @contextlib.contextmanager - def _vlan(self, interface, vlan): - viface = '%s.%s' % (interface, vlan) - self.status(msg='Creating vlan %(viface)s', viface=viface) - subprocess.check_call(['vconfig', 'add', interface, str(vlan)]) - try: - yield viface - finally: - self.status(msg='Destroying vlan %(viface)s', viface=viface) - subprocess.call(['vconfig', 'rem', viface]) - - @contextlib.contextmanager - def _static_ip(self, iface): - valid_ranges = set(( - IPRange(_quad_dot_to_int('192.168.0.0'), 16), - IPRange(_quad_dot_to_int('172.16.0.0'), 12), - IPRange(_quad_dot_to_int('10.0.0.0'), 8), - )) - invalid_ranges = set(IPRange(prefix, prefixlen) - for (prefix, prefixlen) in _get_routes()) - prefix, prefixlen = find_subnet(valid_ranges, invalid_ranges) - netaddr = prefix - dhcp_server_ip = netaddr + 1 - client_ip = netaddr + 2 - broadcast_ip = prefix | ((1 << (32 - prefixlen)) - 1) - self.status(msg='Assigning ip address %(ip)s/%(prefixlen)d to ' - 'iface %(iface)s', - ip=_int_to_quad_dot(dhcp_server_ip), prefixlen=prefixlen, - iface=iface) - subprocess.check_call(['ip', 'addr', 'add', - '{}/{}'.format(_int_to_quad_dot(dhcp_server_ip), - prefixlen), - 'broadcast', _int_to_quad_dot(broadcast_ip), - 'scope', 'global', - 'dev', iface]) - try: - yield (dhcp_server_ip, client_ip, broadcast_ip) - finally: - self.status(msg='Removing ip addresses from iface %(iface)s', - iface=iface) - subprocess.call(['ip', 'addr', 'flush', 'dev', iface]) - - @contextlib.contextmanager - def _up_interface(self, iface): - self.status(msg='Bringing interface %(iface)s up', iface=iface) - subprocess.check_call(['ip', 'link', 'set', iface, 'up']) - try: - yield - finally: - self.status(msg='Bringing interface %(iface)s down', iface=iface) - subprocess.call(['ip', 'link', 'set', iface, 'down']) - - @contextlib.contextmanager - def static_ip(self, interface): - with self._static_ip(iface=interface) as (host_ip, client_ip, - broadcast_ip), \ - self._up_interface(iface=interface): - yield (_int_to_quad_dot(host_ip), - _int_to_quad_dot(client_ip), - _int_to_quad_dot(broadcast_ip)) - - @contextlib.contextmanager - def vlan(self, interface, vlan): - with self._vlan(interface=interface, vlan=vlan) as viface, \ - self.static_ip(interface=viface) \ - as (host_ip, client_ip, broadcast_ip): - yield host_ip, client_ip, broadcast_ip - - @contextlib.contextmanager - def _tempdir(self): - td = tempfile.mkdtemp() - print 'Created tempdir:', td - try: - yield td - finally: - shutil.rmtree(td, ignore_errors=True) - - @contextlib.contextmanager - def _remote_tempdir(self, hostname, template): - persist = os.environ.get('PXE_INSTALLER') in ('no', 'False') - td = cliapp.ssh_runcmd(hostname, ['mktemp', '-d', template]).strip() - try: - yield td - finally: - if not persist: - cliapp.ssh_runcmd(hostname, ['find', td, '-delete']) - - def _serve_tftpd(self, sock, host, port, interface, tftproot): - self.settings.progname = 'tftp server' - self._set_process_name() - while True: - logging.debug('tftpd waiting for connections') - # recvfrom with MSG_PEEK is how you accept UDP connections - _, peer = sock.recvfrom(0, socket.MSG_PEEK) - conn = sock - logging.debug('Connecting socket to peer: ' + repr(peer)) - conn.connect(peer) - # The existing socket is now only serving that peer, so we need to - # bind a new UDP socket to the wildcard address, which needs the - # port to be in 
REUSEADDR mode. - conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - logging.debug('Binding replacement socket to ' + repr((host, port))) - sock.bind((host, port)) - - logging.debug('tftpd server handing connection to tftpd') - tftpd_serve = ['tftpd', '-rl', tftproot] - ret = subprocess.call(args=tftpd_serve, stdin=conn, - stdout=conn, stderr=None, close_fds=True) - # It's handy to turn off REUSEADDR after the rebinding, - # so we can protect against future bind attempts on this port. - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 0) - logging.debug('tftpd exited %d' % ret) - os._exit(0) - - @contextlib.contextmanager - def _spawned_tftp_server(self, tftproot, host_ip, interface, tftp_port=0): - # inetd-style launchers tend to bind UDP ports with SO_REUSEADDR, - # because they need to have multiple ports bound, one for recieving - # all connection attempts on that port, and one for each concurrent - # connection with a peer - # this makes detecting whether there's a tftpd running difficult, so - # we'll instead use an ephemeral port and configure the PXE boot to - # use that tftp server for the kernel - s = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) - s.bind((host_ip, tftp_port)) - host, port = s.getsockname() - self.status(msg='Bound listen socket to %(host)s, %(port)s', - host=host, port=port) - pid = os.fork() - if pid == 0: - try: - self._serve_tftpd(sock=s, host=host, port=port, - interface=interface, tftproot=tftproot) - except BaseException as e: - import traceback - traceback.print_exc() - os._exit(1) - s.close() - with executor(pid): - try: - yield port - finally: - self.status(msg='Killing tftpd listener pid=%(pid)d', - pid=pid) - os.kill(pid, signal.SIGKILL) - - @contextlib.contextmanager - def tftp_server(self, host_ip, interface, tftp_port=0): - with self._tempdir() as tftproot, \ - self._spawned_tftp_server(tftproot=tftproot, host_ip=host_ip, - interface=interface, - tftp_port=tftp_port) as tftp_port: - self.status(msg='Serving tftp root %(tftproot)s, on port %(port)d', - port=tftp_port, tftproot=tftproot) - yield tftp_port, tftproot - - @contextlib.contextmanager - def _local_copy(self, src, dst): - self.status(msg='Installing %(src)s to %(dst)s', - src=src, dst=dst) - shutil.copy2(src=src, dst=dst) - try: - yield - finally: - self.status(msg='Removing %(dst)s', dst=dst) - os.unlink(dst) - - @contextlib.contextmanager - def _local_symlink(self, src, dst): - os.symlink(src, dst) - try: - yield - finally: - os.unlink(dst) - - def local_pxelinux(self, tftproot): - return self._local_copy('/usr/share/syslinux/pxelinux.0', - os.path.join(tftproot, 'pxelinux.0')) - - def local_kernel(self, rootfs, tftproot): - return self._local_copy(os.path.join(rootfs, 'boot/vmlinuz'), - os.path.join(tftproot, 'kernel')) - - @contextlib.contextmanager - def _remote_copy(self, hostname, src, dst): - persist = os.environ.get('PXE_INSTALLER') in ('no', 'False') - with open(src, 'r') as f: - cliapp.ssh_runcmd(hostname, - ['install', '-D', '-m644', '/proc/self/fd/0', - dst], stdin=f, stdout=None, stderr=None) - try: - yield - finally: - if not persist: - cliapp.ssh_runcmd(hostname, ['rm', dst]) - - @contextlib.contextmanager - def _remote_symlink(self, hostname, src, dst): - persist = os.environ.get('PXE_INSTALLER') in ('no', 'False') - cliapp.ssh_runcmd(hostname, - ['ln', '-s', '-f', src, dst], - stdin=None, stdout=None, 
stderr=None) - try: - yield - finally: - if not persist: - cliapp.ssh_runcmd(hostname, ['rm', '-f', dst]) - - @contextlib.contextmanager - def remote_kernel(self, rootfs, tftp_url, macaddr): - for name in ('vmlinuz', 'zImage', 'uImage'): - kernel_path = os.path.join(rootfs, 'boot', name) - if os.path.exists(kernel_path): - break - else: - raise cliapp.AppException('Failed to locate kernel') - url = urlparse.urlsplit(tftp_url) - basename = '{}-kernel'.format(_normalise_macaddr(macaddr)) - target_path = os.path.join(url.path, basename) - with self._remote_copy(hostname=url.hostname, src=kernel_path, - dst=target_path): - yield basename - - @contextlib.contextmanager - def remote_fdt(self, rootfs, tftp_url, macaddr): - fdt_rel_path = os.environ.get('DTB_PATH', '') - if fdt_rel_path == '': - yield - fdt_abs_path = os.path.join(rootfs, fdt_rel_path) - if not fdt_abs_path: - raise cliapp.AppException('Failed to locate Flattened Device Tree') - url = urlparse.urlsplit(tftp_url) - basename = '{}-fdt'.format(_normalise_macaddr(macaddr)) - target_path = os.path.join(url.path, basename) - with self._remote_copy(hostname=url.hostname, src=fdt_abs_path, - dst=target_path): - yield basename - - @contextlib.contextmanager - def local_nfsroot(self, rootfs, target_ip): - nfsroot = target_ip + ':' + rootfs - self.status(msg='Exporting %(nfsroot)s as local nfsroot', - nfsroot=nfsroot) - cliapp.runcmd(['exportfs', '-o', 'ro,insecure,no_root_squash', - nfsroot]) - try: - yield - finally: - self.status(msg='Removing %(nfsroot)s from local nfsroots', - nfsroot=nfsroot) - cliapp.runcmd(['exportfs', '-u', nfsroot]) - - @contextlib.contextmanager - def remote_nfsroot(self, rootfs, rsync_url, macaddr): - url = urlparse.urlsplit(rsync_url) - template = os.path.join(url.path, - _normalise_macaddr(macaddr) + '.XXXXXXXXXX') - with self._remote_tempdir(hostname=url.hostname, template=template) \ - as tempdir: - nfsroot = urlparse.urlunsplit((url.scheme, url.netloc, tempdir, - url.query, url.fragment)) - cliapp.runcmd(['rsync', '-asSPH', '--delete', rootfs, nfsroot], - stdin=None, stdout=open(os.devnull, 'w'), - stderr=None) - yield os.path.join(os.path.basename(tempdir), - os.path.basename(rootfs)) - - @staticmethod - def _write_pxe_config(fh, kernel_tftp_url, rootfs_nfs_url, device=None, - fdt_subpath=None, extra_args=''): - - if device is None: - ip_cfg = "ip=dhcp" - else: - ip_cfg = "ip=:::::{device}:dhcp::".format(device=device) - - fh.write(textwrap.dedent('''\ - DEFAULT default - LABEL default - LINUX {kernel_url} - APPEND root=/dev/nfs {ip_cfg} nfsroot={rootfs_nfs_url} {extra_args} - ''').format(kernel_url=kernel_tftp_url, ip_cfg=ip_cfg, - rootfs_nfs_url=rootfs_nfs_url, extra_args=extra_args)) - if fdt_subpath is not None: - fh.write("FDT {}\n".format(fdt_subpath)) - fh.flush() - - @contextlib.contextmanager - def local_pxeboot_config(self, tftproot, macaddr, ip, tftp_port, - nfsroot_dir, device=None): - kernel_tftp_url = 'tftp://{}:{}/kernel'.format(ip, tftp_port) - rootfs_nfs_url = '{}:{}'.format(ip, nfsroot_dir) - pxe_cfg_filename = _normalise_macaddr(macaddr) - pxe_cfg_path = os.path.join(tftproot, 'pxelinux.cfg', pxe_cfg_filename) - os.makedirs(os.path.dirname(pxe_cfg_path)) - with open(pxe_cfg_path, 'w') as f: - self._write_pxe_config(fh=f, kernel_tftp_url=kernel_tftp_url, - rootfs_nfs_url=rootfs_nfs_url, - device=device, - extra_args=os.environ.get('KERNEL_ARGS','')) - - try: - with self._local_symlink( - src=pxe_cfg_filename, - dst=os.path.join(tftproot, - 'pxelinux.cfg', - '01-' + pxe_cfg_filename)): - 
yield - finally: - os.unlink(pxe_cfg_path) - - @contextlib.contextmanager - def remote_pxeboot_config(self, tftproot, kernel_tftproot, kernel_subpath, - fdt_subpath, rootfs_nfsroot, rootfs_subpath, - macaddr): - rootfs_nfs_url = '{}/{}'.format(rootfs_nfsroot, rootfs_subpath) - url = urlparse.urlsplit(kernel_tftproot) - kernel_tftp_url = '{}:{}'.format(url.netloc, kernel_subpath) - pxe_cfg_filename = _normalise_macaddr(macaddr) - url = urlparse.urlsplit(tftproot) - inst_cfg_path = os.path.join(url.path, 'pxelinux.cfg') - with tempfile.NamedTemporaryFile() as f: - self._write_pxe_config( - fh=f, kernel_tftp_url=kernel_tftp_url, - fdt_subpath=fdt_subpath, - rootfs_nfs_url=rootfs_nfs_url, - extra_args=os.environ.get('KERNEL_ARGS','')) - with self._remote_copy( - hostname=url.hostname, src=f.name, - dst=os.path.join(inst_cfg_path, - pxe_cfg_filename)), \ - self._remote_symlink( - hostname=url.hostname, - src=pxe_cfg_filename, - dst=os.path.join(inst_cfg_path, - '01-' + pxe_cfg_filename)): - yield - - @contextlib.contextmanager - def dhcp_server(self, interface, host_ip, target_ip, broadcast_ip): - with self._tempdir() as td: - leases_path = os.path.join(td, 'leases') - config_path = os.path.join(td, 'config') - stdout_path = os.path.join(td, 'stdout') - stderr_path = os.path.join(td, 'stderr') - pidfile_path = os.path.join(td, 'pid') - with open(config_path, 'w') as f: - f.write(textwrap.dedent('''\ - start {target_ip} - end {target_ip} - interface {interface} - max_leases 1 - lease_file {leases_path} - pidfile {pidfile_path} - boot_file pxelinux.0 - option dns {host_ip} - option broadcast {broadcast_ip} - ''').format(**locals())) - with open(stdout_path, 'w') as stdout, \ - open(stderr_path, 'w') as stderr: - sp = subprocess.Popen(['udhcpd', '-f', config_path], cwd=td, - stdin=open(os.devnull), stdout=stdout, - stderr=stderr) - try: - with executor(sp.pid): - yield - finally: - sp.terminate() - - def get_interface_ip(self, interface): - ip_addresses = [] - info = cliapp.runcmd(['ip', '-o', '-f', 'inet', - 'addr', 'show', interface]).rstrip('\n') - if info: - tokens = collections.deque(info.split()[1:]) - ifname = tokens.popleft() - while tokens: - tok = tokens.popleft() - if tok == 'inet': - address = tokens.popleft() - address, netmask = address.split('/') - ip_addresses.append(address) - elif tok == 'brd': - tokens.popleft() # not interested in broadcast address - elif tok == 'scope': - tokens.popleft() # not interested in scope tag - else: - continue - if not ip_addresses: - raise cliapp.AppException('Interface %s has no addresses' - % interface) - if len(ip_addresses) > 1: - warnings.warn('Interface %s has multiple addresses, ' - 'using first (%s)' % (interface, ip_addresses[0])) - return ip_addresses[0] - - def ipmi_set_target_vlan(self): - if any(env_var.startswith('IPMI_') for env_var in os.environ): - # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST and PXEBOOT_VLAN - default = textwrap.dedent('''\ - ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\ - lan set 1 vlan id "$PXEBOOT_VLAN" - ''') - else: - default = textwrap.dedent('''\ - while true; do - echo Please set the target\\'s vlan to $PXEBOOT_VLAN, \\ - then enter \\"vlanned\\" - read - if [ "$REPLY" = vlanned ]; then - break - fi - done - ''') - command = os.environ.get('PXEBOOT_SET_VLAN_COMMAND', default) - subprocess.check_call(['sh', '-euc', command, '-']) - - def ipmi_pxe_reboot_target(self): - if any(env_var.startswith('IPMI_') for env_var in os.environ): - # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST and PXEBOOT_VLAN - 
default = textwrap.dedent('''\ - set -- ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" - "$@" chassis bootdev pxe - "$@" chassis power reset - ''') - else: - default = textwrap.dedent('''\ - while true; do - echo Please reboot the target in PXE mode, then\\ - enter \\"pxe-booted\\" - read - if [ "$REPLY" = pxe-booted ]; then - break - fi - done - ''') - command = os.environ.get('PXEBOOT_PXE_REBOOT_COMMAND', default) - subprocess.check_call(['sh', '-euc', command, '-']) - - def wait_for_target_to_install(self): - command = os.environ.get( - 'PXEBOOT_WAIT_INSTALL_COMMAND', - textwrap.dedent('''\ - while true; do - echo Please wait for the system to install, then \\ - enter \\"installed\\" - read - if [ "$REPLY" = installed ]; then - break - fi - done - ''')) - subprocess.check_call(['sh', '-euc', command, '-']) - - def ipmi_unset_target_vlan(self): - if any(env_var.startswith('IPMI_') for env_var in os.environ): - # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST - default = textwrap.dedent('''\ - ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\ - lan set 1 vlan id off - ''') - else: - default = textwrap.dedent('''\ - while true; do - echo Please reset the target\\'s vlan, \\ - then enter \\"unvlanned\\" - read - if [ "$REPLY" = unvlanned ]; then - break - fi - done - ''') - command = os.environ.get('PXEBOOT_UNSET_VLAN_COMMAND', default) - subprocess.check_call(['sh', '-euc', command, '-']) - - def ipmi_reboot_target(self): - if any(env_var.startswith('IPMI_') for env_var in os.environ): - # Needs IPMI_USER, IPMI_PASSWORD, IPMI_HOST - default = textwrap.dedent('''\ - ipmitool -I lanplus -U "$IPMI_USER" -E -H "$IPMI_HOST" \\ - chassis power reset - ''') - else: - default = textwrap.dedent('''\ - while true; do - echo Please reboot the target, then\\ - enter \\"rebooted\\" - read - if [ "$REPLY" = rebooted ]; then - break - fi - done - ''') - command = os.environ.get('PXEBOOT_REBOOT_COMMAND', default) - subprocess.check_call(['sh', '-euc', command, '-']) - - def process_args(self, (temp_root, macaddr)): - interface = os.environ.get('PXEBOOT_DEPLOYER_INTERFACE', None) - target_interface = os.environ.get('PXEBOOT_TARGET_INTERFACE', None) - vlan = os.environ.get('PXEBOOT_VLAN') - if vlan is not None: vlan = int(vlan) - mode = os.environ.get('PXEBOOT_MODE') - if mode is None: - if interface: - if vlan is not None: - mode = 'spawn-vlan' - else: - if 'PXEBOOT_CONFIG_TFTP_ADDRESS' in os.environ: - mode = 'existing-dhcp' - else: - mode = 'spawn-novlan' - else: - mode = 'existing-server' - assert mode in ('spawn-vlan', 'spawn-novlan', 'existing-dhcp', - 'existing-server') - if mode == 'spawn-vlan': - with self.vlan(interface=interface, vlan=vlan) \ - as (host_ip, target_ip, broadcast_ip), \ - self.tftp_server(host_ip=host_ip, interface=interface) \ - as (tftp_port, tftproot), \ - self.local_pxelinux(tftproot=tftproot), \ - self.local_kernel(rootfs=temp_root, tftproot=tftproot), \ - self.local_nfsroot(rootfs=temp_root, target_ip=target_ip), \ - self.local_pxeboot_config(tftproot=tftproot, macaddr=macaddr, - device=target_interface, - ip=host_ip, tftp_port=tftp_port, - nfsroot_dir=temp_root), \ - self.dhcp_server(interface=interface, host_ip=host_ip, - target_ip=target_ip, - broadcast_ip=broadcast_ip): - self.ipmi_set_target_vlan() - self.ipmi_pxe_reboot_target() - self.wait_for_target_to_install() - self.ipmi_unset_target_vlan() - self.ipmi_reboot_target() - elif mode == 'spawn-novlan': - with self.static_ip(interface=interface) as (host_ip, target_ip, - broadcast_ip), \ - 
self.tftp_server(host_ip=host_ip, interface=interface, - tftp_port=69) \ - as (tftp_port, tftproot), \ - self.local_pxelinux(tftproot=tftproot), \ - self.local_kernel(rootfs=temp_root, tftproot=tftproot), \ - self.local_nfsroot(rootfs=temp_root, target_ip=target_ip), \ - self.local_pxeboot_config(tftproot=tftproot, macaddr=macaddr, - device=target_interface, - ip=host_ip, tftp_port=tftp_port, - nfsroot_dir=temp_root), \ - self.dhcp_server(interface=interface, host_ip=host_ip, - target_ip=target_ip, - broadcast_ip=broadcast_ip): - self.ipmi_pxe_reboot_target() - self.wait_for_target_to_install() - self.ipmi_reboot_target() - elif mode == 'existing-dhcp': - ip = self.get_interface_ip(interface) - config_tftpaddr = os.environ['PXEBOOT_CONFIG_TFTP_ADDRESS'] - with self.tftp_server(ip=ip, interface=interface, tftp_port=69) \ - as (tftp_port, tftproot), \ - self.local_kernel(rootfs=temp_root, tftproot=tftproot), \ - self.local_nfsroot(rootfs=temp_root, client_ip=''): - kernel_tftproot = 'tftp://{}:{}/'.format(ip, tftp_port) - rootfs_nfsroot = '{}:{}'.format(ip, temp_root) - with self.remote_pxeboot_config( - tftproot=config_tftpaddr, - kernel_tftproot=kernel_tftproot, - kernel_subpath='kernel', - rootfs_nfsroot=nfsroot, - rootfs_subpath='', - macaddr=macaddr): - self.ipmi_pxe_reboot_target() - self.wait_for_target_to_install() - self.ipmi_reboot_target() - elif mode == 'existing-server': - config_tftpaddr = os.environ[ 'PXEBOOT_CONFIG_TFTP_ADDRESS'] - kernel_tftpaddr = os.environ.get('PXEBOOT_KERNEL_TFTP_ADDRESS', - config_tftpaddr) - url = urlparse.urlsplit(kernel_tftpaddr) - kernel_tftproot = os.environ.get('PXEBOOT_KERNEL_TFTP_ROOT', - 'tftp://%s/%s' % (url.hostname, - url.path)) - rootfs_rsync = os.environ['PXEBOOT_ROOTFS_RSYNC_ADDRESS'] - url = urlparse.urlsplit(rootfs_rsync) - nfsroot = os.environ.get('PXEBOOT_ROOTFS_NFSROOT', - '%s:%s' % (url.hostname, url.path)) - with self.remote_kernel(rootfs=temp_root, tftp_url=kernel_tftpaddr, - macaddr=macaddr) as kernel_subpath, \ - self.remote_fdt(rootfs=temp_root, tftp_url=kernel_tftpaddr, - macaddr=macaddr) as fdt_subpath, \ - self.remote_nfsroot(rootfs=temp_root, rsync_url=rootfs_rsync, \ - macaddr=macaddr) as rootfs_subpath, \ - self.remote_pxeboot_config(tftproot=config_tftpaddr, - kernel_tftproot=kernel_tftproot, - kernel_subpath=kernel_subpath, - fdt_subpath=fdt_subpath, - rootfs_nfsroot=nfsroot, - rootfs_subpath=rootfs_subpath, - macaddr=macaddr): - persist = os.environ.get('PXE_INSTALLER') in ('no', 'False') - if not persist: - self.ipmi_pxe_reboot_target() - self.wait_for_target_to_install() - self.ipmi_reboot_target() - else: - cliapp.AppException('Invalid PXEBOOT_MODE: %s' % mode) - -PXEBoot().run() diff --git a/pxeboot.write.help b/pxeboot.write.help deleted file mode 100644 index 7cb78bce..00000000 --- a/pxeboot.write.help +++ /dev/null @@ -1,166 +0,0 @@ -help: > - pxeboot.write extension. - - - This write extension will serve your generated system over NFS to - the target system. - - In all modes `location` is the mac address of the interface that - the target will PXE boot from. This is used so that the target will - load the configuration file appropriate to it. - - - # `PXEBOOT_MODE` - - - It has 4 modes, which can be specified with PXEBOOT_MODE, or inferred - from which parameters are passed: - - - ## spawn-vlan - - - Specify PXEBOOT_DEPLOYER_INTERFACE and PXEBOOT_VLAN to configure - the target to pxeboot on a vlan and spawn a dhcp, nfs and tftp - server. 
This is potentially the fastest, since it doesn't need to - copy data to other servers. - - This will create a vlan interface for the interface specified in - PXEBOOT_DEPLOYER_INTERFACE and spawn a dhcp server which serves - pxelinux.0, a configuration file and a kernel image from itself. - - The configuration file informs the target to boot with a kernel - command-line that uses an NFS root served from the deployment host. - - - ## spawn-novlan - - - Specify PXEBOOT_DEPLOYER_INTERFACE without PXEBOOT_VLAN to configure - like `spawn-vlan`, but without creating the vlan interface. - - This assumes that you have exclusive access to the interface, such - as if you're plugged in to the device directly, or your interface - is vlanned by your infrastructure team. - - This is required if you are serving from a VM and bridging it to the - correct network via macvtap. For this to work, you need to macvtap - bridge to a pre-vlanned interface on your host machine. - - - ## existing-dhcp - - - Specify PXEBOOT_DEPLOYER_INTERFACE and PXEBOOT_CONFIG_TFTP_ADDRESS - to put config on an existing tftp server, already configured by the - dhcp server. - - This spawns a tftp server and configures the local nfs server, but - doesn't spawn a dhcp server. This is useful if you have already got a - dhcp server that serves PXE images. - - PXEBOOT_CONFIG_TFTP_ADDRESS is a URL in the form `sftp://$HOST/$PATH`. - The configuration file is copied to `$PATH/pxelinux.cfg/` on the - target identified by `$HOST`. - - - ## existing-server - - - Specify at least PXEBOOT_CONFIG_TFTP_ADDRESS and - PXEBOOT_ROOTFS_RSYNC_ADDRESS to specify existing servers to copy - config, kernels and the rootfs to. - - Configuration is copied to the target as `existing-dhcp`. - - Specify PXEBOOT_KERNEL_TFTP_ADDRESS if the tftp server that the - kernel must be downloaded from is different to that of the pxelinux - configuration file. - - PXEBOOT_ROOTFS_RSYNC_ADDRESS is a rsync URL describing where to copy - nfsroots to where they will be exported by the NFS server. - - Specify PXEBOOT_ROOTFS_NFSROOT if the nfsroot appears as a different - address from the target's perspective. - - - # IPMI commands - - - After the PXE boot has been set up, the target needs to be rebooted - in PXE mode. - - If the target is IPMI enabled, you can set `IPMI_USER`, `IPMI_HOST` - and `IPMI_PASSWORD` to make it reboot the target into netboot mode - automatically. - - If they are not specified, then instructions will be displayed, and - `pxeboot.write` will wait for you to finish. - - If there are command-line automation tools for rebooting the target - in netboot mode, then appropriate commands can be defined in the - following variables. - - - ## PXEBOOT_PXE_REBOOT_COMMAND - - - This command will be used to reboot the target device with its boot - device set to PXE boot. - - - ## PXEBOOT_REBOOT_COMMAND - - - This command will be used to reboot the target device in its default - boot mode. - - - ## PXEBOOT_WAIT_INSTALL_COMMAND - - - If it is possible for the target to notify you that it has finished - installing, you can put a command in here to wait for the event. - - - # Misc - - - ## KERNEL_ARGS - - - Additional kernel command line options. Note that the following - options - - root=/dev/nfs ip=dhcp nfsroot=$NFSROOT` - - are implicitly added by the extension. - - - ## DTB_PATH - - - Location in the deployed root filesystem of the Flattened Device - Tree blob (FDT) to use. 
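As a rough restatement of the behaviour described above (a sketch, not the shipped code, though it mirrors the process_args logic in pxeboot.write): an explicit PXEBOOT_MODE wins, otherwise the variables that are set pick the mode, and whichever mode runs ends up serving a small pxelinux fragment in which the NFS-root options are implicit and KERNEL_ARGS is appended last.

    import os
    import textwrap

    def infer_pxeboot_mode(environ=os.environ):
        # Explicit PXEBOOT_MODE wins; otherwise infer from what is set.
        mode = environ.get('PXEBOOT_MODE')
        if mode:
            return mode
        if environ.get('PXEBOOT_DEPLOYER_INTERFACE'):
            if environ.get('PXEBOOT_VLAN') is not None:
                return 'spawn-vlan'
            if 'PXEBOOT_CONFIG_TFTP_ADDRESS' in environ:
                return 'existing-dhcp'
            return 'spawn-novlan'
        return 'existing-server'

    def render_pxe_config(kernel_url, rootfs_nfs_url, device=None,
                          extra_args=''):
        # Same layout as _write_pxe_config: the root=/dev/nfs, ip= and
        # nfsroot= options are added implicitly, KERNEL_ARGS goes last.
        ip_cfg = ('ip=dhcp' if device is None
                  else 'ip=:::::{0}:dhcp::'.format(device))
        return textwrap.dedent('''\
            DEFAULT default
            LABEL default
            LINUX {0}
            APPEND root=/dev/nfs {1} nfsroot={2} {3}
            ''').format(kernel_url, ip_cfg, rootfs_nfs_url, extra_args)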
- - - ## PXE_INSTALLER - - - If set to `no`, `False` or any other YAML value for false, the - remotely installed rootfs, kernel, bootloader config file and - device tree blob, if specified, will not be removed after the - deployment finishes. This variable is only meaningful in the - `existing-server` mode. - - - ## PXEBOOT_TARGET_INTERFACE - - Name of the interface of the target to pxeboot from. Some targets - with more than one interface try to get the rootfs from a different - interface than the interface from where the pxeboot server is - reachable. Using this variable, the kernel arguments will be filled - to include the device. - - Note that the name of this interface is the kernel's default name, - usually called ethX, and is non-deterministic. diff --git a/rawdisk.check b/rawdisk.check deleted file mode 100755 index 9be0ce91..00000000 --- a/rawdisk.check +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -'''Preparatory checks for Morph 'rawdisk' write extension''' - -import cliapp - -import morphlib.writeexts - -import os - - -class RawdiskCheckExtension(morphlib.writeexts.WriteExtension): - def process_args(self, args): - if len(args) != 1: - raise cliapp.AppException('Wrong number of command line args') - - self.require_btrfs_in_deployment_host_kernel() - - location = args[0] - upgrade = self.get_environment_boolean('UPGRADE') - if upgrade: - if not self.is_device(location): - if not os.path.isfile(location): - raise cliapp.AppException( - 'Cannot upgrade %s: it is not an existing disk image' % - location) - - version_label = os.environ.get('VERSION_LABEL') - if version_label is None: - raise cliapp.AppException( - 'VERSION_LABEL was not given. It is required when ' - 'upgrading an existing system.') - else: - if not self.is_device(location): - if os.path.exists(location): - raise cliapp.AppException( - 'Target %s already exists. Use `morph upgrade` if you ' - 'want to update an existing image.' % location) - -RawdiskCheckExtension().run() diff --git a/rawdisk.write b/rawdisk.write deleted file mode 100755 index 6f2d45ba..00000000 --- a/rawdisk.write +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2012-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see .
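Condensed, the guard logic of rawdisk.check above reads as follows (names invented for illustration; the real checks are methods on the extension class): upgrades require an existing image or device plus a VERSION_LABEL, while fresh deployments refuse to clobber an existing image file.

    import os

    def check_rawdisk_target(location, is_device, upgrading, version_label):
        # Illustrative condensation of the guard logic above.
        if upgrading:
            if not is_device and not os.path.isfile(location):
                raise RuntimeError('cannot upgrade %s: no such disk image'
                                   % location)
            if not version_label:
                raise RuntimeError('VERSION_LABEL is required for upgrades')
        elif not is_device and os.path.exists(location):
            raise RuntimeError('%s exists; use `morph upgrade` to update it'
                               % location)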
- - -'''A Morph deployment write extension for raw disk images.''' - - -import cliapp -import os -import sys -import time -import tempfile - -import morphlib.writeexts - - -class RawDiskWriteExtension(morphlib.writeexts.WriteExtension): - - '''See rawdisk.write.help for documentation''' - - def process_args(self, args): - if len(args) != 2: - raise cliapp.AppException('Wrong number of command line args') - - temp_root, location = args - upgrade = self.get_environment_boolean('UPGRADE') - - if upgrade: - self.upgrade_local_system(location, temp_root) - else: - try: - if not self.is_device(location): - with self.created_disk_image(location): - self.format_btrfs(location) - self.create_system(temp_root, location) - self.status(msg='Disk image has been created at %s' % - location) - else: - self.format_btrfs(location) - self.create_system(temp_root, location) - self.status(msg='System deployed to %s' % location) - except Exception: - self.status(msg='Failure to deploy system to %s' % - location) - raise - - def upgrade_local_system(self, raw_disk, temp_root): - self.complete_fstab_for_btrfs_layout(temp_root) - - with self.mount(raw_disk) as mp: - version_label = self.get_version_label(mp) - self.status(msg='Updating image to a new version with label %s' % - version_label) - - version_root = os.path.join(mp, 'systems', version_label) - os.mkdir(version_root) - - old_orig = os.path.join(mp, 'systems', 'default', 'orig') - new_orig = os.path.join(version_root, 'orig') - cliapp.runcmd( - ['btrfs', 'subvolume', 'snapshot', old_orig, new_orig]) - - cliapp.runcmd( - ['rsync', '-a', '--checksum', '--numeric-ids', '--delete', - temp_root + os.path.sep, new_orig]) - - self.create_run(version_root) - - default_path = os.path.join(mp, 'systems', 'default') - if os.path.exists(default_path): - os.remove(default_path) - else: - # we are upgrading and old system that does - # not have an updated extlinux config file - if self.bootloader_config_is_wanted(): - self.generate_bootloader_config(mp) - self.install_bootloader(mp) - os.symlink(version_label, default_path) - - if self.bootloader_config_is_wanted(): - self.install_kernel(version_root, temp_root) - - def get_version_label(self, mp): - version_label = os.environ.get('VERSION_LABEL') - - if version_label is None: - raise cliapp.AppException('VERSION_LABEL was not given') - - if os.path.exists(os.path.join(mp, 'systems', version_label)): - raise cliapp.AppException('VERSION_LABEL %s already exists' - % version_label) - - return version_label - - -RawDiskWriteExtension().run() diff --git a/rawdisk.write.help b/rawdisk.write.help deleted file mode 100644 index 52ed73fb..00000000 --- a/rawdisk.write.help +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (C) 2014, 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, see . - -help: | - - Write a system produced by Morph to a physical disk, or to a file that can - be used as a virtual disk. 
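The upgrade path in rawdisk.write above leans on btrfs copy-on-write: the new version begins life as a snapshot of the running system's /systems/default/orig subvolume and is then rsynced up to date, so unchanged files cost no additional space. A condensed sketch of that flow, minus the bootloader and fstab handling:

    import os
    import subprocess

    def snapshot_and_sync(mountpoint, version_label, temp_root):
        # Snapshot the current 'orig' subvolume, then bring the snapshot
        # up to date from the newly built rootfs; --checksum makes rsync
        # rewrite only files whose contents actually changed.
        version_root = os.path.join(mountpoint, 'systems', version_label)
        os.mkdir(version_root)
        old_orig = os.path.join(mountpoint, 'systems', 'default', 'orig')
        new_orig = os.path.join(version_root, 'orig')
        subprocess.check_call(
            ['btrfs', 'subvolume', 'snapshot', old_orig, new_orig])
        subprocess.check_call(
            ['rsync', '-a', '--checksum', '--numeric-ids', '--delete',
             temp_root + os.sep, new_orig])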
The target will be formatted as a single Btrfs - partition, with the system image written to a subvolume in /systems, and - other subvolumes created for /home, /opt, /root, /srv and /var. - - When written to a physical drive, the drive can be used as the boot device - for a 'real' machine. - - When written to a file, the file can be used independently of `morph` to - create virtual machines with KVM / libvirt, OpenStack or, after converting - it to VDI format, VirtualBox. - - `morph deploy` will fail if the file specified by `location` already - exists. - - If used in `morph upgrade`, the rootfs produced by 'morph build' is added - to the existing raw disk image or device as an additional btrfs sub-volume. - `morph upgrade` will fail if the file specified by `location` does not - exist, or is not a Baserock raw disk image. (Most users are unlikely to - need or use this functionality: it is useful mainly for developers working - on the Baserock tools.) - - Parameters: - - * location: the pathname of the disk image to be created/upgraded, or the - path to the physical device. - - * VERSION_LABEL=label - should contain only alpha-numeric - characters and the '-' (hyphen) character. Mandatory if being used with - `morph update` - - * INITRAMFS_PATH=path: the location of an initramfs for the bootloader to - tell Linux to use, rather than booting the rootfs directly. - - * DTB_PATH=path: **(MANDATORY)** for systems that require a device tree - binary - Give the full path (without a leading /) to the location of the - DTB in the built system image . The deployment will fail if `path` does - not exist. - - * BOOTLOADER_INSTALL=value: the bootloader to be installed - **(MANDATORY)** for non-x86 systems - - allowed values = - - 'extlinux' (default) - the extlinux bootloader will - be installed - - 'none' - no bootloader will be installed by `morph deploy`. A - bootloader must be installed manually. This value must be used when - deploying non-x86 systems such as ARM. - - * BOOTLOADER_CONFIG_FORMAT=value: the bootloader format to be used. - If not specified for x86-32 and x86-64 systems, 'extlinux' will be used - - allowed values = - - 'extlinux' - - * KERNEL_ARGS=args: optional additional kernel command-line parameters to - be appended to the default set. The default set is: - - 'rw init=/sbin/init rootfstype=btrfs \ - rootflags=subvol=systems/default/run \ - root=[name or UUID of root filesystem]' - - (See https://www.kernel.org/doc/Documentation/kernel-parameters.txt) - - (See `morph help deploy` for details of how to pass parameters to write - extensions) diff --git a/sdk.write b/sdk.write deleted file mode 100755 index 8d3d2a63..00000000 --- a/sdk.write +++ /dev/null @@ -1,284 +0,0 @@ -#!/bin/sh -# Copyright (C) 2014 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
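One gap worth noting: the VERSION_LABEL character-set rule stated above is documented but not enforced; rawdisk.check only verifies that the variable is present. A validation along these lines would catch bad labels before any disk is touched (illustrative only, not shipped code):

    import re

    def check_version_label(label):
        # Enforce the documented constraint: alphanumerics and hyphens.
        if not re.match(r'^[A-Za-z0-9-]+$', label or ''):
            raise ValueError('Invalid VERSION_LABEL: %r' % label)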
-# -# =*= License: GPL-2 =*= - -set -eu - -die(){ - echo "$@" >&2 - exit 1 -} - -shellescape(){ - echo "'$(echo "$1" | sed -e "s/'/'\\''/g")'" -} - -########################## END OF COMMON HEADER ############################### -# -# The above lines, as well as being part of this script, are copied into the -# self-installing SDK blob's header script, as a means of re-using content. -# - -help(){ - cat <>"$OUTPUT_SCRIPT" <>"$OUTPUT_SCRIPT" <<'EOF' -########################### START OF HEADER SCRIPT ############################ - -usage(){ - cat <&2 - usage >&2 - exit 1 -fi - -TOOLCHAIN_PATH="$(readlink -f \"$1\")" - -sedescape(){ - # Escape the passed in string so it can be safely interpolated into - # a sed expression as a literal value. - echo "$1" | sed -e 's/[\/&]/\\&/g' -} - -prepend_to_path_elements(){ - # Prepend $1 to every entry in the : separated list specified as $2. - local prefix="$1" - ( - # Split path into components - IFS=: - set -- $2 - # Print path back out with new prefix - printf %s "$prefix/$1" - shift - for arg in "$@"; do - printf ":%s" "$prefix/$arg" - done - ) -} - -extract_rootfs(){ - # Extract the bzipped tarball at the end of the script passed as $1 - # to the path specified as $2 - local selfextractor="$1" - local target="$2" - local script_end="$(($(\ - grep -aEn -m1 -e '^#+ END OF HEADER SCRIPT #+$' "$selfextractor" | - cut -d: -f1) + 1 ))" - mkdir -p "$target" - tail -n +"$script_end" "$selfextractor" | tar -xj -C "$target" . -} - -amend_text_file_paths(){ - # Replace all instances of $3 with $4 in the directory specified by $1 - # excluding the subdirectory $2 - local root="$1" - local inner_sysroot="$2" - local old_prefix="$3" - local new_prefix="$4" - find "$root" \( -path "$inner_sysroot" -prune \) -o -type f \ - -exec sh -c 'file "$1" | grep -q text' - {} \; \ - -exec sed -i -e \ - "s/$(sedescape "$old_prefix")/$(sedescape "$new_prefix")/g" {} + -} - -filter_patchelf_errors(){ - # Filter out warnings from patchelf that are acceptable - # The warning that it's making a file bigger is just noise - # The warning about not being an ELF executable just means we got a - # false positive from file that it was an ELF binary - # Failing to find .interp is because for convenience, we set the - # interpreter in the same command as setting the rpath, even though - # we give it both executables and libraries. - grep -v -e 'warning: working around a Linux kernel bug' \ - -e 'not an ELF executable' \ - -e 'cannot find section .interp' -} - -patch_elves(){ - # Set the interpreter and library paths of ELF binaries in $1, - # except for the $2 subdirectory, using the patchelf command in the - # toolchain specified as $3, so that it uses the linker specified - # as $4 as the interpreter, and the runtime path specified by $5. - # - # The patchelf inside the toolchain is used to ensure that it works - # independently of the availability of patchelf on the host. - # - # This is possible by invoking the linker directly and specifying - # --linker-path as the RPATH we want to set the binaries to use. 
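In Python terms, the relocation step described in the comment above amounts to the following sketch; the paths are hypothetical stand-ins for wherever the SDK gets unpacked, and the real implementation is the shell function that follows.

    import subprocess

    LINKER = '/opt/sdk/sysroot/lib/ld-linux-armhf.so.3'  # hypothetical
    LIB_PATH = '/opt/sdk/sysroot/lib'                    # hypothetical
    PATCHELF = '/opt/sdk/sysroot/bin/patchelf'           # hypothetical

    def relocate(binary):
        # Invoke the toolchain's dynamic linker directly, with
        # --library-path pointing into the toolchain, so its own
        # patchelf runs without any help from the host; then rewrite
        # the binary's interpreter and RPATH to match.
        subprocess.check_call(
            [LINKER, '--library-path', LIB_PATH,
             PATCHELF, '--set-interpreter', LINKER,
             '--set-rpath', LIB_PATH, binary])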
- local root="$1" - local inner_sysroot="$2" - local patchelf="$3" - local linker="$4" - local lib_path="$5" - find "$root" \( -path "$inner_sysroot" -prune \) -o -type f \ - -type f -perm +u=x \ - -exec sh -c 'file "$1" | grep -q "ELF"' - {} \; \ - -exec "$linker" --library-path "$lib_path" \ - "$patchelf" --set-interpreter "$linker" \ - --set-rpath "$lib_path" {} \; 2>&1 \ - | filter_patchelf_errors -} - -generate_environment_setup(){ - local target="$1" - install -m 644 -D /dev/stdin "$target" <>"$OUTPUT_SCRIPT" . diff --git a/set-hostname.configure b/set-hostname.configure deleted file mode 100755 index 4b2424d8..00000000 --- a/set-hostname.configure +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/sh -# Copyright (C) 2013,2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - - -# Set hostname on system from HOSTNAME. - - -set -e - -if [ -n "$HOSTNAME" ] -then - echo "$HOSTNAME" > "$1/etc/hostname" -fi - diff --git a/simple-network.configure b/simple-network.configure deleted file mode 100755 index 4a70f311..00000000 --- a/simple-network.configure +++ /dev/null @@ -1,292 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2013,2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -'''A Morph deployment configuration extension to handle network configutation - -This extension prepares /etc/network/interfaces and networkd .network files -in /etc/systemd/network/ with the interfaces specified during deployment. - -If no network configuration is provided, eth0 will be configured for DHCP -with the hostname of the system in the case of /etc/network/interfaces. -In the case of networkd, any interface starting by e* will be configured -for DHCP -''' - - -import os -import sys -import errno -import cliapp - -import morphlib - - -class SimpleNetworkError(morphlib.Error): - '''Errors associated with simple network setup''' - pass - - -class SimpleNetworkConfigurationExtension(cliapp.Application): - '''Configure /etc/network/interfaces and generate networkd .network files - - Reading NETWORK_CONFIG, this extension sets up /etc/network/interfaces - and .network files in /etc/systemd/network/. 
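For concreteness, here is a typical NETWORK_CONFIG value and the stanzas the parser further down in this file produces for it (shown as plain data; in the class they come back from self.parse_network_stanzas):

    config = "lo:loopback;eth0:static,address=10.0.0.4,netmask=255.255.255.0"
    expected_stanzas = [
        {'name': 'lo', 'type': 'loopback', 'args': {}},
        {'name': 'eth0', 'type': 'static',
         'args': {'address': '10.0.0.4', 'netmask': '255.255.255.0'}},
    ]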
- ''' - - def process_args(self, args): - network_config = os.environ.get("NETWORK_CONFIG") - - self.rename_networkd_chunk_file(args) - - if network_config is None: - self.generate_default_network_config(args) - else: - self.status(msg="Processing NETWORK_CONFIG=%(nc)s", - nc=network_config) - - stanzas = self.parse_network_stanzas(network_config) - - self.generate_interfaces_file(args, stanzas) - self.generate_networkd_files(args, stanzas) - - def rename_networkd_chunk_file(self, args): - """Rename the 10-dhcp.network file generated in the systemd chunk - - The systemd chunk will place something in 10-dhcp.network, which will - have higher precedence than anything added in this extension (we - start at 50-*). - - We check for that file and rename it instead of removing it, in - case the file is being used by the user. - - Until both of the following happen, we should continue to rename that - default config file: - - 1. simple-network.configure is always run when systemd is included - 2. We've been building systems without systemd including that default - networkd config for long enough that nobody should be including - that config file. - """ - file_path = os.path.join(args[0], "etc", "systemd", "network", - "10-dhcp.network") - - if os.path.isfile(file_path): - try: - os.rename(file_path, file_path + ".morph") - self.status(msg="Renaming networkd file from systemd chunk: \ - %(f)s to %(f)s.morph", f=file_path) - except OSError: - pass - - def generate_default_network_config(self, args): - """Generate default network config: DHCP on all the interfaces""" - - default_network_config_interfaces = "lo:loopback;" \ - "eth0:dhcp,hostname=$(hostname)" - default_network_config_networkd = "e*:dhcp" - - stanzas_interfaces = self.parse_network_stanzas( - default_network_config_interfaces) - stanzas_networkd = self.parse_network_stanzas( - default_network_config_networkd) - - self.generate_interfaces_file(args, stanzas_interfaces) - self.generate_networkd_files(args, stanzas_networkd) - - def generate_interfaces_file(self, args, stanzas): - """Generate /etc/network/interfaces file""" - - iface_file = self.generate_iface_file(stanzas) - - directory_path = os.path.join(args[0], "etc", "network") - self.make_sure_path_exists(directory_path) - file_path = os.path.join(directory_path, "interfaces") - with open(file_path, "w") as f: - f.write(iface_file) - - def generate_iface_file(self, stanzas): - """Generate an interfaces file from the provided stanzas. - - The interfaces will be sorted by name, with loopback sorted first.
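A portability note: the loopback-first ordering is implemented just below with a cmp function, which exists only in Python 2. Under Python 3 the same ordering needs a key function, along these lines:

    def iface_sort_key(stanza):
        # False sorts before True, so 'lo' always comes first; other
        # interfaces fall back to plain name ordering.
        return (stanza['name'] != 'lo', stanza['name'])

    # usage: sorted(stanzas, key=iface_sort_key)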
- """ - - def cmp_iface_names(a, b): - a = a['name'] - b = b['name'] - if a == "lo": - return -1 - elif b == "lo": - return 1 - else: - return cmp(a,b) - - return "\n".join(self.generate_iface_stanza(stanza) - for stanza in sorted(stanzas, cmp=cmp_iface_names)) - - def generate_iface_stanza(self, stanza): - """Generate an interfaces stanza from the provided data.""" - - name = stanza['name'] - itype = stanza['type'] - lines = ["auto %s" % name, "iface %s inet %s" % (name, itype)] - lines += [" %s %s" % elem for elem in stanza['args'].items()] - lines += [""] - return "\n".join(lines) - - def generate_networkd_files(self, args, stanzas): - """Generate .network files""" - - for i, stanza in enumerate(stanzas, 50): - iface_file = self.generate_networkd_file(stanza) - - if iface_file is None: - continue - - directory_path = os.path.join(args[0], "etc", "systemd", "network") - self.make_sure_path_exists(directory_path) - file_path = os.path.join(directory_path, - "%s-%s.network" % (i, stanza['name'])) - - with open(file_path, "w") as f: - f.write(iface_file) - - def generate_networkd_file(self, stanza): - """Generate an .network file from the provided data.""" - - name = stanza['name'] - itype = stanza['type'] - pairs = stanza['args'].items() - - if itype == "loopback": - return - - lines = ["[Match]"] - lines += ["Name=%s\n" % name] - lines += ["[Network]"] - if itype == "dhcp": - lines += ["DHCP=yes"] - else: - lines += self.generate_networkd_entries(pairs) - - return "\n".join(lines) - - def generate_networkd_entries(self, pairs): - """Generate networkd configuration entries with the other parameters""" - - address = None - netmask = None - gateway = None - dns = None - lines = [] - - for pair in pairs: - if pair[0] == 'address': - address = pair[1] - elif pair[0] == 'netmask': - netmask = pair[1] - elif pair[0] == 'gateway': - gateway = pair[1] - elif pair[0] == 'dns': - dns = pair[1] - - if address and netmask: - network_suffix = self.convert_net_mask_to_cidr_suffix (netmask); - address_line = address + '/' + str(network_suffix) - lines += ["Address=%s" % address_line] - elif address or netmask: - raise Exception('address and netmask must be specified together') - - if gateway: - lines += ["Gateway=%s" % gateway] - - if dns: - lines += ["DNS=%s" % dns] - - return lines - - def convert_net_mask_to_cidr_suffix(self, mask): - """Convert dotted decimal form of a subnet mask to CIDR suffix notation - - For example: 255.255.255.0 -> 24 - """ - return sum(bin(int(x)).count('1') for x in mask.split('.')) - - def parse_network_stanzas(self, config): - """Parse a network config environment variable into stanzas. - - Network config stanzas are semi-colon separated. - """ - - return [self.parse_network_stanza(s) for s in config.split(";")] - - def parse_network_stanza(self, stanza): - """Parse a network config stanza into name, type and arguments. - - Each stanza is of the form name:type[,arg=value]... 
- - For example: - lo:loopback - eth0:dhcp - eth1:static,address=10.0.0.1,netmask=255.255.0.0 - """ - elements = stanza.split(",") - lead = elements.pop(0).split(":") - if len(lead) != 2: - raise SimpleNetworkError("Stanza '%s' is missing its type" % - stanza) - iface = lead[0] - iface_type = lead[1] - - if iface_type not in ['loopback', 'static', 'dhcp']: - raise SimpleNetworkError("Stanza '%s' has unknown interface type" - " '%s'" % (stanza, iface_type)) - - argpairs = [element.split("=", 1) for element in elements] - output_stanza = { "name": iface, - "type": iface_type, - "args": {} } - for argpair in argpairs: - if len(argpair) != 2: - raise SimpleNetworkError("Stanza '%s' has bad argument '%r'" - % (stanza, argpair.pop(0))) - if argpair[0] in output_stanza["args"]: - raise SimpleNetworkError("Stanza '%s' has repeated argument" - " %s" % (stanza, argpair[0])) - output_stanza["args"][argpair[0]] = argpair[1] - - return output_stanza - - def make_sure_path_exists(self, path): - try: - os.makedirs(path) - except OSError as e: - if e.errno == errno.EEXIST and os.path.isdir(path): - pass - else: - raise SimpleNetworkError("Unable to create directory '%s'" - % path) - - def status(self, **kwargs): - '''Provide status output. - - The ``msg`` keyword argument is the actual message, - the rest are values for fields in the message as interpolated - by %. - - ''' - - self.output.write('%s\n' % (kwargs['msg'] % kwargs)) - -SimpleNetworkConfigurationExtension().run() diff --git a/ssh-rsync.check b/ssh-rsync.check deleted file mode 100755 index c3bdfd29..00000000 --- a/ssh-rsync.check +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -'''Preparatory checks for Morph 'ssh-rsync' write extension''' - -import cliapp - -import os - -import morphlib.writeexts - -class SshRsyncCheckExtension(morphlib.writeexts.WriteExtension): - def process_args(self, args): - if len(args) != 1: - raise cliapp.AppException('Wrong number of command line args') - - upgrade = self.get_environment_boolean('UPGRADE') - if not upgrade: - raise cliapp.AppException( - 'The ssh-rsync write is for upgrading existing remote ' - 'Baserock machines. It cannot be used for an initial ' - 'deployment.') - - if os.environ.get('VERSION_LABEL', '') == '': - raise cliapp.AppException( - 'A VERSION_LABEL must be set when deploying an upgrade.') - - location = args[0] - self.check_ssh_connectivity(location) - self.check_is_baserock_system(location) - - # The new system that being deployed as an upgrade must contain - # baserock-system-config-sync and system-version-manager. However, the - # old system simply needs to have SSH and rsync. 
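The two checks defined just below share a pattern worth noting: rather than trusting the exit status of the remote command, which ssh conflates with its own failures, they have the remote shell print a sentinel string and compare against that. Stripped of cliapp, the same probe looks roughly like this:

    import subprocess

    def remote_command_exists(host, command):
        # Print a marker instead of relying on the exit status, so a
        # failed probe is distinguishable from a failed ssh connection.
        probe = 'type %s > /dev/null 2>&1 || echo -n cmdnotfound' % command
        output = subprocess.check_output(['ssh', host, probe])
        return output.strip() != b'cmdnotfound'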
- self.check_command_exists(location, 'rsync') - - def check_is_baserock_system(self, location): - output = cliapp.ssh_runcmd(location, ['sh', '-c', - 'test -d /baserock || echo -n dirnotfound']) - if output == 'dirnotfound': - raise cliapp.AppException('%s is not a baserock system' - % location) - - def check_command_exists(self, location, command): - test = 'type %s > /dev/null 2>&1 || echo -n cmdnotfound' % command - output = cliapp.ssh_runcmd(location, ['sh', '-c', test]) - if output == 'cmdnotfound': - raise cliapp.AppException( - "%s does not have %s" % (location, command)) - - -SshRsyncCheckExtension().run() diff --git a/ssh-rsync.write b/ssh-rsync.write deleted file mode 100755 index 6d596500..00000000 --- a/ssh-rsync.write +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2013-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - - -'''A Morph deployment write extension for upgrading systems over ssh.''' - - -import contextlib -import cliapp -import os -import sys -import time -import tempfile - -import morphlib.writeexts - - -def ssh_runcmd_ignore_failure(location, command, **kwargs): - try: - return cliapp.ssh_runcmd(location, command, **kwargs) - except cliapp.AppException: - pass - - -class SshRsyncWriteExtension(morphlib.writeexts.WriteExtension): - - '''See ssh-rsync.write.help for documentation''' - - - def find_root_disk(self, location): - '''Read /proc/mounts on location to find which device contains "/"''' - - self.status(msg='Finding device that contains "/"') - contents = cliapp.ssh_runcmd(location, ['cat', '/proc/mounts']) - for line in contents.splitlines(): - line_words = line.split() - if (line_words[1] == '/' and line_words[0] != 'rootfs'): - return line_words[0] - - @contextlib.contextmanager - def _remote_mount_point(self, location): - self.status(msg='Creating remote mount point') - remote_mnt = cliapp.ssh_runcmd(location, ['mktemp', '-d']).strip() - try: - yield remote_mnt - finally: - self.status(msg='Removing remote mount point') - cliapp.ssh_runcmd(location, ['rmdir', remote_mnt]) - - @contextlib.contextmanager - def _remote_mount(self, location, root_disk, mountpoint): - self.status(msg='Mounting root disk') - cliapp.ssh_runcmd(location, ['mount', root_disk, mountpoint]) - try: - yield - finally: - self.status(msg='Unmounting root disk') - cliapp.ssh_runcmd(location, ['umount', mountpoint]) - - @contextlib.contextmanager - def _created_version_root(self, location, remote_mnt, version_label): - version_root = os.path.join(remote_mnt, 'systems', version_label) - self.status(msg='Creating %(root)s', root=version_root) - cliapp.ssh_runcmd(location, ['mkdir', version_root]) - try: - yield version_root - except BaseException as e: - # catch all, we always want to clean up - self.status(msg='Cleaning up %(root)s', root=version_root) - ssh_runcmd_ignore_failure(location, ['rmdir', version_root]) - raise - - def get_old_orig(self, location, remote_mnt): - '''Identify which subvolume to snapshot from''' - - # rawdisk 
upgrades use 'default' - return os.path.join(remote_mnt, 'systems', 'default', 'orig') - - @contextlib.contextmanager - def _created_orig_subvolume(self, location, remote_mnt, version_root): - self.status(msg='Creating "orig" subvolume') - old_orig = self.get_old_orig(location, remote_mnt) - new_orig = os.path.join(version_root, 'orig') - cliapp.ssh_runcmd(location, ['btrfs', 'subvolume', 'snapshot', - old_orig, new_orig]) - try: - yield new_orig - except BaseException as e: - ssh_runcmd_ignore_failure( - location, ['btrfs', 'subvolume', 'delete', new_orig]) - raise - - def populate_remote_orig(self, location, new_orig, temp_root): - '''Populate the subvolume version_root/orig on location''' - - self.status(msg='Populating "orig" subvolume') - cliapp.runcmd(['rsync', '-as', '--checksum', '--numeric-ids', - '--delete', temp_root + os.path.sep, - '%s:%s' % (location, new_orig)]) - - @contextlib.contextmanager - def _deployed_version(self, location, version_label, - system_config_sync, system_version_manager): - self.status(msg='Calling system-version-manager to deploy upgrade') - deployment = os.path.join('/systems', version_label, 'orig') - cliapp.ssh_runcmd(location, - ['env', 'BASEROCK_SYSTEM_CONFIG_SYNC='+system_config_sync, - system_version_manager, 'deploy', deployment]) - try: - yield deployment - except BaseException as e: - self.status(msg='Cleaning up failed version installation') - cliapp.ssh_runcmd(location, - [system_version_manager, 'remove', version_label]) - raise - - def upgrade_remote_system(self, location, temp_root): - root_disk = self.find_root_disk(location) - uuid = cliapp.ssh_runcmd(location, ['blkid', '-s', 'UUID', '-o', - 'value', root_disk]).strip() - - self.complete_fstab_for_btrfs_layout(temp_root, uuid) - - version_label = os.environ['VERSION_LABEL'] - autostart = self.get_environment_boolean('AUTOSTART') - - with self._remote_mount_point(location) as remote_mnt, \ - self._remote_mount(location, root_disk, remote_mnt), \ - self._created_version_root(location, remote_mnt, - version_label) as version_root, \ - self._created_orig_subvolume(location, remote_mnt, - version_root) as orig: - self.populate_remote_orig(location, orig, temp_root) - system_root = os.path.join(remote_mnt, 'systems', - version_label, 'orig') - config_sync = os.path.join(system_root, 'usr', 'bin', - 'baserock-system-config-sync') - version_manager = os.path.join(system_root, 'usr', 'bin', - 'system-version-manager') - with self._deployed_version(location, version_label, - config_sync, version_manager): - self.status(msg='Setting %(v)s as the new default system', - v=version_label) - cliapp.ssh_runcmd(location, [version_manager, - 'set-default', version_label]) - - if autostart: - self.status(msg="Rebooting into new system ...") - ssh_runcmd_ignore_failure(location, ['reboot']) - - def process_args(self, args): - if len(args) != 2: - raise cliapp.AppException('Wrong number of command line args') - - temp_root, location = args - - self.upgrade_remote_system(location, temp_root) - - -SshRsyncWriteExtension().run() diff --git a/ssh-rsync.write.help b/ssh-rsync.write.help deleted file mode 100644 index f3f79ed5..00000000 --- a/ssh-rsync.write.help +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (C) 2014, 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. 
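Two remote probes in the write extension above do most of the groundwork: find_root_disk parses /proc/mounts for the device mounted at '/', and blkid then supplies the UUID used to complete the new system's fstab. Roughly, without cliapp:

    import subprocess

    def find_root_disk_uuid(host):
        # Find the device mounted at '/' (skipping the 'rootfs' pseudo
        # entry), then ask blkid for its filesystem UUID.
        mounts = subprocess.check_output(['ssh', host, 'cat', '/proc/mounts'])
        for line in mounts.decode().splitlines():
            fields = line.split()
            if fields[1] == '/' and fields[0] != 'rootfs':
                device = fields[0]
                break
        else:
            raise RuntimeError('no root device found on %s' % host)
        uuid = subprocess.check_output(
            ['ssh', host, 'blkid', '-s', 'UUID', '-o', 'value', device])
        return device, uuid.strip().decode()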
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, see . - -help: | - - Upgrade a Baserock system which is already deployed: - - as a KVM/LibVirt, OpenStack or vbox-ssh virtual machine; - - on a Jetson board. - - Copies a binary delta over to the target system and arranges for it - to be bootable. - - The recommended way to use this extension is by calling `morph upgrade`. - Using `morph deploy --upgrade` is deprecated. - - The upgrade will fail if: - - no VM is deployed and running at `location`; - - the target system is not a Baserock system; - - the target's filesystem and its layout are not compatible with that - created by `morph deploy`." - - See also the 'Upgrading a Baserock installation' section of the 'Using - Baserock` page at wiki.baserock.org - http://wiki.baserock.org/devel-with/#index8h2 - - Parameters: - - * location: the 'user@hostname' string that will be used by ssh and rsync. - 'user' will always be `root` and `hostname` the hostname or address of - the system being upgraded. - - * VERSION_LABEL=label - **(MANDATORY)** should contain only alpha-numeric - characters and the '-' (hyphen) character. - - * AUTOSTART=` - boolean. If it is set, the VM will be started when - it has been deployed. - - (See `morph help deploy` for details of how to pass parameters to write - extensions) diff --git a/sshkeys.configure b/sshkeys.configure deleted file mode 100755 index 7a5a8379..00000000 --- a/sshkeys.configure +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/sh -# -# Copyright 2014 Codethink Ltd -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -set -e - -if [ "$SSHKEYS" ] -then - install -d -m 700 "$1/root/.ssh" - echo Adding Key in "$SSHKEYS" to authorized_keys file - cat $SSHKEYS >> "$1/root/.ssh/authorized_keys" -fi diff --git a/strip-gplv3.configure b/strip-gplv3.configure deleted file mode 100755 index c08061ad..00000000 --- a/strip-gplv3.configure +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2013 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -''' A Morph configuration extension for removing gplv3 chunks from a system - -Using a hard-coded list of chunks, it will read the system's /baserock metadata -to find the files created by that chunk, then remove them. - -''' - -import cliapp -import re -import os -import json - -class StripGPLv3ConfigureExtension(cliapp.Application): - gplv3_chunks = [ - ['autoconf', ''], - ['automake', ''], - ['bash', ''], - ['binutils', ''], - ['bison', ''], - ['ccache', ''], - ['cmake', ''], - ['flex', ''], - ['gawk', ''], - ['gcc', r'^.*lib.*\.so(\.\d+)*$'], - ['gdbm', ''], - ['gettext', ''], - ['gperf', ''], - ['groff', ''], - ['libtool', r'^.*lib.*\.so(\.\d+)*$'], - ['m4', ''], - ['make', ''], - ['nano', ''], - ['patch', ''], - ['rsync', ''], - ['texinfo-tarball', ''], - ] - - def process_args(self, args): - target_root = args[0] - meta_dir = os.path.join(target_root, 'baserock') - - for chunk in self.gplv3_chunks: - regex = os.path.join(meta_dir, "%s-[^-]\+\.meta" % chunk[0]) - artifacts = self.runcmd(['find', meta_dir, '-regex', regex]) - - for artifact in artifacts.split(): - self.remove_chunk(target_root, artifact, chunk[1]) - - os.symlink(os.path.join(os.sep, 'bin', 'busybox'), - os.path.join(target_root, 'usr', 'bin', 'awk')) - - def remove_chunk(self, target_root, chunk, pattern): - chunk_meta_path = os.path.join(target_root, 'baserock', chunk) - - with open(chunk_meta_path, 'r') as f: - chunk_meta_data = json.load(f) - - if not 'contents' in chunk_meta_data: - raise cliapp.AppError('Chunk %s does not have a "contents" list' - % chunk) - updated_contents = [] - for content_entry in reversed(chunk_meta_data['contents']): - pat = re.compile(pattern) - if len(pattern) == 0 or not pat.match(content_entry): - self.remove_content_entry(target_root, content_entry) - else: - updated_contents.append(content_entry) - - def remove_content_entry(self, target_root, content_entry): - entry_path = os.path.join(target_root, './' + content_entry) - if not entry_path.startswith(target_root): - raise cliapp.AppException('%s is not in %s' - % (entry_path, target_root)) - if os.path.exists(entry_path): - if os.path.islink(entry_path): - os.unlink(entry_path) - elif os.path.isfile(entry_path): - os.remove(entry_path) - elif os.path.isdir(entry_path): - if not os.listdir(entry_path): - os.rmdir(entry_path) - else: - raise cliapp.AppException('%s is not a link, file or directory' - % entry_path) -StripGPLv3ConfigureExtension().run() diff --git a/swift-build-rings.yml b/swift-build-rings.yml deleted file mode 100644 index 1ffe9c37..00000000 --- a/swift-build-rings.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- hosts: localhost - vars: - - rings: - - { name: account, port: 6002 } - - { name: container, port: 6001 } - - { name: object, port: 6000 } - remote_user: root - tasks: - - file: path={{ ansible_env.ROOT }}/etc/swift owner=root group=root state=directory - - - name: Create ring - shell: swift-ring-builder {{ item.name }}.builder create {{ ansible_env.SWIFT_PART_POWER }} - {{ ansible_env.SWIFT_REPLICAS }} {{ ansible_env.SWIFT_MIN_PART_HOURS }} - with_items: rings - - - name: Add each storage node to the ring - shell: swift-ring-builder {{ item[0].name }}.builder - add r1z1-{{ item[1].ip }}:{{ item[0].port }}/{{ item[1].device }} {{ item[1].weight }} - with_nested: - - rings - - 
ansible_env.SWIFT_STORAGE_DEVICES - - - name: Rebalance the ring - shell: swift-ring-builder {{ item.name }}.builder rebalance {{ ansible_env.SWIFT_REBALANCE_SEED }} - with_items: rings - - - name: Copy ring configuration files into place - copy: src={{ item.name }}.ring.gz dest={{ ansible_env.ROOT }}/etc/swift - with_items: rings - - - name: Copy ring builder files into place - copy: src={{ item.name }}.builder dest={{ ansible_env.ROOT }}/etc/swift - with_items: rings diff --git a/swift-storage-devices-validate.py b/swift-storage-devices-validate.py deleted file mode 100755 index 57ab23d0..00000000 --- a/swift-storage-devices-validate.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright © 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . -# -# This is used by the openstack-swift.configure extension -# to validate any provided storage device specifiers -# under SWIFT_STORAGE_DEVICES -# - - -''' - This is used by the swift-storage.configure extension - to validate any storage device specifiers specified - in the SWIFT_STORAGE_DEVICES environment variable -''' - -from __future__ import print_function - -import yaml -import sys - -EXAMPLE_DEVSPEC = '{device: sdb1, ip: 127.0.0.1, weight: 100}' -REQUIRED_KEYS = ['ip', 'device', 'weight'] - -def err(msg): - print(msg, file=sys.stderr) - sys.exit(1) - -if len(sys.argv) != 2: - err('usage: %s STRING_TO_BE_VALIDATED' % sys.argv[0]) - -swift_storage_devices = yaml.load(sys.argv[1]) - -if not isinstance(swift_storage_devices, list): - err('Expected list of device specifiers\n' - 'Example: [%s]' % EXAMPLE_DEVSPEC) - -for d in swift_storage_devices: - if not isinstance(d, dict): - err("Invalid device specifier: `%s'\n" - 'Device specifier must be a dictionary\n' - 'Example: %s' % (d, EXAMPLE_DEVSPEC)) - - if set(d.keys()) != set(REQUIRED_KEYS): - err("Invalid device specifier: `%s'\n" - 'Specifier should contain: %s\n' - 'Example: %s' % (d, str(REQUIRED_KEYS)[1:-1], EXAMPLE_DEVSPEC)) diff --git a/swift-storage.configure b/swift-storage.configure deleted file mode 100644 index 391b392a..00000000 --- a/swift-storage.configure +++ /dev/null @@ -1,107 +0,0 @@ -#!/bin/bash -# -# Copyright © 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -set -e - -# The ansible script needs to know where the rootfs is, so we export it here -export ROOT="$1" - -validate_number() { - local name="$1" - local value="$2" - - local pattern='^[0-9]+$' - if ! 
[[ $value =~ $pattern ]] - then - echo "'$name' must be a number" >&2 - exit 1 - fi -} - -validate_non_empty() { - local name="$1" - local value="$2" - - if [[ $value = None ]] - then - echo "'$name' cannot be empty" >&2 - exit 1 - fi -} - -MANDATORY_OPTIONS="SWIFT_HASH_PATH_PREFIX \ - SWIFT_HASH_PATH_SUFFIX \ - SWIFT_REBALANCE_SEED \ - SWIFT_PART_POWER \ - SWIFT_REPLICAS \ - SWIFT_MIN_PART_HOURS \ - SWIFT_STORAGE_DEVICES \ - CONTROLLER_HOST_ADDRESS \ - MANAGEMENT_INTERFACE_IP_ADDRESS" - -for option in $MANDATORY_OPTIONS -do - if ! [[ -v $option ]] - then - missing_option=True - echo "Required option $option isn't set!" >&2 - fi -done - -if [[ $missing_option = True ]]; then exit 1; fi - -./swift-storage-devices-validate.py "$SWIFT_STORAGE_DEVICES" - -# Validate SWIFT_PART_POWER, SWIFT_REPLICAS, SWIFT_MIN_PART_HOURS -# just make sure they're numbers - -validate_number "SWIFT_PART_POWER" "$SWIFT_PART_POWER" -validate_number "SWIFT_REPLICAS" "$SWIFT_REPLICAS" -validate_number "SWIFT_MIN_PART_HOURS" "$SWIFT_MIN_PART_HOURS" - -# Make sure these aren't empty -validate_non_empty "SWIFT_HASH_PATH_PREFIX" "$SWIFT_HASH_PATH_PREFIX" -validate_non_empty "SWIFT_HASH_PATH_SUFFIX" "$SWIFT_HASH_PATH_SUFFIX" -validate_non_empty "SWIFT_REBALANCE_SEED" "$SWIFT_REBALANCE_SEED" -validate_non_empty "CONTROLLER_HOST_ADDRESS" "$CONTROLLER_HOST_ADDRESS" -validate_non_empty "MANAGEMENT_INTERFACE_IP_ADDRESS" "$MANAGEMENT_INTERFACE_IP_ADDRESS" - -mkdir -p "$ROOT/usr/lib/systemd/system/multi-user.target.wants" # ensure this exists before we make symlinks - -# A swift controller needs the storage setup service -# but does not want any of the other storage services enabled -ln -s "/usr/lib/systemd/system/swift-storage-setup.service" \ - "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-storage-setup.service" - -SWIFT_CONTROLLER=${SWIFT_CONTROLLER:-False} - -if [[ $SWIFT_CONTROLLER = False ]] -then - ln -s "/usr/lib/systemd/system/rsync.service" \ - "$ROOT/usr/lib/systemd/system/multi-user.target.wants/rsync.service" - ln -s "/usr/lib/systemd/system/swift-storage.service" \ - "$ROOT/usr/lib/systemd/system/multi-user.target.wants/swift-storage.service" -fi - -# Build swift data structures (the rings) -/usr/bin/ansible-playbook -i hosts swift-build-rings.yml - -cat << EOF > "$ROOT"/usr/share/swift/swift-storage-vars.yml ---- -MANAGEMENT_INTERFACE_IP_ADDRESS: $MANAGEMENT_INTERFACE_IP_ADDRESS -SWIFT_HASH_PATH_PREFIX: $SWIFT_HASH_PATH_PREFIX -SWIFT_HASH_PATH_SUFFIX: $SWIFT_HASH_PATH_SUFFIX -EOF diff --git a/sysroot.check b/sysroot.check deleted file mode 100755 index 71b35175..00000000 --- a/sysroot.check +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/sh -# Copyright (C) 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . 
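The mandatory-variable loop above relies on bash's [[ -v ]] test; the same gate in Python, including the numeric checks that follow it, would look like this (a sketch mirroring the shell, not shipped code):

    import os

    REQUIRED = ['SWIFT_HASH_PATH_PREFIX', 'SWIFT_HASH_PATH_SUFFIX',
                'SWIFT_REBALANCE_SEED', 'SWIFT_PART_POWER',
                'SWIFT_REPLICAS', 'SWIFT_MIN_PART_HOURS',
                'SWIFT_STORAGE_DEVICES', 'CONTROLLER_HOST_ADDRESS',
                'MANAGEMENT_INTERFACE_IP_ADDRESS']
    NUMERIC = ['SWIFT_PART_POWER', 'SWIFT_REPLICAS', 'SWIFT_MIN_PART_HOURS']

    missing = [name for name in REQUIRED if name not in os.environ]
    if missing:
        raise SystemExit("Required options not set: %s" % ', '.join(missing))
    for name in NUMERIC:
        if not os.environ[name].isdigit():
            raise SystemExit("'%s' must be a number" % name)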
- -# Preparatory checks for Morph 'sysroot' write extension - -set -eu - -if [ "$UPGRADE" == "yes" ]; then - echo >&2 "ERROR: Cannot upgrade a sysroot deployment" - exit 1 -fi diff --git a/sysroot.write b/sysroot.write deleted file mode 100755 index 46f1a780..00000000 --- a/sysroot.write +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh -# Copyright (C) 2014,2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -# A Morph write extension to deploy to another directory - -set -eu - -mkdir -p "$2" - -cp -a "$1"/* "$2" diff --git a/systems/armv7lhf-cross-toolchain-system-x86_32.morph b/systems/armv7lhf-cross-toolchain-system-x86_32.morph index 9fe7888a..b0ac9cfe 100644 --- a/systems/armv7lhf-cross-toolchain-system-x86_32.morph +++ b/systems/armv7lhf-cross-toolchain-system-x86_32.morph @@ -11,9 +11,9 @@ strata: - name: cross-tools morph: strata/cross-tools.morph configuration-extensions: -- set-hostname -- add-config-files -- simple-network -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/simple-network +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git a/systems/armv7lhf-cross-toolchain-system-x86_64.morph b/systems/armv7lhf-cross-toolchain-system-x86_64.morph index c1de199c..1bd1adc1 100644 --- a/systems/armv7lhf-cross-toolchain-system-x86_64.morph +++ b/systems/armv7lhf-cross-toolchain-system-x86_64.morph @@ -11,9 +11,9 @@ strata: - name: cross-tools morph: strata/cross-tools.morph configuration-extensions: -- set-hostname -- add-config-files -- simple-network -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/simple-network +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git a/systems/base-system-armv7-highbank.morph b/systems/base-system-armv7-highbank.morph index ffc5e188..32d773e8 100644 --- a/systems/base-system-armv7-highbank.morph +++ b/systems/base-system-armv7-highbank.morph @@ -13,8 +13,8 @@ strata: - name: bsp-armv7-highbank morph: strata/bsp-armv7-highbank.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git a/systems/base-system-armv7-versatile.morph b/systems/base-system-armv7-versatile.morph index 8de2b35f..4f039c02 100644 --- a/systems/base-system-armv7-versatile.morph +++ b/systems/base-system-armv7-versatile.morph @@ -13,8 +13,8 @@ strata: - name: bsp-armv7-versatile morph: strata/bsp-armv7-versatile.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git 
a/systems/base-system-armv7b-highbank.morph b/systems/base-system-armv7b-highbank.morph index 23bf4dbf..969967b5 100644 --- a/systems/base-system-armv7b-highbank.morph +++ b/systems/base-system-armv7b-highbank.morph @@ -13,8 +13,8 @@ strata: - name: bsp-armv7b-highbank morph: strata/bsp-armv7b-highbank.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git a/systems/base-system-armv7b-vexpress-tc2.morph b/systems/base-system-armv7b-vexpress-tc2.morph index b06ead7b..cbfedd89 100644 --- a/systems/base-system-armv7b-vexpress-tc2.morph +++ b/systems/base-system-armv7b-vexpress-tc2.morph @@ -12,8 +12,8 @@ strata: - name: bsp-armv7b-vexpress-tc2 morph: strata/bsp-armv7b-vexpress-tc2.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git a/systems/base-system-armv7lhf-highbank.morph b/systems/base-system-armv7lhf-highbank.morph index c827f3a2..399931a5 100644 --- a/systems/base-system-armv7lhf-highbank.morph +++ b/systems/base-system-armv7lhf-highbank.morph @@ -13,8 +13,8 @@ strata: - name: bsp-armv7-highbank morph: strata/bsp-armv7-highbank.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git a/systems/base-system-armv8b64.morph b/systems/base-system-armv8b64.morph index 49e7dac7..d23bde9b 100644 --- a/systems/base-system-armv8b64.morph +++ b/systems/base-system-armv8b64.morph @@ -14,9 +14,9 @@ strata: - name: bsp-armv8b64-generic morph: strata/bsp-armv8b64-generic.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- moonshot-kernel -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/moonshot-kernel +- extensions/install-essential-files diff --git a/systems/base-system-armv8l64.morph b/systems/base-system-armv8l64.morph index 560add69..24104a8b 100644 --- a/systems/base-system-armv8l64.morph +++ b/systems/base-system-armv8l64.morph @@ -14,9 +14,9 @@ strata: - name: bsp-armv8l64-generic morph: strata/bsp-armv8l64-generic.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- moonshot-kernel -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/moonshot-kernel +- extensions/install-essential-files diff --git a/systems/base-system-ppc64-generic.morph b/systems/base-system-ppc64-generic.morph index 3763cce5..6da852e3 100644 --- a/systems/base-system-ppc64-generic.morph +++ b/systems/base-system-ppc64-generic.morph @@ -13,8 +13,8 @@ strata: - name: bsp-ppc64-generic morph: strata/bsp-ppc64-generic.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- 
extensions/install-essential-files diff --git a/systems/base-system-x86_32-generic.morph b/systems/base-system-x86_32-generic.morph index 7ada4052..d89fd913 100644 --- a/systems/base-system-x86_32-generic.morph +++ b/systems/base-system-x86_32-generic.morph @@ -12,8 +12,8 @@ strata: - name: bsp-x86_32-generic morph: strata/bsp-x86_32-generic.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git a/systems/base-system-x86_64-generic.morph b/systems/base-system-x86_64-generic.morph index 796c8185..aa1659b3 100644 --- a/systems/base-system-x86_64-generic.morph +++ b/systems/base-system-x86_64-generic.morph @@ -13,8 +13,8 @@ strata: - name: bsp-x86_64-generic morph: strata/bsp-x86_64-generic.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git a/systems/build-system-armv5l-openbmc-aspeed.morph b/systems/build-system-armv5l-openbmc-aspeed.morph index 4eb0b6e7..7230bbc0 100644 --- a/systems/build-system-armv5l-openbmc-aspeed.morph +++ b/systems/build-system-armv5l-openbmc-aspeed.morph @@ -35,9 +35,9 @@ strata: - name: mtd-utilities morph: strata/mtd-utilities.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/build-system-armv7lhf-highbank.morph b/systems/build-system-armv7lhf-highbank.morph index d43ac935..42156d78 100644 --- a/systems/build-system-armv7lhf-highbank.morph +++ b/systems/build-system-armv7lhf-highbank.morph @@ -44,12 +44,12 @@ strata: - name: unionfs-fuse-group morph: strata/unionfs-fuse-group.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- distbuild -- fstab -- mason -- cloud-init -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/distbuild +- extensions/fstab +- extensions/mason +- extensions/cloud-init +- extensions/install-essential-files diff --git a/systems/build-system-armv7lhf-jetson.morph b/systems/build-system-armv7lhf-jetson.morph index fa948037..3bdcf9f0 100644 --- a/systems/build-system-armv7lhf-jetson.morph +++ b/systems/build-system-armv7lhf-jetson.morph @@ -42,12 +42,12 @@ strata: - name: ostree-core morph: strata/ostree-core.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- distbuild -- fstab -- mason -- cloud-init -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/distbuild +- extensions/fstab +- extensions/mason +- extensions/cloud-init +- extensions/install-essential-files diff --git a/systems/build-system-armv8b64.morph b/systems/build-system-armv8b64.morph index 84495016..aa16f545 100644 --- a/systems/build-system-armv8b64.morph +++ b/systems/build-system-armv8b64.morph @@ -45,13 +45,13 @@ strata: - name: devtools morph: strata/devtools.morph 
configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- distbuild -- fstab -- mason -- cloud-init -- moonshot-kernel -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/distbuild +- extensions/fstab +- extensions/mason +- extensions/cloud-init +- extensions/moonshot-kernel +- extensions/install-essential-files diff --git a/systems/build-system-armv8l64.morph b/systems/build-system-armv8l64.morph index f79fb76e..e7ddf034 100644 --- a/systems/build-system-armv8l64.morph +++ b/systems/build-system-armv8l64.morph @@ -45,13 +45,13 @@ strata: - name: ostree-core morph: strata/ostree-core.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- distbuild -- fstab -- mason -- cloud-init -- moonshot-kernel -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/distbuild +- extensions/fstab +- extensions/mason +- extensions/cloud-init +- extensions/moonshot-kernel +- extensions/install-essential-files diff --git a/systems/build-system-ppc64.morph b/systems/build-system-ppc64.morph index 38f2e9f3..642df309 100644 --- a/systems/build-system-ppc64.morph +++ b/systems/build-system-ppc64.morph @@ -42,12 +42,12 @@ strata: - name: ostree-core morph: strata/ostree-core.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- distbuild -- fstab -- mason -- cloud-init -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/distbuild +- extensions/fstab +- extensions/mason +- extensions/cloud-init +- extensions/install-essential-files diff --git a/systems/build-system-x86_32-chroot.morph b/systems/build-system-x86_32-chroot.morph index f193841f..646ae6a8 100644 --- a/systems/build-system-x86_32-chroot.morph +++ b/systems/build-system-x86_32-chroot.morph @@ -42,12 +42,12 @@ strata: - name: ostree-core morph: strata/ostree-core.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- distbuild -- fstab -- mason -- cloud-init -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/distbuild +- extensions/fstab +- extensions/mason +- extensions/cloud-init +- extensions/install-essential-files diff --git a/systems/build-system-x86_32.morph b/systems/build-system-x86_32.morph index a802b64f..c14b54f2 100644 --- a/systems/build-system-x86_32.morph +++ b/systems/build-system-x86_32.morph @@ -44,12 +44,12 @@ strata: - name: ostree-core morph: strata/ostree-core.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- distbuild -- fstab -- mason -- cloud-init -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/distbuild +- extensions/fstab +- extensions/mason +- extensions/cloud-init +- extensions/install-essential-files diff --git a/systems/build-system-x86_64-chroot.morph b/systems/build-system-x86_64-chroot.morph index fa54f9d3..82493d1e 100644 --- a/systems/build-system-x86_64-chroot.morph +++ b/systems/build-system-x86_64-chroot.morph @@ -42,12 +42,12 @@ strata: - name: ostree-core morph: strata/ostree-core.morph configuration-extensions: -- 
set-hostname -- add-config-files -- nfsboot -- install-files -- distbuild -- fstab -- mason -- cloud-init -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/distbuild +- extensions/fstab +- extensions/mason +- extensions/cloud-init +- extensions/install-essential-files diff --git a/systems/build-system-x86_64.morph b/systems/build-system-x86_64.morph index 8fe5f91f..4b688555 100644 --- a/systems/build-system-x86_64.morph +++ b/systems/build-system-x86_64.morph @@ -44,12 +44,12 @@ strata: - name: ostree-core morph: strata/ostree-core.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- distbuild -- fstab -- mason -- cloud-init -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/distbuild +- extensions/fstab +- extensions/mason +- extensions/cloud-init +- extensions/install-essential-files diff --git a/systems/ceph-service-x86_64-generic.morph b/systems/ceph-service-x86_64-generic.morph index 7431e56a..daaa6957 100644 --- a/systems/ceph-service-x86_64-generic.morph +++ b/systems/ceph-service-x86_64-generic.morph @@ -55,10 +55,10 @@ strata: - name: chef morph: strata/chef.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- ceph -- cloud-init -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/ceph +- extensions/cloud-init +- extensions/install-essential-files diff --git a/systems/cxmanage-system-x86_64-generic.morph b/systems/cxmanage-system-x86_64-generic.morph index aaa0fa81..ed7e8d63 100644 --- a/systems/cxmanage-system-x86_64-generic.morph +++ b/systems/cxmanage-system-x86_64-generic.morph @@ -16,9 +16,9 @@ strata: - name: cxmanage morph: strata/cxmanage.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-armv7-chroot.morph b/systems/devel-system-armv7-chroot.morph index 620d8fb9..d7a06fe9 100644 --- a/systems/devel-system-armv7-chroot.morph +++ b/systems/devel-system-armv7-chroot.morph @@ -54,9 +54,9 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-armv7-highbank.morph b/systems/devel-system-armv7-highbank.morph index a92561fe..854edd76 100644 --- a/systems/devel-system-armv7-highbank.morph +++ b/systems/devel-system-armv7-highbank.morph @@ -57,9 +57,9 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-armv7-versatile.morph 
b/systems/devel-system-armv7-versatile.morph index 50588f66..34c9a239 100644 --- a/systems/devel-system-armv7-versatile.morph +++ b/systems/devel-system-armv7-versatile.morph @@ -55,9 +55,9 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-armv7-wandboard.morph b/systems/devel-system-armv7-wandboard.morph index e2c9e175..300981f9 100644 --- a/systems/devel-system-armv7-wandboard.morph +++ b/systems/devel-system-armv7-wandboard.morph @@ -55,9 +55,9 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-armv7b-chroot.morph b/systems/devel-system-armv7b-chroot.morph index 34bc04f1..4eed73e4 100644 --- a/systems/devel-system-armv7b-chroot.morph +++ b/systems/devel-system-armv7b-chroot.morph @@ -46,9 +46,9 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-armv7b-highbank.morph b/systems/devel-system-armv7b-highbank.morph index cddd5ff4..ba5de00b 100644 --- a/systems/devel-system-armv7b-highbank.morph +++ b/systems/devel-system-armv7b-highbank.morph @@ -53,9 +53,9 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-armv7lhf-chroot.morph b/systems/devel-system-armv7lhf-chroot.morph index a8d46bd1..d0917fcb 100644 --- a/systems/devel-system-armv7lhf-chroot.morph +++ b/systems/devel-system-armv7lhf-chroot.morph @@ -54,9 +54,9 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-armv7lhf-highbank.morph b/systems/devel-system-armv7lhf-highbank.morph index 9722644c..189c2913 100644 --- a/systems/devel-system-armv7lhf-highbank.morph +++ b/systems/devel-system-armv7lhf-highbank.morph @@ -60,9 +60,9 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- 
extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-armv7lhf-jetson.morph b/systems/devel-system-armv7lhf-jetson.morph index e3d1843d..1c39b3ea 100644 --- a/systems/devel-system-armv7lhf-jetson.morph +++ b/systems/devel-system-armv7lhf-jetson.morph @@ -58,9 +58,9 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-armv7lhf-wandboard.morph b/systems/devel-system-armv7lhf-wandboard.morph index a47df980..4432f947 100644 --- a/systems/devel-system-armv7lhf-wandboard.morph +++ b/systems/devel-system-armv7lhf-wandboard.morph @@ -58,9 +58,9 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-armv8b64.morph b/systems/devel-system-armv8b64.morph index 6c5f23ee..44ff2d53 100644 --- a/systems/devel-system-armv8b64.morph +++ b/systems/devel-system-armv8b64.morph @@ -57,11 +57,11 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- simple-network -- nfsboot -- install-files -- cloud-init -- moonshot-kernel -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/simple-network +- extensions/nfsboot +- extensions/install-files +- extensions/cloud-init +- extensions/moonshot-kernel +- extensions/install-essential-files diff --git a/systems/devel-system-armv8l64.morph b/systems/devel-system-armv8l64.morph index cd7a1e44..cad71c4e 100644 --- a/systems/devel-system-armv8l64.morph +++ b/systems/devel-system-armv8l64.morph @@ -57,12 +57,12 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- simple-network -- nfsboot -- install-files -- cloud-init -- moonshot-kernel -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/simple-network +- extensions/nfsboot +- extensions/install-files +- extensions/cloud-init +- extensions/moonshot-kernel +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-ppc64-chroot.morph b/systems/devel-system-ppc64-chroot.morph index b92073a9..fe437767 100644 --- a/systems/devel-system-ppc64-chroot.morph +++ b/systems/devel-system-ppc64-chroot.morph @@ -52,9 +52,9 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-ppc64-generic.morph b/systems/devel-system-ppc64-generic.morph index 4d81ff5c..26776429 100644 --- a/systems/devel-system-ppc64-generic.morph +++ b/systems/devel-system-ppc64-generic.morph @@ -55,9 +55,9 @@ strata: - 
name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-x86_32-chroot.morph b/systems/devel-system-x86_32-chroot.morph index 7eb4fc01..bc3872a7 100644 --- a/systems/devel-system-x86_32-chroot.morph +++ b/systems/devel-system-x86_32-chroot.morph @@ -54,9 +54,9 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-x86_32-generic.morph b/systems/devel-system-x86_32-generic.morph index 1fd44086..952bff79 100644 --- a/systems/devel-system-x86_32-generic.morph +++ b/systems/devel-system-x86_32-generic.morph @@ -59,10 +59,10 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- cloud-init -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/cloud-init +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-x86_64-chroot.morph b/systems/devel-system-x86_64-chroot.morph index 48f23cd0..394201e0 100644 --- a/systems/devel-system-x86_64-chroot.morph +++ b/systems/devel-system-x86_64-chroot.morph @@ -56,9 +56,9 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-x86_64-generic.morph b/systems/devel-system-x86_64-generic.morph index b49964dd..fe56a5de 100644 --- a/systems/devel-system-x86_64-generic.morph +++ b/systems/devel-system-x86_64-generic.morph @@ -59,11 +59,11 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- simple-network -- nfsboot -- install-files -- cloud-init -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/simple-network +- extensions/nfsboot +- extensions/install-files +- extensions/cloud-init +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/devel-system-x86_64-vagrant.morph b/systems/devel-system-x86_64-vagrant.morph index c6a5f6fe..b8d4d2e7 100644 --- a/systems/devel-system-x86_64-vagrant.morph +++ b/systems/devel-system-x86_64-vagrant.morph @@ -57,10 +57,10 @@ strata: - name: coreutils-common morph: strata/coreutils-common.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- vagrant -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/vagrant +- extensions/fstab +- extensions/install-essential-files diff --git 
a/systems/genivi-baseline-system-armv7lhf-jetson.morph b/systems/genivi-baseline-system-armv7lhf-jetson.morph index 9306426d..71247d39 100644 --- a/systems/genivi-baseline-system-armv7lhf-jetson.morph +++ b/systems/genivi-baseline-system-armv7lhf-jetson.morph @@ -38,10 +38,10 @@ strata: - name: weston-genivi morph: strata/weston-genivi.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- strip-gplv3 -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/strip-gplv3 +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/genivi-baseline-system-armv7lhf-versatile.morph b/systems/genivi-baseline-system-armv7lhf-versatile.morph index 698230bb..429ca2d8 100644 --- a/systems/genivi-baseline-system-armv7lhf-versatile.morph +++ b/systems/genivi-baseline-system-armv7lhf-versatile.morph @@ -36,10 +36,10 @@ strata: - name: weston-genivi morph: strata/weston-genivi.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- strip-gplv3 -- fstab -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/strip-gplv3 +- extensions/fstab +- extensions/install-essential-files diff --git a/systems/genivi-baseline-system-x86_64-generic.morph b/systems/genivi-baseline-system-x86_64-generic.morph index f04485bc..6048e078 100644 --- a/systems/genivi-baseline-system-x86_64-generic.morph +++ b/systems/genivi-baseline-system-x86_64-generic.morph @@ -40,9 +40,9 @@ strata: - name: weston-genivi morph: strata/weston-genivi.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- strip-gplv3 -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/strip-gplv3 +- extensions/install-essential-files diff --git a/systems/installer-system-armv8b64.morph b/systems/installer-system-armv8b64.morph index 726354c9..f1a83d6d 100644 --- a/systems/installer-system-armv8b64.morph +++ b/systems/installer-system-armv8b64.morph @@ -28,9 +28,9 @@ strata: - name: installer-utils morph: strata/installer-utils.morph configuration-extensions: -- set-hostname -- install-files -- fstab -- installer -- moonshot-kernel -- install-essential-files +- extensions/set-hostname +- extensions/install-files +- extensions/fstab +- extensions/installer +- extensions/moonshot-kernel +- extensions/install-essential-files diff --git a/systems/installer-system-x86_64.morph b/systems/installer-system-x86_64.morph index 3d0ced5d..e9c4cd94 100644 --- a/systems/installer-system-x86_64.morph +++ b/systems/installer-system-x86_64.morph @@ -28,8 +28,8 @@ strata: - name: installer-utils morph: strata/installer-utils.morph configuration-extensions: -- set-hostname -- install-files -- fstab -- installer -- install-essential-files +- extensions/set-hostname +- extensions/install-files +- extensions/fstab +- extensions/installer +- extensions/install-essential-files diff --git a/systems/minimal-system-armv5l-openbmc-aspeed.morph b/systems/minimal-system-armv5l-openbmc-aspeed.morph index fe596057..602e05c7 100644 --- a/systems/minimal-system-armv5l-openbmc-aspeed.morph +++ b/systems/minimal-system-armv5l-openbmc-aspeed.morph @@ -12,9 +12,9 @@ strata: artifacts: - bsp-armv5l-openbmc-aspeed-runtime configuration-extensions: -- set-hostname -- 
simple-network -- nfsboot -- install-files -- busybox-init -- install-essential-files +- extensions/set-hostname +- extensions/simple-network +- extensions/nfsboot +- extensions/install-files +- extensions/busybox-init +- extensions/install-essential-files diff --git a/systems/minimal-system-x86_32-generic.morph b/systems/minimal-system-x86_32-generic.morph index 785a72a2..2e9f79c5 100644 --- a/systems/minimal-system-x86_32-generic.morph +++ b/systems/minimal-system-x86_32-generic.morph @@ -13,9 +13,9 @@ strata: artifacts: - bsp-x86_32-generic-runtime configuration-extensions: -- set-hostname -- simple-network -- nfsboot -- install-files -- busybox-init -- install-essential-files +- extensions/set-hostname +- extensions/simple-network +- extensions/nfsboot +- extensions/install-files +- extensions/busybox-init +- extensions/install-essential-files diff --git a/systems/minimal-system-x86_64-generic.morph b/systems/minimal-system-x86_64-generic.morph index 9da22ec8..afd9460c 100644 --- a/systems/minimal-system-x86_64-generic.morph +++ b/systems/minimal-system-x86_64-generic.morph @@ -13,9 +13,9 @@ strata: artifacts: - bsp-x86_64-generic-runtime configuration-extensions: -- set-hostname -- simple-network -- nfsboot -- install-files -- busybox-init -- install-essential-files +- extensions/set-hostname +- extensions/simple-network +- extensions/nfsboot +- extensions/install-files +- extensions/busybox-init +- extensions/install-essential-files diff --git a/systems/nodejs-system-x86_64.morph b/systems/nodejs-system-x86_64.morph index d5ebcf30..b758d5bd 100644 --- a/systems/nodejs-system-x86_64.morph +++ b/systems/nodejs-system-x86_64.morph @@ -15,8 +15,8 @@ strata: - name: nodejs morph: strata/nodejs.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git a/systems/ocaml-system-x86_64.morph b/systems/ocaml-system-x86_64.morph index 1903e4d6..9ac35d6f 100644 --- a/systems/ocaml-system-x86_64.morph +++ b/systems/ocaml-system-x86_64.morph @@ -14,7 +14,7 @@ strata: - name: ocaml-language morph: strata/ocaml-language.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files diff --git a/systems/openstack-system-x86_64.morph b/systems/openstack-system-x86_64.morph index 8ab38bee..2f700e68 100644 --- a/systems/openstack-system-x86_64.morph +++ b/systems/openstack-system-x86_64.morph @@ -66,20 +66,20 @@ strata: - name: python-tools morph: strata/python-tools.morph configuration-extensions: -- set-hostname -- add-config-files -- simple-network -- nfsboot -- install-files -- cloud-init -- hosts -- openstack-keystone -- openstack-glance -- openstack-cinder -- openstack-nova -- openstack-network -- openstack-neutron -- openstack-ceilometer -- fstab -- openstack-ironic -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/simple-network +- extensions/nfsboot +- extensions/install-files +- extensions/cloud-init +- extensions/hosts +- extensions/openstack-keystone +- extensions/openstack-glance +- extensions/openstack-cinder +- extensions/openstack-nova +- extensions/openstack-network +- extensions/openstack-neutron +- extensions/openstack-ceilometer +- extensions/fstab +- extensions/openstack-ironic +- 
extensions/install-essential-files diff --git a/systems/qt4-devel-system-x86_64-generic.morph b/systems/qt4-devel-system-x86_64-generic.morph index 15b85f70..c7bad950 100644 --- a/systems/qt4-devel-system-x86_64-generic.morph +++ b/systems/qt4-devel-system-x86_64-generic.morph @@ -37,8 +37,8 @@ strata: - name: enlightenment morph: strata/enlightenment.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git a/systems/qt5-devel-system-x86_64-generic.morph b/systems/qt5-devel-system-x86_64-generic.morph index 3cdce60c..a1f38e9e 100644 --- a/systems/qt5-devel-system-x86_64-generic.morph +++ b/systems/qt5-devel-system-x86_64-generic.morph @@ -39,8 +39,8 @@ strata: - name: enlightenment morph: strata/enlightenment.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git a/systems/swift-system-x86_64.morph b/systems/swift-system-x86_64.morph index 81738558..c959b6f7 100644 --- a/systems/swift-system-x86_64.morph +++ b/systems/swift-system-x86_64.morph @@ -24,11 +24,11 @@ strata: - name: openstack-common morph: strata/openstack-common.morph configuration-extensions: -- set-hostname -- add-config-files -- simple-network -- nfsboot -- install-files -- fstab -- swift-storage -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/simple-network +- extensions/nfsboot +- extensions/install-files +- extensions/fstab +- extensions/swift-storage +- extensions/install-essential-files diff --git a/systems/trove-system-x86_64.morph b/systems/trove-system-x86_64.morph index 0a5692f5..efc66b5c 100644 --- a/systems/trove-system-x86_64.morph +++ b/systems/trove-system-x86_64.morph @@ -48,10 +48,10 @@ strata: - name: devtools morph: strata/devtools.morph configuration-extensions: -- set-hostname -- trove -- nfsboot-server -- fstab -- install-files -- cloud-init -- install-essential-files +- extensions/set-hostname +- extensions/trove +- extensions/nfsboot-server +- extensions/fstab +- extensions/install-files +- extensions/cloud-init +- extensions/install-essential-files diff --git a/systems/web-system-x86_64-generic.morph b/systems/web-system-x86_64-generic.morph index 0b6e84b1..3f477833 100644 --- a/systems/web-system-x86_64-generic.morph +++ b/systems/web-system-x86_64-generic.morph @@ -30,8 +30,8 @@ strata: - name: nodejs morph: strata/nodejs.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git a/systems/weston-system-armv7lhf-jetson.morph b/systems/weston-system-armv7lhf-jetson.morph index ccb2c3ee..78ddc8b4 100644 --- a/systems/weston-system-armv7lhf-jetson.morph +++ b/systems/weston-system-armv7lhf-jetson.morph @@ -42,8 +42,8 @@ strata: - name: tools morph: strata/tools.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- 
extensions/install-essential-files diff --git a/systems/weston-system-x86_64-generic.morph b/systems/weston-system-x86_64-generic.morph index 84f0bad0..d927dd58 100644 --- a/systems/weston-system-x86_64-generic.morph +++ b/systems/weston-system-x86_64-generic.morph @@ -42,8 +42,8 @@ strata: - name: tools morph: strata/tools.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git a/systems/xfce-system.morph b/systems/xfce-system.morph index 643291d6..f87b0982 100644 --- a/systems/xfce-system.morph +++ b/systems/xfce-system.morph @@ -48,8 +48,8 @@ strata: - name: xfce morph: strata/xfce.morph configuration-extensions: -- set-hostname -- add-config-files -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git a/systems/zookeeper-client-x86_64.morph b/systems/zookeeper-client-x86_64.morph index a9b01cd5..bc470df5 100644 --- a/systems/zookeeper-client-x86_64.morph +++ b/systems/zookeeper-client-x86_64.morph @@ -20,9 +20,9 @@ strata: - name: test-tools morph: strata/test-tools.morph configuration-extensions: -- set-hostname -- add-config-files -- simple-network -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/simple-network +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git a/systems/zookeeper-server-x86_64.morph b/systems/zookeeper-server-x86_64.morph index 987ba9e1..bc46d7b3 100644 --- a/systems/zookeeper-server-x86_64.morph +++ b/systems/zookeeper-server-x86_64.morph @@ -20,9 +20,9 @@ strata: - name: test-tools morph: strata/test-tools.morph configuration-extensions: -- set-hostname -- add-config-files -- simple-network -- nfsboot -- install-files -- install-essential-files +- extensions/set-hostname +- extensions/add-config-files +- extensions/simple-network +- extensions/nfsboot +- extensions/install-files +- extensions/install-essential-files diff --git a/tar.check b/tar.check deleted file mode 100755 index f2304d46..00000000 --- a/tar.check +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/sh -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -# Preparatory checks for Morph 'tar' write extension - -set -eu - -if [ "$UPGRADE" == "yes" ]; then - echo >&2 "ERROR: Cannot upgrade a tar file deployment." 
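# (UPGRADE is supplied by the deployment environment; a tar deployment
# produces a one-shot archive with nothing in place to upgrade, hence the
# hard failure before any work is done.)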
- exit 1 -fi diff --git a/tar.write b/tar.write deleted file mode 100755 index 01b545b4..00000000 --- a/tar.write +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/sh -# Copyright (C) 2013,2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - -# A Morph write extension to deploy to a .tar file - -set -eu - -tar -C "$1" -cf "$2" . diff --git a/tar.write.help b/tar.write.help deleted file mode 100644 index b45c61fa..00000000 --- a/tar.write.help +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2014, 2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, see . - -help: | - Create a .tar file of the deployed system. - - The `location` argument is a pathname to the .tar file to be - created. diff --git a/trove.configure b/trove.configure deleted file mode 100755 index f823762c..00000000 --- a/trove.configure +++ /dev/null @@ -1,148 +0,0 @@ -#!/bin/sh -# -# Copyright (C) 2013 - 2014 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# This is a "morph deploy" configuration extension to fully configure -# a Trove instance at deployment time. It uses the following variables -# from the environment (run `morph help trove.configure` to see a description -# of them): -# -# * TROVE_ID -# * TROVE_HOSTNAME (optional, defaults to TROVE_ID) -# * TROVE_COMPANY -# * LORRY_SSH_KEY -# * UPSTREAM_TROVE -# * UPSTREAM_TROVE_PROTOCOL -# * TROVE_ADMIN_USER -# * TROVE_ADMIN_EMAIL -# * TROVE_ADMIN_NAME -# * TROVE_ADMIN_SSH_PUBKEY -# * LORRY_CONTROLLER_MINIONS (optional, defaults to 4) -# * TROVE_BACKUP_KEYS - a space-separated list of paths to SSH keys. -# (optional) -# * TROVE_GENERIC (optional) -# -# The configuration of a Trove is slightly tricky: part of it has to -# be run on the configured system after it has booted. 
We accomplish -# this by copying in all the relevant data to the target system -# (in /var/lib/trove-setup), and creating a systemd unit file that -# runs on the first boot. The first boot will be detected by the -# existence of the /var/lib/trove-setup/needed file. - -set -e - -if [ "$TROVE_GENERIC" ] -then - echo "Not configuring the trove, it will be generic" - exit 0 -fi - - -# Check that all the variables needed are present: - -error_vars=false -if test "x$TROVE_ID" = "x"; then - echo "ERROR: TROVE_ID needs to be defined." - error_vars=true -fi - -if test "x$TROVE_COMPANY" = "x"; then - echo "ERROR: TROVE_COMPANY needs to be defined." - error_vars=true -fi - -if test "x$TROVE_ADMIN_USER" = "x"; then - echo "ERROR: TROVE_ADMIN_USER needs to be defined." - error_vars=true -fi - -if test "x$TROVE_ADMIN_NAME" = "x"; then - echo "ERROR: TROVE_ADMIN_NAME needs to be defined." - error_vars=true -fi - -if test "x$TROVE_ADMIN_EMAIL" = "x"; then - echo "ERROR: TROVE_ADMIN_EMAIL needs to be defined." - error_vars=true -fi - -if ! ssh-keygen -lf $LORRY_SSH_KEY > /dev/null 2>&1 -then - echo "ERROR: LORRY_SSH_KEY is not a valid ssh key." - error_vars=true -fi - -if ! ssh-keygen -lf $WORKER_SSH_PUBKEY > /dev/null 2>&1 -then - echo "ERROR: WORKER_SSH_PUBKEY is not a valid ssh key." - error_vars=true -fi - -if ! ssh-keygen -lf $TROVE_ADMIN_SSH_PUBKEY > /dev/null 2>&1 -then - echo "ERROR: TROVE_ADMIN_SSH_PUBKEY is not a valid ssh key." - error_vars=true -fi - -if "$error_vars"; then - exit 1 -fi - -ROOT="$1" - - -TROVE_DATA="$ROOT/etc/trove" -mkdir -p "$TROVE_DATA" - -install -m 0600 "$LORRY_SSH_KEY" "$TROVE_DATA/lorry.key" -install -m 0644 "${LORRY_SSH_KEY}.pub" "$TROVE_DATA/lorry.key.pub" -install -m 0644 "$TROVE_ADMIN_SSH_PUBKEY" "$TROVE_DATA/admin.key.pub" -install -m 0644 "$WORKER_SSH_PUBKEY" "$TROVE_DATA/worker.key.pub" - - -python <<'EOF' >"$TROVE_DATA/trove.conf" -import os, sys, yaml - -trove_configuration={ - 'TROVE_ID': os.environ['TROVE_ID'], - 'TROVE_COMPANY': os.environ['TROVE_COMPANY'], - 'TROVE_ADMIN_USER': os.environ['TROVE_ADMIN_USER'], - 'TROVE_ADMIN_EMAIL': os.environ['TROVE_ADMIN_EMAIL'], - 'TROVE_ADMIN_NAME': os.environ['TROVE_ADMIN_NAME'], - 'LORRY_SSH_KEY': '/etc/trove/lorry.key', - 'LORRY_SSH_PUBKEY': '/etc/trove/lorry.key.pub', - 'TROVE_ADMIN_SSH_PUBKEY': '/etc/trove/admin.key.pub', - 'WORKER_SSH_PUBKEY': '/etc/trove/worker.key.pub', -} - - - -optional_keys = ('MASON_ID', 'HOSTNAME', 'TROVE_HOSTNAME', - 'LORRY_CONTROLLER_MINIONS', 'TROVE_BACKUP_KEYS', - 'UPSTREAM_TROVE', 'UPSTREAM_TROVE_PROTOCOL') - -for key in optional_keys: - if key in os.environ: - trove_configuration[key]=os.environ[key] - -yaml.dump(trove_configuration, sys.stdout, default_flow_style=False) -EOF - -if [ -n "$TROVE_BACKUP_KEYS" ]; then - mkdir -p "$TROVE_DATA/backup-keys" - cp -- $TROVE_BACKUP_KEYS "$TROVE_DATA/backup-keys" - echo "TROVE_BACKUP_KEYS: /etc/trove/backup-keys/*" >> "$TROVE_DATA/trove.conf" -fi diff --git a/trove.configure.help b/trove.configure.help deleted file mode 100644 index c96bdf74..00000000 --- a/trove.configure.help +++ /dev/null @@ -1,126 +0,0 @@ -help: | - This is a "morph deploy" configuration extension to fully configure - a Trove instance at deployment time.
It uses the following - configuration variables: - - * `TROVE_ID` - * `TROVE_HOSTNAME` (optional, defaults to `TROVE_ID`) - * `TROVE_COMPANY` - * `LORRY_SSH_KEY` - * `UPSTREAM_TROVE` - * `TROVE_ADMIN_USER` - * `TROVE_ADMIN_EMAIL` - * `TROVE_ADMIN_NAME` - * `TROVE_ADMIN_SSH_PUBKEY` - * `LORRY_CONTROLLER_MINIONS` (optional, defaults to 4) - * `TROVE_BACKUP_KEYS` - a space-separated list of paths to SSH keys. - (optional) - - The variables are described in more detail below. - - A Trove deployment needs to know the following things: - - * The Trove's ID and public name. - * The Trove's administrator name and access details. - * Private and public SSH keys for the Lorry user on the Trove. - * Which upstream Trove it should be set to mirror upon initial deploy. - - These are specified with the configuration variables described in this - help. - - * `TROVE_GENERIC` -- boolean. If it's true the trove will be generic - and it won't be configured with any of the other variables listed - here. - - * `TROVE_ID` -- the identifier of the Trove. This separates it from - other Troves, and allows mirroring of Troves to happen without local - changes getting overwritten. - - The Trove ID is used in several ways. Any local repositories (those not - mirrored from elsewhere) get created under a prefix that is the ID. - Thus, the local repositories on the `git.baserock.org` Trove, whose - Trove ID is `baserock`, are named - `baserock/baserock/definitions.git` and similar. The ID is used - there twice: first as a prefix and then as a "project name" within - that prefix. There can be more projects under the prefix. For - example, there is a `baserock/local-config/lorries.git` repository, - where `local-config` is a separate project from `baserock`. Projects - here are a concept for the Trove's git access control language. - - The Trove ID is also used as the prefix for any branch and tag names - created locally for repositories that are not local. Thus, in the - `delta/linux.git` repository, any local branches would be called - something like `baserock/morph`, instead of just `morph`. The - Trove's git access control prevents normal users from pushing - branches and tags that do not have the Trove ID as the prefix. - - * `TROVE_HOSTNAME` -- the public name of the Trove. This is an - optional setting, and defaults to `TROVE_ID`. The public name is - typically the domain name of the server (e.g., `git.baserock.org`), - but can also be an IP address. This setting is used when Trove needs - to generate URLs that point to itself, such as the `git://` and - `http://` URLs for each git repository that is viewed via the web - interface. - - Note that this is _not_ the system hostname. That is set separately, - with the `HOSTNAME` configuration setting (see the - `set-hostname.configure` extension). - - * `TROVE_COMPANY` -- a description of the organisation who own the - Trove. This is shown in various parts of the web interface of the - Trove. It is for descriptive purposes only. - - * `LORRY_SSH_KEY` -- ssh key pair that the Trove's Lorry will use to - access an upstream Trove, and to push updates to the Trove's git - server. - - The value is a filename on the system doing the deployment (where - `morph deploy` is run). The file contains the _private_ key, and the - public key is in a file with the `.pub` suffix added to the name. - - The upstream Trove needs to be configured to allow this key to - access it. This configuration does not do that automatically.
- * `UPSTREAM_TROVE` -- public name of the upstream Trove (domain - name or IP address). This is an optional setting. If it's set, - the new Trove will be configured to mirror that Trove. - - * `TROVE_ADMIN_USER`, `TROVE_ADMIN_EMAIL`, `TROVE_ADMIN_NAME`, - `TROVE_ADMIN_SSH_PUBKEY` -- details of the Trove's (initial) - administrator. - - Each Trove needs at least one administrator user, and one is created - upon initial deployment. `TROVE_ADMIN_USER` is the username of the - account to be created, `TROVE_ADMIN_EMAIL` should be the e-mail of - the user, and `TROVE_ADMIN_NAME` is their name. If more - administrators are needed, the initial person should create them - using the usual Gitano commands. - - * `LORRY_CONTROLLER_MINIONS` -- the number of Lorry Controller worker - processes to start. This is an optional setting and defaults to 4. - The more workers are running, the more Lorry jobs can run at the same - time, but the more resources they require. - - * `TROVE_BACKUP_KEYS` -- a space-separated list of paths to SSH keys. - If this is set, the Trove will have a backup user that can be accessed - with rsync using the SSH keys provided. - - Example - ------- - - The following set of variables could be used to deploy a Trove instance: - - TROVE_ID: my-trove - TROVE_HOSTNAME: my-trove.example.com - TROVE_COMPANY: My Personal Trove for Me, Myself and I - LORRY_SSH_KEY: my-trove/lorry.key - UPSTREAM_TROVE: git.baserock.org - UPSTREAM_TROVE_USER: my-trove - UPSTREAM_TROVE_EMAIL: my-trove@example.com - TROVE_ADMIN_USER: tomjon - TROVE_ADMIN_EMAIL: tomjon@example.com - TROVE_ADMIN_NAME: Tomjon of Lancre - TROVE_ADMIN_SSH_PUBKEY: my-trove/tomjon.key.pub - - These would be put into the cluster morphology used to do the - deployment. diff --git a/vagrant.configure b/vagrant.configure deleted file mode 100644 index abc3ea0c..00000000 --- a/vagrant.configure +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/sh -# Copyright (C) 2014 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -set -e - -ROOT="$1" - -if test "x$VAGRANT" = "x"; then - exit 0 -fi - -for needed in etc/ssh/sshd_config etc/sudoers; do - if ! test -e "$ROOT/$needed"; then - echo >&2 "Unable to find $needed" - echo >&2 "Cannot continue configuring as Vagrant basebox" - exit 1 - fi -done - -# SSH daemon needs to be configured to not use DNS... -sed -i -e's/^\(.*[Uu][Ss][Ee][Dd][Nn][Ss].*\)$/#\1/' "$ROOT/etc/ssh/sshd_config" -echo "UseDNS no" >> "$ROOT/etc/ssh/sshd_config" - -# We need to add a vagrant user with "vagrant" as the password. We're doing -# this manually because chrooting in to run adduser is not really allowed at -# deployment time, since we wouldn't necessarily be able to run adduser. In -# practice, for now, we would be able to, because we can't deploy raw disks -# cross-platform and expect extlinux to install, but we won't, for good -# practice and to highlight this deficiency.
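# (For contrast, a hypothetical chroot-based equivalent -- ruled out above
# because it would execute binaries from the target filesystem -- might look
# roughly like:
#     chroot "$ROOT" adduser --disabled-password --uid 1000 vagrant
# The lines below splice the same account in by hand instead; per the
# comment above, the crypt hash encodes the password "vagrant".)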
-echo 'vagrant:x:1000:1000:Vagrant User:/home/vagrant:/bin/bash' >> "$ROOT/etc/passwd" -echo 'vagrant:/6PTOoWylhw3w:16198:0:99999:7:::' >> "$ROOT/etc/shadow" -echo 'vagrant:x:1000:' >> "$ROOT/etc/group" -mkdir -p "$ROOT/home/vagrant" -chown -R 1000:1000 "$ROOT/home/vagrant" - -# Next, the vagrant user is meant to have sudo access -echo 'vagrant ALL=(ALL) NOPASSWD: ALL' >> "$ROOT/etc/sudoers" - -# And ensure that we get sbin in our path -echo 'PATH="$PATH:/sbin:/usr/sbin"' >> "$ROOT/etc/profile" -echo 'export PATH' >> "$ROOT/etc/profile" - diff --git a/vdaboot.configure b/vdaboot.configure deleted file mode 100755 index 60de925b..00000000 --- a/vdaboot.configure +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/sh -# Copyright (C) 2013,2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . - - -# Change the "/" mount point to /dev/vda to use virtio disks. - -set -e - -if [ "$OPENSTACK_USER" ] -then - # Modifying fstab - if [ -f "$1/etc/fstab" ] - then - mv "$1/etc/fstab" "$1/etc/fstab.old" - awk 'BEGIN {print "/dev/vda / btrfs defaults,rw,noatime 0 1"}; - $2 != "/" {print $0 };' "$1/etc/fstab.old" > "$1/etc/fstab" - rm "$1/etc/fstab.old" - else - echo "/dev/vda / btrfs defaults,rw,noatime 0 1"> "$1/etc/fstab" - fi -fi diff --git a/virtualbox-ssh.check b/virtualbox-ssh.check deleted file mode 100755 index a97f3294..00000000 --- a/virtualbox-ssh.check +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2014-2015 Codethink Limited -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . 
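A worked example of the vdaboot.configure fstab rewrite above, under an assumed input (the sda entries are illustrative):

    # Given $1/etc/fstab containing:
    #   /dev/sda1 /     btrfs defaults 0 1
    #   /dev/sda2 /home btrfs defaults 0 2
    # the awk program prints the replacement root entry first (its BEGIN
    # block), then copies through every line whose mount point is not "/":
    #   /dev/vda / btrfs defaults,rw,noatime 0 1
    #   /dev/sda2 /home btrfs defaults 0 2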
-
-'''Preparatory checks for Morph 'virtualbox-ssh' write extension'''
-
-import cliapp
-
-import morphlib.writeexts
-
-
-class VirtualBoxPlusSshCheckExtension(morphlib.writeexts.WriteExtension):
-    def process_args(self, args):
-        if len(args) != 1:
-            raise cliapp.AppException('Wrong number of command line args')
-
-        self.require_btrfs_in_deployment_host_kernel()
-
-        upgrade = self.get_environment_boolean('UPGRADE')
-        if upgrade:
-            raise cliapp.AppException(
-                'Use the `ssh-rsync` write extension to deploy upgrades to '
-                'an existing remote system.')
-
-VirtualBoxPlusSshCheckExtension().run()
diff --git a/virtualbox-ssh.write b/virtualbox-ssh.write
deleted file mode 100755
index 774f2b4f..00000000
--- a/virtualbox-ssh.write
+++ /dev/null
@@ -1,211 +0,0 @@
-#!/usr/bin/python
-# Copyright (C) 2012-2015 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-'''A Morph deployment write extension for deploying to VirtualBox via ssh.
-
-VirtualBox is assumed to be running on a remote machine, which is
-accessed over ssh. The machine gets created, but not started.
-
-See file virtualbox-ssh.write.help for documentation
-
-'''
-
-
-import cliapp
-import os
-import re
-import sys
-import time
-import tempfile
-import urlparse
-
-import morphlib.writeexts
-
-
-class VirtualBoxPlusSshWriteExtension(morphlib.writeexts.WriteExtension):
-
-    def process_args(self, args):
-        if len(args) != 2:
-            raise cliapp.AppException('Wrong number of command line args')
-
-        temp_root, location = args
-        ssh_host, vm_name, vdi_path = self.parse_location(location)
-        autostart = self.get_environment_boolean('AUTOSTART')
-
-        vagrant = self.get_environment_boolean('VAGRANT')
-
-        fd, raw_disk = tempfile.mkstemp()
-        os.close(fd)
-        self.create_local_system(temp_root, raw_disk)
-
-        try:
-            self.transfer_and_convert_to_vdi(
-                raw_disk, ssh_host, vdi_path)
-            self.create_virtualbox_guest(ssh_host, vm_name, vdi_path,
-                                         autostart, vagrant)
-        except BaseException:
-            sys.stderr.write('Error deploying to VirtualBox\n')
-            os.remove(raw_disk)
-            cliapp.ssh_runcmd(ssh_host, ['rm', '-f', vdi_path])
-            raise
-        else:
-            os.remove(raw_disk)
-            self.status(
-                msg='Virtual machine %(vm_name)s has been created',
-                vm_name=vm_name)
-
-    def parse_location(self, location):
-        '''Parse the location argument to get relevant data.'''
-
-        x = urlparse.urlparse(location)
-        if x.scheme != 'vbox+ssh':
-            raise cliapp.AppException(
-                'URL scheme must be vbox+ssh in %s' % location)
-        m = re.match('^/(?P<guest>[^/]+)(?P<path>/.+)$', x.path)
-        if not m:
-            raise cliapp.AppException('Cannot parse location %s' % location)
-        return x.netloc, m.group('guest'), m.group('path')
-
-    def transfer_and_convert_to_vdi(self, raw_disk, ssh_host, vdi_path):
-        '''Transfer raw disk image to VirtualBox host, and convert to VDI.'''
-
-        self.status(msg='Transfer disk and convert to VDI')
-
-        st = os.lstat(raw_disk)
-        xfer_hole_path = morphlib.util.get_data_path('xfer-hole')
-        recv_hole = morphlib.util.get_data('recv-hole')
-
-        ssh_remote_cmd = [
-            'sh', '-c', recv_hole,
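-            # 'sh -c' runs the recv-hole script text as an inline
-            # script; the first argument after it becomes the script's
-            # $0 (hence the dummy argv[0] below), and the remaining
-            # arguments become its positional parameters $1, $2, ...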
-            'dummy-argv0', 'vbox', vdi_path, str(st.st_size),
-        ]
-
-        cliapp.runcmd(
-            ['python', xfer_hole_path, raw_disk],
-            ['ssh', ssh_host] + map(cliapp.shell_quote, ssh_remote_cmd),
-            stdout=None, stderr=None)
-
-    def virtualbox_version(self, ssh_host):
-        'Get the version number of the VirtualBox running on the remote host.'
-
-        # --version gives a build id, which looks something like
-        # 1.2.3r456789, so we need to strip the suffix off and get a tuple
-        # of the (major, minor, patch) version, since comparing with a
-        # tuple is more reliable than a string and more convenient than
-        # comparing against the major, minor and patch numbers directly
-        self.status(msg='Checking version of remote VirtualBox')
-        build_id = cliapp.ssh_runcmd(ssh_host, ['VBoxManage', '--version'])
-        version_string = re.match(r"^([0-9\.]+).*$", build_id.strip()).group(1)
-        return tuple(int(s or '0') for s in version_string.split('.'))
-
-    def create_virtualbox_guest(self, ssh_host, vm_name, vdi_path, autostart,
-                                vagrant):
-        '''Create the VirtualBox virtual machine.'''
-
-        self.status(msg='Create VirtualBox virtual machine')
-
-        ram_mebibytes = str(self.get_ram_size() / (1024**2))
-
-        vcpu_count = str(self.get_vcpu_count())
-
-        if not vagrant:
-            hostonly_iface = self.get_host_interface(ssh_host)
-
-        if self.virtualbox_version(ssh_host) < (4, 3, 0):
-            sataportcount_option = '--sataportcount'
-        else:
-            sataportcount_option = '--portcount'
-
-        commands = [
-            ['createvm', '--name', vm_name, '--ostype', 'Linux26_64',
-             '--register'],
-            ['modifyvm', vm_name, '--ioapic', 'on',
-             '--memory', ram_mebibytes, '--cpus', vcpu_count],
-            ['storagectl', vm_name, '--name', 'SATA Controller',
-             '--add', 'sata', '--bootable', 'on', sataportcount_option, '2'],
-            ['storageattach', vm_name, '--storagectl', 'SATA Controller',
-             '--port', '0', '--device', '0', '--type', 'hdd', '--medium',
-             vdi_path],
-        ]
-        if vagrant:
-            commands[1].extend(['--nic1', 'nat',
-                                '--natnet1', 'default'])
-        else:
-            commands[1].extend(['--nic1', 'hostonly',
-                                '--hostonlyadapter1', hostonly_iface,
-                                '--nic2', 'nat', '--natnet2', 'default'])
-
-        attach_disks = self.parse_attach_disks()
-        for device_no, disk in enumerate(attach_disks, 1):
-            cmd = ['storageattach', vm_name,
-                   '--storagectl', 'SATA Controller',
-                   '--port', str(device_no),
-                   '--device', '0',
-                   '--type', 'hdd',
-                   '--medium', disk]
-            commands.append(cmd)
-
-        if autostart:
-            commands.append(['startvm', vm_name])
-
-        for command in commands:
-            argv = ['VBoxManage'] + command
-            cliapp.ssh_runcmd(ssh_host, argv)
-
-    def get_host_interface(self, ssh_host):
-        host_ipaddr = os.environ.get('HOST_IPADDR')
-        netmask = os.environ.get('NETMASK')
-
-        if host_ipaddr is None:
-            raise cliapp.AppException('HOST_IPADDR was not given')
-
-        if netmask is None:
-            raise cliapp.AppException('NETMASK was not given')
-
-        # 'VBoxManage list hostonlyifs' retrieves a list with the hostonly
-        # interfaces on the host. For each interface, the following lines
-        # are shown on top:
-        #
-        # Name: vboxnet0
-        # GUID: 786f6276-656e-4074-8000-0a0027000000
-        # Dhcp: Disabled
-        # IPAddress: 192.168.100.1
-        #
-        # The following command tries to retrieve the hostonly interface
-        # name (e.g. vboxnet0) associated with the given ip address.
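-        # 'Name:' appears three lines above 'IPAddress:' in that
-        # listing, which is why the loop below reaches back to
-        # lines[i-3] to recover the interface name.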
-        iface = None
-        lines = cliapp.ssh_runcmd(ssh_host,
-            ['VBoxManage', 'list', 'hostonlyifs']).splitlines()
-        for i, v in enumerate(lines):
-            if host_ipaddr in v:
-                iface = lines[i-3].split()[1]
-                break
-
-        if iface is None:
-            iface = cliapp.ssh_runcmd(ssh_host,
-                ['VBoxManage', 'hostonlyif', 'create'])
-            # 'VBoxManage hostonlyif create' shows the name of the
-            # created hostonly interface inside single quotes
-            iface = iface[iface.find("'") + 1 : iface.rfind("'")]
-            cliapp.ssh_runcmd(ssh_host,
-                ['VBoxManage', 'hostonlyif',
-                 'ipconfig', iface,
-                 '--ip', host_ipaddr,
-                 '--netmask', netmask])
-
-        return iface
-
-VirtualBoxPlusSshWriteExtension().run()
diff --git a/virtualbox-ssh.write.help b/virtualbox-ssh.write.help
deleted file mode 100644
index 2dbf988c..00000000
--- a/virtualbox-ssh.write.help
+++ /dev/null
@@ -1,135 +0,0 @@
-# Copyright (C) 2014, 2015 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, see <http://www.gnu.org/licenses/>.
-
-help: |
-
-    Deploy a Baserock system as a *new* VirtualBox virtual machine.
-    (Use the `ssh-rsync` write extension to deploy upgrades to an
-    *existing* VM)
-
-    Connects to HOST via ssh to run VirtualBox's command line management
-    tools.
-
-    Parameters:
-
-    * location: a custom-scheme URL of the form
-      `vbox+ssh://HOST/GUEST/PATH`, where:
-        * HOST is the name of the host on which VirtualBox is running
-        * GUEST is the name of the guest VM on that host
-        * PATH is the path to the disk image that should be created,
-          on that host. For example,
-          `vbox+ssh://alice@192.168.122.1/testsys/home/alice/testys.img`
-          where
-            * `alice@192.168.122.1` is the target host as given to ssh,
-              **from within the development host** (which may be
-              different from the target host's normal address);
-            * `testsys` is the name of the new guest VM;
-            * `/home/alice/testys.img` is the pathname of the disk image
-              file on the target host.
-
-    * HOSTNAME=name: the hostname of the **guest** VM within the network
-      into which it is being deployed.
-
-    * DISK_SIZE=X: **(MANDATORY)** the size of the VM's primary virtual
-      hard disk. `X` should use a suffix of `K`, `M`, or `G` (in upper
-      or lower case) to indicate kilo-, mega-, or gigabytes. For
-      example, `DISK_SIZE=100G` would create a 100 gigabyte virtual
-      hard disk.
-
-    * RAM_SIZE=X: the amount of RAM that the virtual machine should
-      allocate for itself from the host. `X` is interpreted in the same
-      way as for DISK_SIZE, and defaults to `1G`.
-
-    * VCPUS=n: the number of virtual CPUs for the VM. Allowed values are
-      1-32. Do not use more CPU cores than you have available
-      physically (real cores, no hyperthreads).
-
-    * INITRAMFS_PATH=path: the location of an initramfs for the
-      bootloader to tell Linux to use, rather than booting the rootfs
-      directly.
-
-    * DTB_PATH=path: **(MANDATORY)** for systems that require a device
-      tree binary - give the full path (without a leading /) to the
-      location of the DTB in the built system image. The deployment
-      will fail if `path` does not exist.
-
-    * BOOTLOADER_INSTALL=value: the bootloader to be installed
-      **(MANDATORY)** for non-x86 systems
-
-      allowed values =
-      - 'extlinux' (default) - the extlinux bootloader will
-        be installed
-      - 'none' - no bootloader will be installed by `morph deploy`. A
-        bootloader must be installed manually. This value must be used
-        when deploying non-x86 systems such as ARM.
-
-    * BOOTLOADER_CONFIG_FORMAT=value: the bootloader format to be used.
-      If not specified for x86-32 and x86-64 systems, 'extlinux' will
-      be used
-
-      allowed values =
-      - 'extlinux'
-
-    * KERNEL_ARGS=args: optional additional kernel command-line
-      parameters to be appended to the default set. The default set is:
-
-      'rw init=/sbin/init rootfstype=btrfs \
-       rootflags=subvol=systems/default/run \
-       root=[name or UUID of root filesystem]'
-
-      (See https://www.kernel.org/doc/Documentation/kernel-parameters.txt)
-
-    * AUTOSTART=<VALUE> - boolean. If it is set, the VM will be started
-      when it has been deployed.
-
-    * VAGRANT=<VALUE> - boolean. If it is set, then networking is
-      configured so that the VM will work with Vagrant. Otherwise
-      networking is configured to run directly in VirtualBox.
-
-    * HOST_IPADDR=<ip_address> - the IP address of the VM host.
-
-    * NETMASK=<netmask> - the netmask of the VM host.
-
-    * NETWORK_CONFIG=<net_config> - `net_config` is used to set up the
-      VM's network interfaces. It is a string containing semi-colon
-      separated 'stanzas' where each stanza provides information about
-      a network interface. Each stanza is of the form
-      name:type[,arg=value] e.g.
-
-      lo:loopback
-      eth0:dhcp
-      eth1:static,address=10.0.0.1,netmask=255.255.0.0
-
-      An example of the NETWORK_CONFIG parameter (it should be on one
-      line):
-
-      `"lo:loopback;eth0:static,address=192.168.100.2,netmask=255.255.255.0;
-      eth1:dhcp,hostname=$(hostname)"`
-
-      It is useful to configure one interface to use NAT to give the VM
-      access to the outside world, and another interface to use the
-      VirtualBox host adapter to allow you to access the VM from the
-      host machine.
-
-      The NAT interface eth1 is set up to use dhcp; the host-only
-      adapter interface is configured statically.
-
-      Note: you must give the host-only adapter interface an address
-      that lies **on the same network** as the host adapter. So if the
-      host adapter has an IP of 192.168.100.1, eth0 should have an
-      address such as 192.168.100.42.
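-
-      For example, assuming the host-only adapter on the VirtualBox
-      host is `vboxnet0`, the following command (the same one this
-      extension runs over ssh when it has to create the interface
-      itself) would give it the address used above:
-
-          VBoxManage hostonlyif ipconfig vboxnet0 \
-              --ip 192.168.100.1 --netmask 255.255.255.0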
-
-      The settings of the host adapter, including its IP, can be
-      changed either in the VirtualBox manager UI
-      (https://www.virtualbox.org/manual/ch03.html#settings-network)
-      or via the VBoxManage command line
-      (https://www.virtualbox.org/manual/ch08.html#idp57572192)
-
-      See Chapter 6 of the VirtualBox User Manual for more information
-      about virtual networking
-      (https://www.virtualbox.org/manual/ch06.html)
-
-    (See `morph help deploy` for details of how to pass parameters to
-    write extensions)
-- 
cgit v1.2.1


From 65ddf13f13ab68c1c5728e40dd81f1e8826ab0cf Mon Sep 17 00:00:00 2001
From: Adam Coldrick
Date: Tue, 2 Jun 2015 08:23:24 +0000
Subject: Move chef-system-x86_64-container.morph into systems subdirectory

Change-Id: I9a523f5bbe744225b1f5fe30f29d197596337290
---
 chef-system-x86_64-container.morph | 32 ------------------------------
 systems/chef-system-x86_64-container.morph | 32 ++++++++++++++++++++++++++++++
 2 files changed, 32 insertions(+), 32 deletions(-)
 delete mode 100644 chef-system-x86_64-container.morph
 create mode 100644 systems/chef-system-x86_64-container.morph

diff --git a/chef-system-x86_64-container.morph b/chef-system-x86_64-container.morph
deleted file mode 100644
index 889eabea..00000000
--- a/chef-system-x86_64-container.morph
+++ /dev/null
@@ -1,32 +0,0 @@
-name: chef-system-x86_64-container
-kind: system
-arch: x86_64
-description: Minimal chef system suitable for running in a container
-configuration-extensions:
-- extensions/set-hostname
-- extensions/simple-network
-- extensions/nfsboot
-- extensions/install-files
-- extensions/busybox-init
-- extensions/remove-gcc
-strata:
-- name: build-essential
-  morph: strata/build-essential.morph
-  artifacts:
-  - build-essential-minimal
-- name: core
-  morph: strata/core.morph
-  artifacts:
-  - core-openssl
-- name: foundation
-  morph: strata/foundation.morph
-  artifacts:
-  - foundation-runtime
-- name: ruby
-  morph: strata/ruby.morph
-  artifacts:
-  - ruby-runtime
-- name: chef
-  morph: strata/chef.morph
-  artifacts:
-  - chef-runtime
diff --git a/systems/chef-system-x86_64-container.morph b/systems/chef-system-x86_64-container.morph
new file mode 100644
index 00000000..889eabea
--- /dev/null
+++ b/systems/chef-system-x86_64-container.morph
@@ -0,0 +1,32 @@
+name: chef-system-x86_64-container
+kind: system
+arch: x86_64
+description: Minimal chef system suitable for running in a container
+configuration-extensions:
+- extensions/set-hostname
+- extensions/simple-network
+- extensions/nfsboot
+- extensions/install-files
+- extensions/busybox-init
+- extensions/remove-gcc
+strata:
+- name: build-essential
+  morph: strata/build-essential.morph
+  artifacts:
+  - build-essential-minimal
+- name: core
+  morph: strata/core.morph
+  artifacts:
+  - core-openssl
+- name: foundation
+  morph: strata/foundation.morph
+  artifacts:
+  - foundation-runtime
+- name: ruby
+  morph: strata/ruby.morph
+  artifacts:
+  - ruby-runtime
+- name: chef
+  morph: strata/chef.morph
+  artifacts:
+  - chef-runtime
-- 
cgit v1.2.1


From 02faf51e91a8c55adfbb6d953bca354ab99bf261 Mon Sep 17 00:00:00 2001
From: Adam Coldrick
Date: Tue, 2 Jun 2015 08:40:27 +0000
Subject: Move all the directories used for install-files into a subdirectory

Change-Id: I309c183ce8b9ff9d0f5ac4807244547f2cc4ddf5
---
 chef/manifest | 3 -
 clusters/cephclient.morph | 2 +-
 clusters/ci.morph | 2 +-
 clusters/example-distbuild-cluster.morph | 2 +-
 clusters/example-swift-storage-cluster.morph | 2 +-
 clusters/mason-openstack.morph | 2 +-
 clusters/mason.morph | 2 +-
 clusters/moonshot-m2-armv8b64.morph |
4 +- clusters/moonshot-pxe-armv8b64.morph | 2 +- clusters/moonshot-pxe-armv8l64.morph | 2 +- clusters/openstack-one-node-swift.morph | 2 +- clusters/openstack-one-node.morph | 2 +- clusters/openstack-three-node-installer.morph | 2 +- clusters/openstack-two-node-installer.morph | 2 +- .../system-generators/ccache-nfs-mount-generator | 16 - distbuild/manifest | 28 - .../distbuild-setup/ansible/distbuild-setup.yml | 115 - distbuild/usr/lib/distbuild-setup/ansible/hosts | 1 - .../usr/lib/systemd/system/distbuild-setup.service | 16 - .../lib/systemd/system/morph-cache-server.service | 12 - .../systemd/system/morph-controller-helper.service | 13 - .../lib/systemd/system/morph-controller.service | 12 - .../lib/systemd/system/morph-worker-helper.service | 13 - .../usr/lib/systemd/system/morph-worker.service | 13 - .../distbuild-setup.service | 1 - .../share/distbuild-setup/morph-cache-server.conf | 5 - .../distbuild-setup/morph-controller-helper.conf | 5 - .../share/distbuild-setup/morph-controller.conf | 6 - .../share/distbuild-setup/morph-worker-helper.conf | 4 - .../usr/share/distbuild-setup/morph-worker.conf | 4 - distbuild/usr/share/distbuild-setup/morph.conf | 13 - essential-files/etc/inputrc | 38 - essential-files/etc/os-release | 5 - essential-files/etc/profile | 13 - essential-files/manifest | 8 - .../usr/lib/tmpfiles.d/shutdownramfs.conf | 4 - extensions/install-essential-files.configure | 2 +- genivi-devel-system-armv7/etc/morph.conf | 1 - genivi-devel-system-armv7/manifest | 5 - genivi-devel-system-armv7/src/morph.conf | 5 - install-files/chef/manifest | 3 + .../system-generators/ccache-nfs-mount-generator | 16 + install-files/distbuild/manifest | 28 + .../distbuild-setup/ansible/distbuild-setup.yml | 115 + .../usr/lib/distbuild-setup/ansible/hosts | 1 + .../usr/lib/systemd/system/distbuild-setup.service | 16 + .../lib/systemd/system/morph-cache-server.service | 12 + .../systemd/system/morph-controller-helper.service | 13 + .../lib/systemd/system/morph-controller.service | 12 + .../lib/systemd/system/morph-worker-helper.service | 13 + .../usr/lib/systemd/system/morph-worker.service | 13 + .../distbuild-setup.service | 1 + .../share/distbuild-setup/morph-cache-server.conf | 5 + .../distbuild-setup/morph-controller-helper.conf | 5 + .../share/distbuild-setup/morph-controller.conf | 6 + .../share/distbuild-setup/morph-worker-helper.conf | 4 + .../usr/share/distbuild-setup/morph-worker.conf | 4 + .../distbuild/usr/share/distbuild-setup/morph.conf | 13 + install-files/essential-files/etc/inputrc | 38 + install-files/essential-files/etc/os-release | 5 + install-files/essential-files/etc/profile | 13 + install-files/essential-files/manifest | 8 + .../usr/lib/tmpfiles.d/shutdownramfs.conf | 4 + .../genivi-devel-system-armv7/etc/morph.conf | 1 + install-files/genivi-devel-system-armv7/manifest | 5 + .../genivi-devel-system-armv7/src/morph.conf | 5 + install-files/moonshot/boot/m400-1003.dtb | Bin 0 -> 18063 bytes install-files/moonshot/manifest | 2 + .../openstack/etc/horizon/apache-horizon.conf | 34 + .../horizon/openstack_dashboard/local_settings.py | 551 +++ install-files/openstack/etc/tempest/tempest.conf | 1116 ++++++ install-files/openstack/manifest | 190 + .../openstack/usr/lib/sysctl.d/neutron.conf | 3 + .../usr/lib/systemd/system/apache-httpd.service | 16 + .../usr/lib/systemd/system/iscsi-setup.service | 12 + .../openstack-ceilometer-alarm-evaluator.service | 15 + .../openstack-ceilometer-alarm-notifier.service | 15 + .../system/openstack-ceilometer-api.service | 15 + 
.../system/openstack-ceilometer-central.service | 15 + .../system/openstack-ceilometer-collector.service | 15 + .../system/openstack-ceilometer-compute.service | 15 + .../openstack-ceilometer-config-setup.service | 11 + .../system/openstack-ceilometer-db-setup.service | 13 + .../openstack-ceilometer-notification.service | 15 + .../systemd/system/openstack-cinder-api.service | 15 + .../systemd/system/openstack-cinder-backup.service | 15 + .../system/openstack-cinder-config-setup.service | 11 + .../system/openstack-cinder-db-setup.service | 13 + .../system/openstack-cinder-lv-setup.service | 12 + .../system/openstack-cinder-scheduler.service | 15 + .../systemd/system/openstack-cinder-volume.service | 15 + .../systemd/system/openstack-glance-api.service | 16 + .../system/openstack-glance-registry.service | 16 + .../systemd/system/openstack-glance-setup.service | 11 + .../systemd/system/openstack-horizon-setup.service | 10 + .../systemd/system/openstack-ironic-api.service | 16 + .../system/openstack-ironic-conductor.service | 16 + .../systemd/system/openstack-ironic-setup.service | 12 + .../system/openstack-keystone-setup.service | 14 + .../lib/systemd/system/openstack-keystone.service | 16 + .../systemd/system/openstack-network-setup.service | 12 + .../system/openstack-neutron-config-setup.service | 13 + .../system/openstack-neutron-db-setup.service | 13 + .../system/openstack-neutron-dhcp-agent.service | 17 + .../system/openstack-neutron-l3-agent.service | 18 + .../openstack-neutron-metadata-agent.service | 17 + .../system/openstack-neutron-ovs-cleanup.service | 18 + ...nstack-neutron-plugin-openvswitch-agent.service | 17 + .../system/openstack-neutron-server.service | 17 + .../lib/systemd/system/openstack-nova-api.service | 15 + .../lib/systemd/system/openstack-nova-cert.service | 15 + .../systemd/system/openstack-nova-compute.service | 16 + .../system/openstack-nova-conductor.service | 16 + .../system/openstack-nova-config-setup.service | 11 + .../system/openstack-nova-consoleauth.service | 15 + .../systemd/system/openstack-nova-db-setup.service | 13 + .../system/openstack-nova-novncproxy.service | 15 + .../system/openstack-nova-scheduler.service | 15 + .../system/openstack-nova-serialproxy.service | 15 + .../systemd/system/openvswitch-db-server.service | 12 + .../lib/systemd/system/openvswitch-setup.service | 11 + .../usr/lib/systemd/system/openvswitch.service | 12 + .../systemd/system/postgres-server-setup.service | 12 + .../usr/lib/systemd/system/postgres-server.service | 26 + .../usr/lib/systemd/system/rabbitmq-server.service | 16 + .../systemd/system/swift-controller-setup.service | 13 + .../usr/lib/systemd/system/swift-proxy.service | 14 + .../usr/share/openstack/ceilometer-config.yml | 36 + .../usr/share/openstack/ceilometer-db.yml | 50 + .../usr/share/openstack/ceilometer/ceilometer.conf | 1023 ++++++ .../usr/share/openstack/cinder-config.yml | 37 + .../openstack/usr/share/openstack/cinder-db.yml | 60 + .../openstack/usr/share/openstack/cinder-lvs.yml | 21 + .../usr/share/openstack/cinder/api-paste.ini | 60 + .../usr/share/openstack/cinder/cinder.conf | 2825 +++++++++++++++ .../usr/share/openstack/cinder/policy.json | 80 + .../openstack/extras/00-disable-device.network | 2 + .../share/openstack/extras/60-device-dhcp.network | 5 + .../openstack/usr/share/openstack/glance.yml | 93 + .../share/openstack/glance/glance-api-paste.ini | 77 + .../usr/share/openstack/glance/glance-api.conf | 699 ++++ .../usr/share/openstack/glance/glance-cache.conf | 200 + 
.../openstack/glance/glance-registry-paste.ini | 30 + .../share/openstack/glance/glance-registry.conf | 245 ++ .../share/openstack/glance/glance-scrubber.conf | 108 + .../usr/share/openstack/glance/logging.conf | 54 + .../usr/share/openstack/glance/policy.json | 52 + .../usr/share/openstack/glance/schema-image.json | 28 + .../openstack/usr/share/openstack/horizon.yml | 47 + install-files/openstack/usr/share/openstack/hosts | 1 + .../openstack/usr/share/openstack/ironic.yml | 104 + .../usr/share/openstack/ironic/ironic.conf | 1247 +++++++ .../usr/share/openstack/ironic/policy.json | 5 + .../openstack/usr/share/openstack/iscsi.yml | 15 + .../openstack/usr/share/openstack/keystone.yml | 143 + .../share/openstack/keystone/keystone-paste.ini | 121 + .../usr/share/openstack/keystone/keystone.conf | 1588 ++++++++ .../usr/share/openstack/keystone/logging.conf | 65 + .../usr/share/openstack/keystone/policy.json | 171 + .../openstack/usr/share/openstack/network.yml | 67 + .../usr/share/openstack/neutron-config.yml | 48 + .../openstack/usr/share/openstack/neutron-db.yml | 51 + .../usr/share/openstack/neutron/api-paste.ini | 30 + .../usr/share/openstack/neutron/dhcp_agent.ini | 89 + .../usr/share/openstack/neutron/fwaas_driver.ini | 3 + .../usr/share/openstack/neutron/l3_agent.ini | 103 + .../usr/share/openstack/neutron/lbaas_agent.ini | 42 + .../usr/share/openstack/neutron/metadata_agent.ini | 60 + .../usr/share/openstack/neutron/metering_agent.ini | 18 + .../usr/share/openstack/neutron/neutron.conf | 640 ++++ .../neutron/plugins/bigswitch/restproxy.ini | 114 + .../neutron/plugins/bigswitch/ssl/ca_certs/README | 3 + .../plugins/bigswitch/ssl/host_certs/README | 6 + .../openstack/neutron/plugins/brocade/brocade.ini | 29 + .../neutron/plugins/cisco/cisco_cfg_agent.ini | 15 + .../neutron/plugins/cisco/cisco_plugins.ini | 100 + .../neutron/plugins/cisco/cisco_router_plugin.ini | 76 + .../neutron/plugins/cisco/cisco_vpn_agent.ini | 26 + .../neutron/plugins/embrane/heleos_conf.ini | 41 + .../plugins/hyperv/hyperv_neutron_plugin.ini | 63 + .../neutron/plugins/ibm/sdnve_neutron_plugin.ini | 50 + .../plugins/linuxbridge/linuxbridge_conf.ini | 78 + .../neutron/plugins/metaplugin/metaplugin.ini | 31 + .../openstack/neutron/plugins/midonet/midonet.ini | 19 + .../openstack/neutron/plugins/ml2/ml2_conf.ini | 86 + .../neutron/plugins/ml2/ml2_conf_arista.ini | 100 + .../neutron/plugins/ml2/ml2_conf_brocade.ini | 15 + .../neutron/plugins/ml2/ml2_conf_cisco.ini | 118 + .../neutron/plugins/ml2/ml2_conf_fslsdn.ini | 52 + .../neutron/plugins/ml2/ml2_conf_mlnx.ini | 4 + .../openstack/neutron/plugins/ml2/ml2_conf_ncs.ini | 28 + .../openstack/neutron/plugins/ml2/ml2_conf_odl.ini | 30 + .../openstack/neutron/plugins/ml2/ml2_conf_ofa.ini | 13 + .../neutron/plugins/ml2/ml2_conf_sriov.ini | 31 + .../openstack/neutron/plugins/mlnx/mlnx_conf.ini | 79 + .../share/openstack/neutron/plugins/nec/nec.ini | 60 + .../neutron/plugins/nuage/nuage_plugin.ini | 41 + .../neutron/plugins/oneconvergence/nvsdplugin.ini | 35 + .../plugins/opencontrail/contrailplugin.ini | 26 + .../plugins/openvswitch/ovs_neutron_plugin.ini | 190 + .../neutron/plugins/plumgrid/plumgrid.ini | 14 + .../share/openstack/neutron/plugins/ryu/ryu.ini | 44 + .../share/openstack/neutron/plugins/vmware/nsx.ini | 200 + .../usr/share/openstack/neutron/policy.json | 138 + .../usr/share/openstack/neutron/vpn_agent.ini | 14 + .../openstack/usr/share/openstack/nova-config.yml | 34 + .../openstack/usr/share/openstack/nova-db.yml | 51 + .../usr/share/openstack/nova/api-paste.ini | 
118 + .../openstack/usr/share/openstack/nova/cells.json | 26 + .../usr/share/openstack/nova/logging.conf | 81 + .../usr/share/openstack/nova/nova-compute.conf | 4 + .../openstack/usr/share/openstack/nova/nova.conf | 3809 ++++++++++++++++++++ .../openstack/usr/share/openstack/nova/policy.json | 324 ++ .../openstack/usr/share/openstack/openvswitch.yml | 38 + .../openstack/usr/share/openstack/postgres.yml | 48 + .../usr/share/openstack/postgres/pg_hba.conf | 5 + .../usr/share/openstack/postgres/postgresql.conf | 11 + .../usr/share/openstack/rabbitmq/rabbitmq-env.conf | 3 + .../usr/share/openstack/rabbitmq/rabbitmq.config | 9 + .../usr/share/openstack/swift-controller.yml | 52 + .../openstack/usr/share/swift/etc/rsyncd.j2 | 23 + .../usr/share/swift/etc/swift/proxy-server.j2 | 630 ++++ install-files/swift/etc/ntp.conf | 25 + install-files/swift/manifest | 15 + .../swift/usr/lib/systemd/system/rsync.service | 11 + .../lib/systemd/system/swift-storage-setup.service | 12 + .../usr/lib/systemd/system/swift-storage.service | 12 + install-files/swift/usr/share/swift/etc/rsyncd.j2 | 23 + .../usr/share/swift/etc/swift/account-server.j2 | 192 + .../usr/share/swift/etc/swift/container-server.j2 | 203 ++ .../usr/share/swift/etc/swift/object-server.j2 | 283 ++ .../swift/usr/share/swift/etc/swift/swift.j2 | 118 + install-files/swift/usr/share/swift/hosts | 1 + .../swift/usr/share/swift/swift-storage.yml | 24 + .../home/vagrant/.ssh/authorized_keys | 1 + install-files/vagrant-files/manifest | 4 + moonshot/boot/m400-1003.dtb | Bin 18063 -> 0 bytes moonshot/manifest | 2 - openstack/etc/horizon/apache-horizon.conf | 34 - .../horizon/openstack_dashboard/local_settings.py | 551 --- openstack/etc/tempest/tempest.conf | 1116 ------ openstack/manifest | 190 - openstack/usr/lib/sysctl.d/neutron.conf | 3 - .../usr/lib/systemd/system/apache-httpd.service | 16 - .../usr/lib/systemd/system/iscsi-setup.service | 12 - .../openstack-ceilometer-alarm-evaluator.service | 15 - .../openstack-ceilometer-alarm-notifier.service | 15 - .../system/openstack-ceilometer-api.service | 15 - .../system/openstack-ceilometer-central.service | 15 - .../system/openstack-ceilometer-collector.service | 15 - .../system/openstack-ceilometer-compute.service | 15 - .../openstack-ceilometer-config-setup.service | 11 - .../system/openstack-ceilometer-db-setup.service | 13 - .../openstack-ceilometer-notification.service | 15 - .../systemd/system/openstack-cinder-api.service | 15 - .../systemd/system/openstack-cinder-backup.service | 15 - .../system/openstack-cinder-config-setup.service | 11 - .../system/openstack-cinder-db-setup.service | 13 - .../system/openstack-cinder-lv-setup.service | 12 - .../system/openstack-cinder-scheduler.service | 15 - .../systemd/system/openstack-cinder-volume.service | 15 - .../systemd/system/openstack-glance-api.service | 16 - .../system/openstack-glance-registry.service | 16 - .../systemd/system/openstack-glance-setup.service | 11 - .../systemd/system/openstack-horizon-setup.service | 10 - .../systemd/system/openstack-ironic-api.service | 16 - .../system/openstack-ironic-conductor.service | 16 - .../systemd/system/openstack-ironic-setup.service | 12 - .../system/openstack-keystone-setup.service | 14 - .../lib/systemd/system/openstack-keystone.service | 16 - .../systemd/system/openstack-network-setup.service | 12 - .../system/openstack-neutron-config-setup.service | 13 - .../system/openstack-neutron-db-setup.service | 13 - .../system/openstack-neutron-dhcp-agent.service | 17 - 
.../system/openstack-neutron-l3-agent.service | 18 - .../openstack-neutron-metadata-agent.service | 17 - .../system/openstack-neutron-ovs-cleanup.service | 18 - ...nstack-neutron-plugin-openvswitch-agent.service | 17 - .../system/openstack-neutron-server.service | 17 - .../lib/systemd/system/openstack-nova-api.service | 15 - .../lib/systemd/system/openstack-nova-cert.service | 15 - .../systemd/system/openstack-nova-compute.service | 16 - .../system/openstack-nova-conductor.service | 16 - .../system/openstack-nova-config-setup.service | 11 - .../system/openstack-nova-consoleauth.service | 15 - .../systemd/system/openstack-nova-db-setup.service | 13 - .../system/openstack-nova-novncproxy.service | 15 - .../system/openstack-nova-scheduler.service | 15 - .../system/openstack-nova-serialproxy.service | 15 - .../systemd/system/openvswitch-db-server.service | 12 - .../lib/systemd/system/openvswitch-setup.service | 11 - .../usr/lib/systemd/system/openvswitch.service | 12 - .../systemd/system/postgres-server-setup.service | 12 - .../usr/lib/systemd/system/postgres-server.service | 26 - .../usr/lib/systemd/system/rabbitmq-server.service | 16 - .../systemd/system/swift-controller-setup.service | 13 - .../usr/lib/systemd/system/swift-proxy.service | 14 - .../usr/share/openstack/ceilometer-config.yml | 36 - openstack/usr/share/openstack/ceilometer-db.yml | 50 - .../usr/share/openstack/ceilometer/ceilometer.conf | 1023 ------ openstack/usr/share/openstack/cinder-config.yml | 37 - openstack/usr/share/openstack/cinder-db.yml | 60 - openstack/usr/share/openstack/cinder-lvs.yml | 21 - openstack/usr/share/openstack/cinder/api-paste.ini | 60 - openstack/usr/share/openstack/cinder/cinder.conf | 2825 --------------- openstack/usr/share/openstack/cinder/policy.json | 80 - .../openstack/extras/00-disable-device.network | 2 - .../share/openstack/extras/60-device-dhcp.network | 5 - openstack/usr/share/openstack/glance.yml | 93 - .../share/openstack/glance/glance-api-paste.ini | 77 - .../usr/share/openstack/glance/glance-api.conf | 699 ---- .../usr/share/openstack/glance/glance-cache.conf | 200 - .../openstack/glance/glance-registry-paste.ini | 30 - .../share/openstack/glance/glance-registry.conf | 245 -- .../share/openstack/glance/glance-scrubber.conf | 108 - openstack/usr/share/openstack/glance/logging.conf | 54 - openstack/usr/share/openstack/glance/policy.json | 52 - .../usr/share/openstack/glance/schema-image.json | 28 - openstack/usr/share/openstack/horizon.yml | 47 - openstack/usr/share/openstack/hosts | 1 - openstack/usr/share/openstack/ironic.yml | 104 - openstack/usr/share/openstack/ironic/ironic.conf | 1247 ------- openstack/usr/share/openstack/ironic/policy.json | 5 - openstack/usr/share/openstack/iscsi.yml | 15 - openstack/usr/share/openstack/keystone.yml | 143 - .../share/openstack/keystone/keystone-paste.ini | 121 - .../usr/share/openstack/keystone/keystone.conf | 1588 -------- .../usr/share/openstack/keystone/logging.conf | 65 - openstack/usr/share/openstack/keystone/policy.json | 171 - openstack/usr/share/openstack/network.yml | 67 - openstack/usr/share/openstack/neutron-config.yml | 48 - openstack/usr/share/openstack/neutron-db.yml | 51 - .../usr/share/openstack/neutron/api-paste.ini | 30 - .../usr/share/openstack/neutron/dhcp_agent.ini | 89 - .../usr/share/openstack/neutron/fwaas_driver.ini | 3 - openstack/usr/share/openstack/neutron/l3_agent.ini | 103 - .../usr/share/openstack/neutron/lbaas_agent.ini | 42 - .../usr/share/openstack/neutron/metadata_agent.ini | 60 - 
.../usr/share/openstack/neutron/metering_agent.ini | 18 - openstack/usr/share/openstack/neutron/neutron.conf | 640 ---- .../neutron/plugins/bigswitch/restproxy.ini | 114 - .../neutron/plugins/bigswitch/ssl/ca_certs/README | 3 - .../plugins/bigswitch/ssl/host_certs/README | 6 - .../openstack/neutron/plugins/brocade/brocade.ini | 29 - .../neutron/plugins/cisco/cisco_cfg_agent.ini | 15 - .../neutron/plugins/cisco/cisco_plugins.ini | 100 - .../neutron/plugins/cisco/cisco_router_plugin.ini | 76 - .../neutron/plugins/cisco/cisco_vpn_agent.ini | 26 - .../neutron/plugins/embrane/heleos_conf.ini | 41 - .../plugins/hyperv/hyperv_neutron_plugin.ini | 63 - .../neutron/plugins/ibm/sdnve_neutron_plugin.ini | 50 - .../plugins/linuxbridge/linuxbridge_conf.ini | 78 - .../neutron/plugins/metaplugin/metaplugin.ini | 31 - .../openstack/neutron/plugins/midonet/midonet.ini | 19 - .../openstack/neutron/plugins/ml2/ml2_conf.ini | 86 - .../neutron/plugins/ml2/ml2_conf_arista.ini | 100 - .../neutron/plugins/ml2/ml2_conf_brocade.ini | 15 - .../neutron/plugins/ml2/ml2_conf_cisco.ini | 118 - .../neutron/plugins/ml2/ml2_conf_fslsdn.ini | 52 - .../neutron/plugins/ml2/ml2_conf_mlnx.ini | 4 - .../openstack/neutron/plugins/ml2/ml2_conf_ncs.ini | 28 - .../openstack/neutron/plugins/ml2/ml2_conf_odl.ini | 30 - .../openstack/neutron/plugins/ml2/ml2_conf_ofa.ini | 13 - .../neutron/plugins/ml2/ml2_conf_sriov.ini | 31 - .../openstack/neutron/plugins/mlnx/mlnx_conf.ini | 79 - .../share/openstack/neutron/plugins/nec/nec.ini | 60 - .../neutron/plugins/nuage/nuage_plugin.ini | 41 - .../neutron/plugins/oneconvergence/nvsdplugin.ini | 35 - .../plugins/opencontrail/contrailplugin.ini | 26 - .../plugins/openvswitch/ovs_neutron_plugin.ini | 190 - .../neutron/plugins/plumgrid/plumgrid.ini | 14 - .../share/openstack/neutron/plugins/ryu/ryu.ini | 44 - .../share/openstack/neutron/plugins/vmware/nsx.ini | 200 - openstack/usr/share/openstack/neutron/policy.json | 138 - .../usr/share/openstack/neutron/vpn_agent.ini | 14 - openstack/usr/share/openstack/nova-config.yml | 34 - openstack/usr/share/openstack/nova-db.yml | 51 - openstack/usr/share/openstack/nova/api-paste.ini | 118 - openstack/usr/share/openstack/nova/cells.json | 26 - openstack/usr/share/openstack/nova/logging.conf | 81 - .../usr/share/openstack/nova/nova-compute.conf | 4 - openstack/usr/share/openstack/nova/nova.conf | 3809 -------------------- openstack/usr/share/openstack/nova/policy.json | 324 -- openstack/usr/share/openstack/openvswitch.yml | 38 - openstack/usr/share/openstack/postgres.yml | 48 - openstack/usr/share/openstack/postgres/pg_hba.conf | 5 - .../usr/share/openstack/postgres/postgresql.conf | 11 - .../usr/share/openstack/rabbitmq/rabbitmq-env.conf | 3 - .../usr/share/openstack/rabbitmq/rabbitmq.config | 9 - openstack/usr/share/openstack/swift-controller.yml | 52 - openstack/usr/share/swift/etc/rsyncd.j2 | 23 - .../usr/share/swift/etc/swift/proxy-server.j2 | 630 ---- swift/etc/ntp.conf | 25 - swift/manifest | 15 - swift/usr/lib/systemd/system/rsync.service | 11 - .../lib/systemd/system/swift-storage-setup.service | 12 - swift/usr/lib/systemd/system/swift-storage.service | 12 - swift/usr/share/swift/etc/rsyncd.j2 | 23 - swift/usr/share/swift/etc/swift/account-server.j2 | 192 - .../usr/share/swift/etc/swift/container-server.j2 | 203 -- swift/usr/share/swift/etc/swift/object-server.j2 | 283 -- swift/usr/share/swift/etc/swift/swift.j2 | 118 - swift/usr/share/swift/hosts | 1 - swift/usr/share/swift/swift-storage.yml | 24 - vagrant-files/home/vagrant/.ssh/authorized_keys | 1 
- vagrant-files/manifest | 4 - 406 files changed, 21656 insertions(+), 21656 deletions(-) delete mode 100644 chef/manifest delete mode 100755 distbuild/lib/systemd/system-generators/ccache-nfs-mount-generator delete mode 100644 distbuild/manifest delete mode 100644 distbuild/usr/lib/distbuild-setup/ansible/distbuild-setup.yml delete mode 100644 distbuild/usr/lib/distbuild-setup/ansible/hosts delete mode 100644 distbuild/usr/lib/systemd/system/distbuild-setup.service delete mode 100644 distbuild/usr/lib/systemd/system/morph-cache-server.service delete mode 100644 distbuild/usr/lib/systemd/system/morph-controller-helper.service delete mode 100644 distbuild/usr/lib/systemd/system/morph-controller.service delete mode 100644 distbuild/usr/lib/systemd/system/morph-worker-helper.service delete mode 100644 distbuild/usr/lib/systemd/system/morph-worker.service delete mode 120000 distbuild/usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service delete mode 100644 distbuild/usr/share/distbuild-setup/morph-cache-server.conf delete mode 100644 distbuild/usr/share/distbuild-setup/morph-controller-helper.conf delete mode 100644 distbuild/usr/share/distbuild-setup/morph-controller.conf delete mode 100644 distbuild/usr/share/distbuild-setup/morph-worker-helper.conf delete mode 100644 distbuild/usr/share/distbuild-setup/morph-worker.conf delete mode 100644 distbuild/usr/share/distbuild-setup/morph.conf delete mode 100644 essential-files/etc/inputrc delete mode 100644 essential-files/etc/os-release delete mode 100644 essential-files/etc/profile delete mode 100644 essential-files/manifest delete mode 100644 essential-files/usr/lib/tmpfiles.d/shutdownramfs.conf delete mode 120000 genivi-devel-system-armv7/etc/morph.conf delete mode 100644 genivi-devel-system-armv7/manifest delete mode 100644 genivi-devel-system-armv7/src/morph.conf create mode 100644 install-files/chef/manifest create mode 100755 install-files/distbuild/lib/systemd/system-generators/ccache-nfs-mount-generator create mode 100644 install-files/distbuild/manifest create mode 100644 install-files/distbuild/usr/lib/distbuild-setup/ansible/distbuild-setup.yml create mode 100644 install-files/distbuild/usr/lib/distbuild-setup/ansible/hosts create mode 100644 install-files/distbuild/usr/lib/systemd/system/distbuild-setup.service create mode 100644 install-files/distbuild/usr/lib/systemd/system/morph-cache-server.service create mode 100644 install-files/distbuild/usr/lib/systemd/system/morph-controller-helper.service create mode 100644 install-files/distbuild/usr/lib/systemd/system/morph-controller.service create mode 100644 install-files/distbuild/usr/lib/systemd/system/morph-worker-helper.service create mode 100644 install-files/distbuild/usr/lib/systemd/system/morph-worker.service create mode 120000 install-files/distbuild/usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service create mode 100644 install-files/distbuild/usr/share/distbuild-setup/morph-cache-server.conf create mode 100644 install-files/distbuild/usr/share/distbuild-setup/morph-controller-helper.conf create mode 100644 install-files/distbuild/usr/share/distbuild-setup/morph-controller.conf create mode 100644 install-files/distbuild/usr/share/distbuild-setup/morph-worker-helper.conf create mode 100644 install-files/distbuild/usr/share/distbuild-setup/morph-worker.conf create mode 100644 install-files/distbuild/usr/share/distbuild-setup/morph.conf create mode 100644 install-files/essential-files/etc/inputrc create mode 100644 
install-files/essential-files/etc/os-release create mode 100644 install-files/essential-files/etc/profile create mode 100644 install-files/essential-files/manifest create mode 100644 install-files/essential-files/usr/lib/tmpfiles.d/shutdownramfs.conf create mode 120000 install-files/genivi-devel-system-armv7/etc/morph.conf create mode 100644 install-files/genivi-devel-system-armv7/manifest create mode 100644 install-files/genivi-devel-system-armv7/src/morph.conf create mode 100644 install-files/moonshot/boot/m400-1003.dtb create mode 100644 install-files/moonshot/manifest create mode 100644 install-files/openstack/etc/horizon/apache-horizon.conf create mode 100644 install-files/openstack/etc/horizon/openstack_dashboard/local_settings.py create mode 100644 install-files/openstack/etc/tempest/tempest.conf create mode 100644 install-files/openstack/manifest create mode 100644 install-files/openstack/usr/lib/sysctl.d/neutron.conf create mode 100644 install-files/openstack/usr/lib/systemd/system/apache-httpd.service create mode 100644 install-files/openstack/usr/lib/systemd/system/iscsi-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-api.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-central.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-collector.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-compute.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-config-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-db-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-notification.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-cinder-api.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-cinder-backup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-cinder-config-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-cinder-db-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-cinder-lv-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-cinder-scheduler.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-cinder-volume.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-glance-api.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-glance-registry.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-glance-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-horizon-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-ironic-api.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-ironic-conductor.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-ironic-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-keystone-setup.service create mode 
100644 install-files/openstack/usr/lib/systemd/system/openstack-keystone.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-network-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-neutron-config-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-neutron-db-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-neutron-dhcp-agent.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-neutron-l3-agent.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-neutron-metadata-agent.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-neutron-server.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-nova-api.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-nova-cert.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-nova-compute.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-nova-conductor.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-nova-config-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-nova-consoleauth.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-nova-db-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-nova-novncproxy.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-nova-scheduler.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openstack-nova-serialproxy.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openvswitch-db-server.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openvswitch-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/openvswitch.service create mode 100644 install-files/openstack/usr/lib/systemd/system/postgres-server-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/postgres-server.service create mode 100644 install-files/openstack/usr/lib/systemd/system/rabbitmq-server.service create mode 100644 install-files/openstack/usr/lib/systemd/system/swift-controller-setup.service create mode 100644 install-files/openstack/usr/lib/systemd/system/swift-proxy.service create mode 100644 install-files/openstack/usr/share/openstack/ceilometer-config.yml create mode 100644 install-files/openstack/usr/share/openstack/ceilometer-db.yml create mode 100644 install-files/openstack/usr/share/openstack/ceilometer/ceilometer.conf create mode 100644 install-files/openstack/usr/share/openstack/cinder-config.yml create mode 100644 install-files/openstack/usr/share/openstack/cinder-db.yml create mode 100644 install-files/openstack/usr/share/openstack/cinder-lvs.yml create mode 100644 install-files/openstack/usr/share/openstack/cinder/api-paste.ini create mode 100644 install-files/openstack/usr/share/openstack/cinder/cinder.conf create mode 100644 install-files/openstack/usr/share/openstack/cinder/policy.json create mode 100644 
install-files/openstack/usr/share/openstack/extras/00-disable-device.network create mode 100644 install-files/openstack/usr/share/openstack/extras/60-device-dhcp.network create mode 100644 install-files/openstack/usr/share/openstack/glance.yml create mode 100644 install-files/openstack/usr/share/openstack/glance/glance-api-paste.ini create mode 100644 install-files/openstack/usr/share/openstack/glance/glance-api.conf create mode 100644 install-files/openstack/usr/share/openstack/glance/glance-cache.conf create mode 100644 install-files/openstack/usr/share/openstack/glance/glance-registry-paste.ini create mode 100644 install-files/openstack/usr/share/openstack/glance/glance-registry.conf create mode 100644 install-files/openstack/usr/share/openstack/glance/glance-scrubber.conf create mode 100644 install-files/openstack/usr/share/openstack/glance/logging.conf create mode 100644 install-files/openstack/usr/share/openstack/glance/policy.json create mode 100644 install-files/openstack/usr/share/openstack/glance/schema-image.json create mode 100644 install-files/openstack/usr/share/openstack/horizon.yml create mode 100644 install-files/openstack/usr/share/openstack/hosts create mode 100644 install-files/openstack/usr/share/openstack/ironic.yml create mode 100644 install-files/openstack/usr/share/openstack/ironic/ironic.conf create mode 100644 install-files/openstack/usr/share/openstack/ironic/policy.json create mode 100644 install-files/openstack/usr/share/openstack/iscsi.yml create mode 100644 install-files/openstack/usr/share/openstack/keystone.yml create mode 100644 install-files/openstack/usr/share/openstack/keystone/keystone-paste.ini create mode 100644 install-files/openstack/usr/share/openstack/keystone/keystone.conf create mode 100644 install-files/openstack/usr/share/openstack/keystone/logging.conf create mode 100644 install-files/openstack/usr/share/openstack/keystone/policy.json create mode 100644 install-files/openstack/usr/share/openstack/network.yml create mode 100644 install-files/openstack/usr/share/openstack/neutron-config.yml create mode 100644 install-files/openstack/usr/share/openstack/neutron-db.yml create mode 100644 install-files/openstack/usr/share/openstack/neutron/api-paste.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/dhcp_agent.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/fwaas_driver.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/l3_agent.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/lbaas_agent.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/metadata_agent.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/metering_agent.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/neutron.conf create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini create mode 100644 
install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/nec/nec.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini create mode 100644 install-files/openstack/usr/share/openstack/neutron/policy.json create mode 100644 install-files/openstack/usr/share/openstack/neutron/vpn_agent.ini create mode 100644 install-files/openstack/usr/share/openstack/nova-config.yml create mode 100644 install-files/openstack/usr/share/openstack/nova-db.yml create mode 100644 install-files/openstack/usr/share/openstack/nova/api-paste.ini create mode 100644 install-files/openstack/usr/share/openstack/nova/cells.json create mode 100644 install-files/openstack/usr/share/openstack/nova/logging.conf create mode 100644 install-files/openstack/usr/share/openstack/nova/nova-compute.conf create mode 100644 install-files/openstack/usr/share/openstack/nova/nova.conf create mode 100644 
install-files/openstack/usr/share/openstack/nova/policy.json create mode 100644 install-files/openstack/usr/share/openstack/openvswitch.yml create mode 100644 install-files/openstack/usr/share/openstack/postgres.yml create mode 100644 install-files/openstack/usr/share/openstack/postgres/pg_hba.conf create mode 100644 install-files/openstack/usr/share/openstack/postgres/postgresql.conf create mode 100644 install-files/openstack/usr/share/openstack/rabbitmq/rabbitmq-env.conf create mode 100644 install-files/openstack/usr/share/openstack/rabbitmq/rabbitmq.config create mode 100644 install-files/openstack/usr/share/openstack/swift-controller.yml create mode 100644 install-files/openstack/usr/share/swift/etc/rsyncd.j2 create mode 100644 install-files/openstack/usr/share/swift/etc/swift/proxy-server.j2 create mode 100644 install-files/swift/etc/ntp.conf create mode 100644 install-files/swift/manifest create mode 100644 install-files/swift/usr/lib/systemd/system/rsync.service create mode 100644 install-files/swift/usr/lib/systemd/system/swift-storage-setup.service create mode 100644 install-files/swift/usr/lib/systemd/system/swift-storage.service create mode 100644 install-files/swift/usr/share/swift/etc/rsyncd.j2 create mode 100644 install-files/swift/usr/share/swift/etc/swift/account-server.j2 create mode 100644 install-files/swift/usr/share/swift/etc/swift/container-server.j2 create mode 100644 install-files/swift/usr/share/swift/etc/swift/object-server.j2 create mode 100644 install-files/swift/usr/share/swift/etc/swift/swift.j2 create mode 100644 install-files/swift/usr/share/swift/hosts create mode 100644 install-files/swift/usr/share/swift/swift-storage.yml create mode 100644 install-files/vagrant-files/home/vagrant/.ssh/authorized_keys create mode 100644 install-files/vagrant-files/manifest delete mode 100644 moonshot/boot/m400-1003.dtb delete mode 100644 moonshot/manifest delete mode 100644 openstack/etc/horizon/apache-horizon.conf delete mode 100644 openstack/etc/horizon/openstack_dashboard/local_settings.py delete mode 100644 openstack/etc/tempest/tempest.conf delete mode 100644 openstack/manifest delete mode 100644 openstack/usr/lib/sysctl.d/neutron.conf delete mode 100644 openstack/usr/lib/systemd/system/apache-httpd.service delete mode 100644 openstack/usr/lib/systemd/system/iscsi-setup.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-ceilometer-api.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-ceilometer-central.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-ceilometer-collector.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-ceilometer-compute.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-ceilometer-config-setup.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-ceilometer-db-setup.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-ceilometer-notification.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-cinder-api.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-cinder-backup.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-cinder-config-setup.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-cinder-db-setup.service delete mode 100644 
openstack/usr/lib/systemd/system/openstack-cinder-lv-setup.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-cinder-scheduler.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-cinder-volume.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-glance-api.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-glance-registry.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-glance-setup.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-horizon-setup.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-ironic-api.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-ironic-conductor.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-ironic-setup.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-keystone-setup.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-keystone.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-network-setup.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-neutron-config-setup.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-neutron-db-setup.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-neutron-dhcp-agent.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-neutron-l3-agent.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-neutron-metadata-agent.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-neutron-server.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-nova-api.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-nova-cert.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-nova-compute.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-nova-conductor.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-nova-config-setup.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-nova-consoleauth.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-nova-db-setup.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-nova-novncproxy.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-nova-scheduler.service delete mode 100644 openstack/usr/lib/systemd/system/openstack-nova-serialproxy.service delete mode 100644 openstack/usr/lib/systemd/system/openvswitch-db-server.service delete mode 100644 openstack/usr/lib/systemd/system/openvswitch-setup.service delete mode 100644 openstack/usr/lib/systemd/system/openvswitch.service delete mode 100644 openstack/usr/lib/systemd/system/postgres-server-setup.service delete mode 100644 openstack/usr/lib/systemd/system/postgres-server.service delete mode 100644 openstack/usr/lib/systemd/system/rabbitmq-server.service delete mode 100644 openstack/usr/lib/systemd/system/swift-controller-setup.service delete mode 100644 openstack/usr/lib/systemd/system/swift-proxy.service delete mode 100644 openstack/usr/share/openstack/ceilometer-config.yml delete mode 100644 openstack/usr/share/openstack/ceilometer-db.yml delete mode 100644 openstack/usr/share/openstack/ceilometer/ceilometer.conf delete mode 100644 openstack/usr/share/openstack/cinder-config.yml delete mode 
100644 openstack/usr/share/openstack/cinder-db.yml delete mode 100644 openstack/usr/share/openstack/cinder-lvs.yml delete mode 100644 openstack/usr/share/openstack/cinder/api-paste.ini delete mode 100644 openstack/usr/share/openstack/cinder/cinder.conf delete mode 100644 openstack/usr/share/openstack/cinder/policy.json delete mode 100644 openstack/usr/share/openstack/extras/00-disable-device.network delete mode 100644 openstack/usr/share/openstack/extras/60-device-dhcp.network delete mode 100644 openstack/usr/share/openstack/glance.yml delete mode 100644 openstack/usr/share/openstack/glance/glance-api-paste.ini delete mode 100644 openstack/usr/share/openstack/glance/glance-api.conf delete mode 100644 openstack/usr/share/openstack/glance/glance-cache.conf delete mode 100644 openstack/usr/share/openstack/glance/glance-registry-paste.ini delete mode 100644 openstack/usr/share/openstack/glance/glance-registry.conf delete mode 100644 openstack/usr/share/openstack/glance/glance-scrubber.conf delete mode 100644 openstack/usr/share/openstack/glance/logging.conf delete mode 100644 openstack/usr/share/openstack/glance/policy.json delete mode 100644 openstack/usr/share/openstack/glance/schema-image.json delete mode 100644 openstack/usr/share/openstack/horizon.yml delete mode 100644 openstack/usr/share/openstack/hosts delete mode 100644 openstack/usr/share/openstack/ironic.yml delete mode 100644 openstack/usr/share/openstack/ironic/ironic.conf delete mode 100644 openstack/usr/share/openstack/ironic/policy.json delete mode 100644 openstack/usr/share/openstack/iscsi.yml delete mode 100644 openstack/usr/share/openstack/keystone.yml delete mode 100644 openstack/usr/share/openstack/keystone/keystone-paste.ini delete mode 100644 openstack/usr/share/openstack/keystone/keystone.conf delete mode 100644 openstack/usr/share/openstack/keystone/logging.conf delete mode 100644 openstack/usr/share/openstack/keystone/policy.json delete mode 100644 openstack/usr/share/openstack/network.yml delete mode 100644 openstack/usr/share/openstack/neutron-config.yml delete mode 100644 openstack/usr/share/openstack/neutron-db.yml delete mode 100644 openstack/usr/share/openstack/neutron/api-paste.ini delete mode 100644 openstack/usr/share/openstack/neutron/dhcp_agent.ini delete mode 100644 openstack/usr/share/openstack/neutron/fwaas_driver.ini delete mode 100644 openstack/usr/share/openstack/neutron/l3_agent.ini delete mode 100644 openstack/usr/share/openstack/neutron/lbaas_agent.ini delete mode 100644 openstack/usr/share/openstack/neutron/metadata_agent.ini delete mode 100644 openstack/usr/share/openstack/neutron/metering_agent.ini delete mode 100644 openstack/usr/share/openstack/neutron/neutron.conf delete mode 100644 openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README delete mode 100644 openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README delete mode 100644 openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini delete mode 100644 
openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/nec/nec.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini delete mode 100644 openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini delete mode 100644 openstack/usr/share/openstack/neutron/policy.json delete mode 100644 openstack/usr/share/openstack/neutron/vpn_agent.ini delete mode 100644 openstack/usr/share/openstack/nova-config.yml delete mode 100644 openstack/usr/share/openstack/nova-db.yml delete mode 100644 openstack/usr/share/openstack/nova/api-paste.ini delete mode 100644 openstack/usr/share/openstack/nova/cells.json delete mode 100644 openstack/usr/share/openstack/nova/logging.conf delete mode 100644 openstack/usr/share/openstack/nova/nova-compute.conf delete mode 100644 openstack/usr/share/openstack/nova/nova.conf delete mode 100644 openstack/usr/share/openstack/nova/policy.json delete mode 100644 openstack/usr/share/openstack/openvswitch.yml delete mode 100644 openstack/usr/share/openstack/postgres.yml delete mode 100644 openstack/usr/share/openstack/postgres/pg_hba.conf delete mode 100644 openstack/usr/share/openstack/postgres/postgresql.conf delete mode 100644 openstack/usr/share/openstack/rabbitmq/rabbitmq-env.conf delete mode 100644 openstack/usr/share/openstack/rabbitmq/rabbitmq.config delete mode 100644 openstack/usr/share/openstack/swift-controller.yml delete mode 100644 openstack/usr/share/swift/etc/rsyncd.j2 delete mode 100644 openstack/usr/share/swift/etc/swift/proxy-server.j2 delete mode 100644 swift/etc/ntp.conf delete mode 100644 swift/manifest delete mode 100644 swift/usr/lib/systemd/system/rsync.service delete mode 100644 
swift/usr/lib/systemd/system/swift-storage-setup.service delete mode 100644 swift/usr/lib/systemd/system/swift-storage.service delete mode 100644 swift/usr/share/swift/etc/rsyncd.j2 delete mode 100644 swift/usr/share/swift/etc/swift/account-server.j2 delete mode 100644 swift/usr/share/swift/etc/swift/container-server.j2 delete mode 100644 swift/usr/share/swift/etc/swift/object-server.j2 delete mode 100644 swift/usr/share/swift/etc/swift/swift.j2 delete mode 100644 swift/usr/share/swift/hosts delete mode 100644 swift/usr/share/swift/swift-storage.yml delete mode 100644 vagrant-files/home/vagrant/.ssh/authorized_keys delete mode 100644 vagrant-files/manifest diff --git a/chef/manifest b/chef/manifest deleted file mode 100644 index de6cc542..00000000 --- a/chef/manifest +++ /dev/null @@ -1,3 +0,0 @@ -0040755 0 0 /root -0040700 1000 1000 /root/.ssh -0100600 1000 1000 /root/.ssh/authorized_keys diff --git a/clusters/cephclient.morph b/clusters/cephclient.morph index f9cc04a2..2585fbdf 100644 --- a/clusters/cephclient.morph +++ b/clusters/cephclient.morph @@ -17,4 +17,4 @@ systems: HOSTNAME: CephNode4 # You must install authorized_keys in chef/root/.ssh/ before this will work. - INSTALL_FILES: chef/manifest + INSTALL_FILES: install-files/chef/manifest diff --git a/clusters/ci.morph b/clusters/ci.morph index d37733ab..69b7c76a 100644 --- a/clusters/ci.morph +++ b/clusters/ci.morph @@ -74,7 +74,7 @@ systems: type: extensions/rawdisk location: baserock-openstack-system-x86_64.img DISK_SIZE: 5G - INSTALL_FILES: openstack/manifest + INSTALL_FILES: install-files/openstack/manifest HOSTNAME: onenode RABBITMQ_HOST: onenode RABBITMQ_PORT: 5672 diff --git a/clusters/example-distbuild-cluster.morph b/clusters/example-distbuild-cluster.morph index b5cd11ef..5208a5ca 100644 --- a/clusters/example-distbuild-cluster.morph +++ b/clusters/example-distbuild-cluster.morph @@ -16,7 +16,7 @@ systems: DISTBUILD_CONTROLLER: false DISTBUILD_WORKER: true FSTAB_SRC: LABEL=src /srv/distbuild auto defaults,rw,noatime 0 2 - INSTALL_FILES: distbuild/manifest + INSTALL_FILES: install-files/distbuild/manifest NFSBOOT_CONFIGURE: true TROVE_ID: $MY_TROVE WORKER_SSH_KEY: ssh-keys/worker.key diff --git a/clusters/example-swift-storage-cluster.morph b/clusters/example-swift-storage-cluster.morph index 2a512709..e5e7b6ab 100644 --- a/clusters/example-swift-storage-cluster.morph +++ b/clusters/example-swift-storage-cluster.morph @@ -3,7 +3,7 @@ kind: cluster systems: - morph: systems/swift-system-x86_64.morph deploy-defaults: - INSTALL_FILES: swift/manifest + INSTALL_FILES: install-files/swift/manifest CONTROLLER_HOST_ADDRESS: diff --git a/clusters/mason-openstack.morph b/clusters/mason-openstack.morph index 915e14e7..935e2496 100644 --- a/clusters/mason-openstack.morph +++ b/clusters/mason-openstack.morph @@ -10,7 +10,7 @@ systems: CONTROLLERHOST: controller-hostname DISTBUILD_CONTROLLER: true DISTBUILD_WORKER: true - INSTALL_FILES: distbuild/manifest + INSTALL_FILES: install-files/distbuild/manifest RAM_SIZE: 8G TROVE_HOST: your-upstream-trove TROVE_ID: your-upstream-trove-prefix diff --git a/clusters/mason.morph b/clusters/mason.morph index 21399ea0..376cf337 100644 --- a/clusters/mason.morph +++ b/clusters/mason.morph @@ -36,7 +36,7 @@ systems: CONTROLLERHOST: red-box-v1-controller.example.com DISTBUILD_CONTROLLER: false DISTBUILD_WORKER: true - INSTALL_FILES: distbuild/manifest + INSTALL_FILES: install-files/distbuild/manifest RAM_SIZE: 8G TROVE_HOST: upstream-trove TROVE_ID: upstream-trove diff --git 
a/clusters/moonshot-m2-armv8b64.morph b/clusters/moonshot-m2-armv8b64.morph index c46b1d9e..c6d62ca2 100644 --- a/clusters/moonshot-m2-armv8b64.morph +++ b/clusters/moonshot-m2-armv8b64.morph @@ -38,7 +38,7 @@ systems: HOSTNAME: installer-system-c31n1 DTB_PATH: boot/m400-1003.dtb KERNEL_ARGS: console=ttyS0,9600n8r init=/usr/lib/baserock-installer/installer - INSTALL_FILES: moonshot/manifest + INSTALL_FILES: install-files/moonshot/manifest MOONSHOT_KERNEL: yes subsystems: - morph: systems/devel-system-armv8b64.morph @@ -48,7 +48,7 @@ systems: location: /rootfs HOSTNAME: baserock-c31n1 DTB_PATH: boot/m400-1003.dtb - INSTALL_FILES: moonshot/manifest + INSTALL_FILES: install-files/moonshot/manifest MOONSHOT_KERNEL: yes BOOT_DEVICE: /dev/sda1 ROOT_DEVICE: /dev/sda2 diff --git a/clusters/moonshot-pxe-armv8b64.morph b/clusters/moonshot-pxe-armv8b64.morph index a16a3602..ffee0392 100644 --- a/clusters/moonshot-pxe-armv8b64.morph +++ b/clusters/moonshot-pxe-armv8b64.morph @@ -26,5 +26,5 @@ systems: HOSTNAME: baserock-c31n1 DTB_PATH: boot/m400-1003.dtb KERNEL_ARGS: console=ttyS0,9600n8r rw - INSTALL_FILES: moonshot/manifest + INSTALL_FILES: install-files/moonshot/manifest MOONSHOT_KERNEL: yes diff --git a/clusters/moonshot-pxe-armv8l64.morph b/clusters/moonshot-pxe-armv8l64.morph index 9fd7cee7..62ee92a2 100644 --- a/clusters/moonshot-pxe-armv8l64.morph +++ b/clusters/moonshot-pxe-armv8l64.morph @@ -18,5 +18,5 @@ systems: DTB_PATH: boot/m400-1003.dtb HOSTNAME: baserock-m400-node31 MOONSHOT_KERNEL: yes - INSTALL_FILES: moonshot/manifest + INSTALL_FILES: install-files/moonshot/manifest PXE_INSTALLER: no diff --git a/clusters/openstack-one-node-swift.morph b/clusters/openstack-one-node-swift.morph index 37429283..de7066d6 100644 --- a/clusters/openstack-one-node-swift.morph +++ b/clusters/openstack-one-node-swift.morph @@ -43,7 +43,7 @@ systems: type: extensions/rawdisk location: baserock-openstack-system-x86_64.img DISK_SIZE: 10G - INSTALL_FILES: openstack/manifest swift/manifest + INSTALL_FILES: install-files/openstack/manifest install-files/swift/manifest HOSTNAME: onenode diff --git a/clusters/openstack-one-node.morph b/clusters/openstack-one-node.morph index d6b4c582..ab12f9b3 100644 --- a/clusters/openstack-one-node.morph +++ b/clusters/openstack-one-node.morph @@ -43,7 +43,7 @@ systems: type: extensions/rawdisk location: baserock-openstack-system-x86_64.img DISK_SIZE: 10G - INSTALL_FILES: openstack/manifest + INSTALL_FILES: install-files/openstack/manifest HOSTNAME: onenode diff --git a/clusters/openstack-three-node-installer.morph b/clusters/openstack-three-node-installer.morph index afad454a..a316a56c 100644 --- a/clusters/openstack-three-node-installer.morph +++ b/clusters/openstack-three-node-installer.morph @@ -79,7 +79,7 @@ systems: network-to-install: &stack-node type: extensions/sysroot location: rootfs - INSTALL_FILES: openstack/manifest + INSTALL_FILES: install-files/openstack/manifest INITRAMFS_PATH: boot/initramfs.gz HOSTNAME: threenode-network diff --git a/clusters/openstack-two-node-installer.morph b/clusters/openstack-two-node-installer.morph index 53d0b7b1..d4f94cfb 100644 --- a/clusters/openstack-two-node-installer.morph +++ b/clusters/openstack-two-node-installer.morph @@ -78,7 +78,7 @@ systems: controller-to-install: &stack-node type: extensions/sysroot location: rootfs - INSTALL_FILES: openstack/manifest + INSTALL_FILES: install-files/openstack/manifest INITRAMFS_PATH: boot/initramfs.gz HOSTNAME: twonode-controller diff --git 
a/distbuild/lib/systemd/system-generators/ccache-nfs-mount-generator b/distbuild/lib/systemd/system-generators/ccache-nfs-mount-generator deleted file mode 100755 index 127bc84f..00000000 --- a/distbuild/lib/systemd/system-generators/ccache-nfs-mount-generator +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh -read trove_host "$1/srv-distbuild-ccache.mount" < /root/.ssh/id_rsa.pub creates=/root/.ssh/id_rsa.pub - - - name: Add trove's host key - shell: | - trove_key="$(ssh-keyscan -t dsa,ecdsa,rsa {{ TROVE_HOST|quote }})" - if [ -n "$trove_key" ]; then - echo "$trove_key" > /etc/ssh/ssh_known_hosts - fi - creates=/etc/ssh/ssh_known_hosts - - # This is a kludge. We can add the host key for the TROVE_HOST that was - # specified, but users may access the Trove by other names, e.g. IP address - # or domain name. Distbuild is currently not safe to run except on a private - # network where host key checking is not important, so we disable it by - # default to avoid errors when users don't stick to using the exact same - # TROVE_HOST in repo URLs. - - name: Disable strict SSH host key checking - lineinfile: - dest: /etc/ssh/ssh_config - line: StrictHostKeyChecking no - - - name: Enable the morph-cache-server service - service: name=morph-cache-server.service enabled=yes - register: morph_cache_server_service - - name: Restart the morph-cache-server service - service: name=morph-cache-server state=restarted - when: morph_cache_server_service|changed - - - name: Enable the morph-worker service - service: name=morph-worker.service enabled=yes - register: morph_worker_service - when: DISTBUILD_WORKER - - name: Restart the morph-worker service - service: name=morph-worker state=restarted - when: morph_worker_service|changed - - - name: Enable the morph-worker-helper service - service: name=morph-worker-helper.service enabled=yes - register: morph_worker_helper_service - when: DISTBUILD_WORKER - - name: Restart the morph-worker-helper service - service: name=morph-worker-helper state=restarted - when: morph_worker_helper_service|changed - - - name: Enable the morph-controller service - service: name=morph-controller.service enabled=yes - register: morph_controller_service - when: DISTBUILD_CONTROLLER - - name: Restart the morph-controller service - service: name=morph-controller state=restarted - when: morph_controller_service|changed - - - name: Enable the morph-controller-helper service - service: name=morph-controller-helper.service enabled=yes - register: morph_controller_helper_service - when: DISTBUILD_CONTROLLER - - name: Restart the morph-controller-helper service - service: name=morph-controller-helper state=restarted - when: morph_controller_helper_service|changed diff --git a/distbuild/usr/lib/distbuild-setup/ansible/hosts b/distbuild/usr/lib/distbuild-setup/ansible/hosts deleted file mode 100644 index 5b97818d..00000000 --- a/distbuild/usr/lib/distbuild-setup/ansible/hosts +++ /dev/null @@ -1 +0,0 @@ -localhost ansible_connection=local diff --git a/distbuild/usr/lib/systemd/system/distbuild-setup.service b/distbuild/usr/lib/systemd/system/distbuild-setup.service deleted file mode 100644 index ec5f5a2d..00000000 --- a/distbuild/usr/lib/systemd/system/distbuild-setup.service +++ /dev/null @@ -1,16 +0,0 @@ -[Unit] -Description=Run distbuild-setup Ansible scripts -Requires=network.target -After=network.target -Requires=opensshd.service -After=opensshd.service - -# If there's a shared /var subvolume, it must be mounted before this -# unit runs. 
-Requires=local-fs.target -After=local-fs.target - -ConditionPathExists=/etc/distbuild/distbuild.conf - -[Service] -ExecStart=/usr/bin/ansible-playbook -v -i /usr/lib/distbuild-setup/ansible/hosts /usr/lib/distbuild-setup/ansible/distbuild-setup.yml diff --git a/distbuild/usr/lib/systemd/system/morph-cache-server.service b/distbuild/usr/lib/systemd/system/morph-cache-server.service deleted file mode 100644 index f55f3b6d..00000000 --- a/distbuild/usr/lib/systemd/system/morph-cache-server.service +++ /dev/null @@ -1,12 +0,0 @@ -[Install] -WantedBy=multi-user.target - -[Unit] -Description=Morph cache server -Requires=local-fs.target network.target -After=local-fs.target network.target -ConditionPathExists=/etc/morph-cache-server.conf - -[Service] -ExecStart=/usr/bin/morph-cache-server -Restart=always diff --git a/distbuild/usr/lib/systemd/system/morph-controller-helper.service b/distbuild/usr/lib/systemd/system/morph-controller-helper.service deleted file mode 100644 index 3f30cbcf..00000000 --- a/distbuild/usr/lib/systemd/system/morph-controller-helper.service +++ /dev/null @@ -1,13 +0,0 @@ -[Install] -WantedBy=multi-user.target - -[Unit] -Description=Morph distributed build controller helper -Requires=morph-controller.service -After=morph-controller.service -ConditionPathExists=/etc/morph-controller.conf -ConditionPathExists=/etc/morph-controller-helper.conf - -[Service] -ExecStart=/usr/bin/distbuild-helper --config /etc/morph-controller-helper.conf -Restart=always diff --git a/distbuild/usr/lib/systemd/system/morph-controller.service b/distbuild/usr/lib/systemd/system/morph-controller.service deleted file mode 100644 index 1556d232..00000000 --- a/distbuild/usr/lib/systemd/system/morph-controller.service +++ /dev/null @@ -1,12 +0,0 @@ -[Install] -WantedBy=multi-user.target - -[Unit] -Description=Morph distributed build controller -Requires=local-fs.target network.target -After=local-fs.target network.target -ConditionPathExists=/etc/morph-controller.conf - -[Service] -ExecStart=/usr/bin/morph controller-daemon --config /etc/morph-controller.conf -Restart=always diff --git a/distbuild/usr/lib/systemd/system/morph-worker-helper.service b/distbuild/usr/lib/systemd/system/morph-worker-helper.service deleted file mode 100644 index 28400701..00000000 --- a/distbuild/usr/lib/systemd/system/morph-worker-helper.service +++ /dev/null @@ -1,13 +0,0 @@ -[Install] -WantedBy=multi-user.target - -[Unit] -Description=Morph distributed build worker helper -Requires=morph-worker.service -After=morph-worker.service -ConditionPathExists=/etc/morph-worker.conf -ConditionPathExists=/etc/morph-worker-helper.conf - -[Service] -ExecStart=/usr/bin/distbuild-helper --config /etc/morph-worker-helper.conf -Restart=always diff --git a/distbuild/usr/lib/systemd/system/morph-worker.service b/distbuild/usr/lib/systemd/system/morph-worker.service deleted file mode 100644 index 90fea404..00000000 --- a/distbuild/usr/lib/systemd/system/morph-worker.service +++ /dev/null @@ -1,13 +0,0 @@ -[Install] -WantedBy=multi-user.target - -[Unit] -Description=Morph distributed build worker -Requires=local-fs.target network.target -Wants=srv-distbuild-ccache.mount -After=local-fs.target network.target srv-distbuild-ccache.mount -ConditionPathExists=/etc/morph-worker.conf - -[Service] -ExecStart=/usr/bin/morph worker-daemon --config /etc/morph-worker.conf -Restart=always diff --git a/distbuild/usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service 
b/distbuild/usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service deleted file mode 120000 index 8f06febd..00000000 --- a/distbuild/usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service +++ /dev/null @@ -1 +0,0 @@ -../distbuild-setup.service \ No newline at end of file diff --git a/distbuild/usr/share/distbuild-setup/morph-cache-server.conf b/distbuild/usr/share/distbuild-setup/morph-cache-server.conf deleted file mode 100644 index b9020e7d..00000000 --- a/distbuild/usr/share/distbuild-setup/morph-cache-server.conf +++ /dev/null @@ -1,5 +0,0 @@ -[config] -port = 8080 -artifact-dir = /srv/distbuild/artifacts -direct-mode = True -fcgi-server = False diff --git a/distbuild/usr/share/distbuild-setup/morph-controller-helper.conf b/distbuild/usr/share/distbuild-setup/morph-controller-helper.conf deleted file mode 100644 index 99d38739..00000000 --- a/distbuild/usr/share/distbuild-setup/morph-controller-helper.conf +++ /dev/null @@ -1,5 +0,0 @@ -[config] -log = /srv/distbuild/morph-controller-helper.log -log-max = 100M -parent-port = 5656 -parent-address = 127.0.0.1 diff --git a/distbuild/usr/share/distbuild-setup/morph-controller.conf b/distbuild/usr/share/distbuild-setup/morph-controller.conf deleted file mode 100644 index c16c0343..00000000 --- a/distbuild/usr/share/distbuild-setup/morph-controller.conf +++ /dev/null @@ -1,6 +0,0 @@ -[config] -log = /srv/distbuild/morph-controller.log -log-max = 100M -writeable-cache-server = http://{{ ARTIFACT_CACHE_SERVER }}:8081/ -worker = {{ WORKERS }} -controller-helper-address = 127.0.0.1 diff --git a/distbuild/usr/share/distbuild-setup/morph-worker-helper.conf b/distbuild/usr/share/distbuild-setup/morph-worker-helper.conf deleted file mode 100644 index 29d4ef3f..00000000 --- a/distbuild/usr/share/distbuild-setup/morph-worker-helper.conf +++ /dev/null @@ -1,4 +0,0 @@ -[config] -log = /srv/distbuild/morph-worker-helper.log -log-max = 100M -parent-address = 127.0.0.1 diff --git a/distbuild/usr/share/distbuild-setup/morph-worker.conf b/distbuild/usr/share/distbuild-setup/morph-worker.conf deleted file mode 100644 index fb382bad..00000000 --- a/distbuild/usr/share/distbuild-setup/morph-worker.conf +++ /dev/null @@ -1,4 +0,0 @@ -[config] -log = /srv/distbuild/morph-worker.log -log-max = 100M -controller-initiator-address = diff --git a/distbuild/usr/share/distbuild-setup/morph.conf b/distbuild/usr/share/distbuild-setup/morph.conf deleted file mode 100644 index 29de684c..00000000 --- a/distbuild/usr/share/distbuild-setup/morph.conf +++ /dev/null @@ -1,13 +0,0 @@ -[config] -log = /srv/distbuild/morph.log -log-max = 100M -cachedir = /srv/distbuild -tempdir = /srv/distbuild/tmp -trove-host = {{ TROVE_HOST }} -trove-id = {{ TROVE_ID }} -controller-initiator-address = {{ CONTROLLERHOST }} -tempdir-min-space = 4G -cachedir-min-space = 4G -build-ref-prefix = {{ TROVE_ID }} -artifact-cache-server = http://{{ ARTIFACT_CACHE_SERVER }}:8080/ -git-resolve-cache-server = http://{{ TROVE_HOST }}:8080/ diff --git a/essential-files/etc/inputrc b/essential-files/etc/inputrc deleted file mode 100644 index ddee44cd..00000000 --- a/essential-files/etc/inputrc +++ /dev/null @@ -1,38 +0,0 @@ -# Allow the command prompt to wrap to the next line -set horizontal-scroll-mode Off - -# Enable 8bit input -set meta-flag On -set input-meta On - -# Turns off 8th bit stripping -set convert-meta Off - -# Keep the 8th bit for display -set output-meta On - -# none, visible or audible -set bell-style none - -# for linux console and RH/Debian xterm -"\e[1~": 
beginning-of-line -"\e[4~": end-of-line -"\e[5~": beginning-of-history -"\e[6~": end-of-history -"\e[7~": beginning-of-line -"\e[3~": delete-char -"\e[2~": quoted-insert -"\e[5C": forward-word -"\e[5D": backward-word -"\e\e[C": forward-word -"\e\e[D": backward-word -"\e[1;5C": forward-word -"\e[1;5D": backward-word - -# for non RH/Debian xterm, can't hurt for RH/Debian xterm -"\eOH": beginning-of-line -"\eOF": end-of-line - -# for Konsole and freebsd console -"\e[H": beginning-of-line -"\e[F": end-of-line diff --git a/essential-files/etc/os-release b/essential-files/etc/os-release deleted file mode 100644 index b729c75f..00000000 --- a/essential-files/etc/os-release +++ /dev/null @@ -1,5 +0,0 @@ -NAME="Baserock" -ID=baserock -HOME_URL="http://wiki.baserock.org" -SUPPORT_URL="http://wiki.baserock.org/mailinglist" -BUG_REPORT_URL="http://wiki.baserock.org/mailinglist" diff --git a/essential-files/etc/profile b/essential-files/etc/profile deleted file mode 100644 index b306a132..00000000 --- a/essential-files/etc/profile +++ /dev/null @@ -1,13 +0,0 @@ -# /etc/profile - -# Set our default path -PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" -export PATH - -# Source global bash config -if test "$PS1" && test "$BASH" && test -r /etc/bash.bashrc; then - . /etc/bash.bashrc -fi - -# Set default pager to less -export MANPAGER='less -R' diff --git a/essential-files/manifest b/essential-files/manifest deleted file mode 100644 index 2b77c237..00000000 --- a/essential-files/manifest +++ /dev/null @@ -1,8 +0,0 @@ -0040755 0 0 /etc -overwrite 0100644 0 0 /etc/os-release -overwrite 0100644 0 0 /etc/profile -overwrite 0100644 0 0 /etc/inputrc -0040755 0 0 /usr -0040755 0 0 /usr/lib -0040755 0 0 /usr/lib/tmpfiles.d -0100644 0 0 /usr/lib/tmpfiles.d/shutdownramfs.conf diff --git a/essential-files/usr/lib/tmpfiles.d/shutdownramfs.conf b/essential-files/usr/lib/tmpfiles.d/shutdownramfs.conf deleted file mode 100644 index 174f1f03..00000000 --- a/essential-files/usr/lib/tmpfiles.d/shutdownramfs.conf +++ /dev/null @@ -1,4 +0,0 @@ -# If /usr/lib/shutdownramfs has been populated, copy it into /run/initramfs so -# /run/initramfs/shutdown will be executed on shut-down, so that it may unmount -# the rootfs.
-C /run/initramfs - - - - /usr/lib/shutdownramfs diff --git a/extensions/install-essential-files.configure b/extensions/install-essential-files.configure index 2779b0d4..cce9a5a5 100755 --- a/extensions/install-essential-files.configure +++ b/extensions/install-essential-files.configure @@ -31,7 +31,7 @@ class InstallEssentialFilesConfigureExtension(cliapp.Application): def process_args(self, args): target_root = args[0] - os.environ["INSTALL_FILES"] = "essential-files/manifest" + os.environ["INSTALL_FILES"] = "install-files/essential-files/manifest" self.install_essential_files(target_root) def install_essential_files(self, target_root): diff --git a/genivi-devel-system-armv7/etc/morph.conf b/genivi-devel-system-armv7/etc/morph.conf deleted file mode 120000 index 8f384049..00000000 --- a/genivi-devel-system-armv7/etc/morph.conf +++ /dev/null @@ -1 +0,0 @@ -/src/morph.conf \ No newline at end of file diff --git a/genivi-devel-system-armv7/manifest b/genivi-devel-system-armv7/manifest deleted file mode 100644 index 31980633..00000000 --- a/genivi-devel-system-armv7/manifest +++ /dev/null @@ -1,5 +0,0 @@ -0040755 0 0 /src -0040755 0 0 /src/tmp -0100666 0 0 /src/morph.conf -0040755 0 0 /etc -0120666 0 0 /etc/morph.conf diff --git a/genivi-devel-system-armv7/src/morph.conf b/genivi-devel-system-armv7/src/morph.conf deleted file mode 100644 index 76b6fde9..00000000 --- a/genivi-devel-system-armv7/src/morph.conf +++ /dev/null @@ -1,5 +0,0 @@ -[config] -log = /src/morph.log -cachedir = /src/cache -tempdir = /src/tmp -staging-chroot = true diff --git a/install-files/chef/manifest b/install-files/chef/manifest new file mode 100644 index 00000000..de6cc542 --- /dev/null +++ b/install-files/chef/manifest @@ -0,0 +1,3 @@ +0040755 0 0 /root +0040700 1000 1000 /root/.ssh +0100600 1000 1000 /root/.ssh/authorized_keys diff --git a/install-files/distbuild/lib/systemd/system-generators/ccache-nfs-mount-generator b/install-files/distbuild/lib/systemd/system-generators/ccache-nfs-mount-generator new file mode 100755 index 00000000..127bc84f --- /dev/null +++ b/install-files/distbuild/lib/systemd/system-generators/ccache-nfs-mount-generator @@ -0,0 +1,16 @@ +#!/bin/sh +read trove_host "$1/srv-distbuild-ccache.mount" < /root/.ssh/id_rsa.pub creates=/root/.ssh/id_rsa.pub + + - name: Add trove's host key + shell: | + trove_key="$(ssh-keyscan -t dsa,ecdsa,rsa {{ TROVE_HOST|quote }})" + if [ -n "$trove_key" ]; then + echo "$trove_key" > /etc/ssh/ssh_known_hosts + fi + creates=/etc/ssh/ssh_known_hosts + + # This is a kludge. We can add the host key for the TROVE_HOST that was + # specified, but users may access the Trove by other names, e.g. IP address + # or domain name. Distbuild is currently not safe to run except on a private + # network where host key checking is not important, so we disable it by + # default to avoid errors when users don't stick to using the exact same + # TROVE_HOST in repo URLs. 
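[Editor's aside, for clarity: a rough Python sketch of what the "Add trove's host key" task above does; the helper name is hypothetical and this is not part of the patch. The task's creates= clause makes it idempotent, which the existence check below mirrors.]

    import os
    import subprocess

    def add_trove_host_key(trove_host, known_hosts='/etc/ssh/ssh_known_hosts'):
        # Mirror the Ansible 'creates=' clause: skip the scan on re-runs.
        if os.path.exists(known_hosts):
            return
        # ssh-keyscan prints one line per offered key; empty output means
        # the host could not be scanned, so nothing is written.
        keys = subprocess.check_output(
            ['ssh-keyscan', '-t', 'dsa,ecdsa,rsa', trove_host])
        if keys.strip():
            with open(known_hosts, 'wb') as f:
                f.write(keys)
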
+ - name: Disable strict SSH host key checking + lineinfile: + dest: /etc/ssh/ssh_config + line: StrictHostKeyChecking no + + - name: Enable the morph-cache-server service + service: name=morph-cache-server.service enabled=yes + register: morph_cache_server_service + - name: Restart the morph-cache-server service + service: name=morph-cache-server state=restarted + when: morph_cache_server_service|changed + + - name: Enable the morph-worker service + service: name=morph-worker.service enabled=yes + register: morph_worker_service + when: DISTBUILD_WORKER + - name: Restart the morph-worker service + service: name=morph-worker state=restarted + when: morph_worker_service|changed + + - name: Enable the morph-worker-helper service + service: name=morph-worker-helper.service enabled=yes + register: morph_worker_helper_service + when: DISTBUILD_WORKER + - name: Restart the morph-worker-helper service + service: name=morph-worker-helper state=restarted + when: morph_worker_helper_service|changed + + - name: Enable the morph-controller service + service: name=morph-controller.service enabled=yes + register: morph_controller_service + when: DISTBUILD_CONTROLLER + - name: Restart the morph-controller service + service: name=morph-controller state=restarted + when: morph_controller_service|changed + + - name: Enable the morph-controller-helper service + service: name=morph-controller-helper.service enabled=yes + register: morph_controller_helper_service + when: DISTBUILD_CONTROLLER + - name: Restart the morph-controller-helper service + service: name=morph-controller-helper state=restarted + when: morph_controller_helper_service|changed diff --git a/install-files/distbuild/usr/lib/distbuild-setup/ansible/hosts b/install-files/distbuild/usr/lib/distbuild-setup/ansible/hosts new file mode 100644 index 00000000..5b97818d --- /dev/null +++ b/install-files/distbuild/usr/lib/distbuild-setup/ansible/hosts @@ -0,0 +1 @@ +localhost ansible_connection=local diff --git a/install-files/distbuild/usr/lib/systemd/system/distbuild-setup.service b/install-files/distbuild/usr/lib/systemd/system/distbuild-setup.service new file mode 100644 index 00000000..ec5f5a2d --- /dev/null +++ b/install-files/distbuild/usr/lib/systemd/system/distbuild-setup.service @@ -0,0 +1,16 @@ +[Unit] +Description=Run distbuild-setup Ansible scripts +Requires=network.target +After=network.target +Requires=opensshd.service +After=opensshd.service + +# If there's a shared /var subvolume, it must be mounted before this +# unit runs. 
+Requires=local-fs.target +After=local-fs.target + +ConditionPathExists=/etc/distbuild/distbuild.conf + +[Service] +ExecStart=/usr/bin/ansible-playbook -v -i /usr/lib/distbuild-setup/ansible/hosts /usr/lib/distbuild-setup/ansible/distbuild-setup.yml diff --git a/install-files/distbuild/usr/lib/systemd/system/morph-cache-server.service b/install-files/distbuild/usr/lib/systemd/system/morph-cache-server.service new file mode 100644 index 00000000..f55f3b6d --- /dev/null +++ b/install-files/distbuild/usr/lib/systemd/system/morph-cache-server.service @@ -0,0 +1,12 @@ +[Install] +WantedBy=multi-user.target + +[Unit] +Description=Morph cache server +Requires=local-fs.target network.target +After=local-fs.target network.target +ConditionPathExists=/etc/morph-cache-server.conf + +[Service] +ExecStart=/usr/bin/morph-cache-server +Restart=always diff --git a/install-files/distbuild/usr/lib/systemd/system/morph-controller-helper.service b/install-files/distbuild/usr/lib/systemd/system/morph-controller-helper.service new file mode 100644 index 00000000..3f30cbcf --- /dev/null +++ b/install-files/distbuild/usr/lib/systemd/system/morph-controller-helper.service @@ -0,0 +1,13 @@ +[Install] +WantedBy=multi-user.target + +[Unit] +Description=Morph distributed build controller helper +Requires=morph-controller.service +After=morph-controller.service +ConditionPathExists=/etc/morph-controller.conf +ConditionPathExists=/etc/morph-controller-helper.conf + +[Service] +ExecStart=/usr/bin/distbuild-helper --config /etc/morph-controller-helper.conf +Restart=always diff --git a/install-files/distbuild/usr/lib/systemd/system/morph-controller.service b/install-files/distbuild/usr/lib/systemd/system/morph-controller.service new file mode 100644 index 00000000..1556d232 --- /dev/null +++ b/install-files/distbuild/usr/lib/systemd/system/morph-controller.service @@ -0,0 +1,12 @@ +[Install] +WantedBy=multi-user.target + +[Unit] +Description=Morph distributed build controller +Requires=local-fs.target network.target +After=local-fs.target network.target +ConditionPathExists=/etc/morph-controller.conf + +[Service] +ExecStart=/usr/bin/morph controller-daemon --config /etc/morph-controller.conf +Restart=always diff --git a/install-files/distbuild/usr/lib/systemd/system/morph-worker-helper.service b/install-files/distbuild/usr/lib/systemd/system/morph-worker-helper.service new file mode 100644 index 00000000..28400701 --- /dev/null +++ b/install-files/distbuild/usr/lib/systemd/system/morph-worker-helper.service @@ -0,0 +1,13 @@ +[Install] +WantedBy=multi-user.target + +[Unit] +Description=Morph distributed build worker helper +Requires=morph-worker.service +After=morph-worker.service +ConditionPathExists=/etc/morph-worker.conf +ConditionPathExists=/etc/morph-worker-helper.conf + +[Service] +ExecStart=/usr/bin/distbuild-helper --config /etc/morph-worker-helper.conf +Restart=always diff --git a/install-files/distbuild/usr/lib/systemd/system/morph-worker.service b/install-files/distbuild/usr/lib/systemd/system/morph-worker.service new file mode 100644 index 00000000..90fea404 --- /dev/null +++ b/install-files/distbuild/usr/lib/systemd/system/morph-worker.service @@ -0,0 +1,13 @@ +[Install] +WantedBy=multi-user.target + +[Unit] +Description=Morph distributed build worker +Requires=local-fs.target network.target +Wants=srv-distbuild-ccache.mount +After=local-fs.target network.target srv-distbuild-ccache.mount +ConditionPathExists=/etc/morph-worker.conf + +[Service] +ExecStart=/usr/bin/morph worker-daemon --config 
/etc/morph-worker.conf +Restart=always diff --git a/install-files/distbuild/usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service b/install-files/distbuild/usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service new file mode 120000 index 00000000..8f06febd --- /dev/null +++ b/install-files/distbuild/usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service @@ -0,0 +1 @@ +../distbuild-setup.service \ No newline at end of file diff --git a/install-files/distbuild/usr/share/distbuild-setup/morph-cache-server.conf b/install-files/distbuild/usr/share/distbuild-setup/morph-cache-server.conf new file mode 100644 index 00000000..b9020e7d --- /dev/null +++ b/install-files/distbuild/usr/share/distbuild-setup/morph-cache-server.conf @@ -0,0 +1,5 @@ +[config] +port = 8080 +artifact-dir = /srv/distbuild/artifacts +direct-mode = True +fcgi-server = False diff --git a/install-files/distbuild/usr/share/distbuild-setup/morph-controller-helper.conf b/install-files/distbuild/usr/share/distbuild-setup/morph-controller-helper.conf new file mode 100644 index 00000000..99d38739 --- /dev/null +++ b/install-files/distbuild/usr/share/distbuild-setup/morph-controller-helper.conf @@ -0,0 +1,5 @@ +[config] +log = /srv/distbuild/morph-controller-helper.log +log-max = 100M +parent-port = 5656 +parent-address = 127.0.0.1 diff --git a/install-files/distbuild/usr/share/distbuild-setup/morph-controller.conf b/install-files/distbuild/usr/share/distbuild-setup/morph-controller.conf new file mode 100644 index 00000000..c16c0343 --- /dev/null +++ b/install-files/distbuild/usr/share/distbuild-setup/morph-controller.conf @@ -0,0 +1,6 @@ +[config] +log = /srv/distbuild/morph-controller.log +log-max = 100M +writeable-cache-server = http://{{ ARTIFACT_CACHE_SERVER }}:8081/ +worker = {{ WORKERS }} +controller-helper-address = 127.0.0.1 diff --git a/install-files/distbuild/usr/share/distbuild-setup/morph-worker-helper.conf b/install-files/distbuild/usr/share/distbuild-setup/morph-worker-helper.conf new file mode 100644 index 00000000..29d4ef3f --- /dev/null +++ b/install-files/distbuild/usr/share/distbuild-setup/morph-worker-helper.conf @@ -0,0 +1,4 @@ +[config] +log = /srv/distbuild/morph-worker-helper.log +log-max = 100M +parent-address = 127.0.0.1 diff --git a/install-files/distbuild/usr/share/distbuild-setup/morph-worker.conf b/install-files/distbuild/usr/share/distbuild-setup/morph-worker.conf new file mode 100644 index 00000000..fb382bad --- /dev/null +++ b/install-files/distbuild/usr/share/distbuild-setup/morph-worker.conf @@ -0,0 +1,4 @@ +[config] +log = /srv/distbuild/morph-worker.log +log-max = 100M +controller-initiator-address = diff --git a/install-files/distbuild/usr/share/distbuild-setup/morph.conf b/install-files/distbuild/usr/share/distbuild-setup/morph.conf new file mode 100644 index 00000000..29de684c --- /dev/null +++ b/install-files/distbuild/usr/share/distbuild-setup/morph.conf @@ -0,0 +1,13 @@ +[config] +log = /srv/distbuild/morph.log +log-max = 100M +cachedir = /srv/distbuild +tempdir = /srv/distbuild/tmp +trove-host = {{ TROVE_HOST }} +trove-id = {{ TROVE_ID }} +controller-initiator-address = {{ CONTROLLERHOST }} +tempdir-min-space = 4G +cachedir-min-space = 4G +build-ref-prefix = {{ TROVE_ID }} +artifact-cache-server = http://{{ ARTIFACT_CACHE_SERVER }}:8080/ +git-resolve-cache-server = http://{{ TROVE_HOST }}:8080/ diff --git a/install-files/essential-files/etc/inputrc b/install-files/essential-files/etc/inputrc new file mode 100644 index 00000000..ddee44cd --- 
/dev/null +++ b/install-files/essential-files/etc/inputrc @@ -0,0 +1,38 @@ +# Allow the command prompt to wrap to the next line +set horizontal-scroll-mode Off + +# Enable 8bit input +set meta-flag On +set input-meta On + +# Turns off 8th bit stripping +set convert-meta Off + +# Keep the 8th bit for display +set output-meta On + +# none, visible or audible +set bell-style none + +# for linux console and RH/Debian xterm +"\e[1~": beginning-of-line +"\e[4~": end-of-line +"\e[5~": beginning-of-history +"\e[6~": end-of-history +"\e[7~": beginning-of-line +"\e[3~": delete-char +"\e[2~": quoted-insert +"\e[5C": forward-word +"\e[5D": backward-word +"\e\e[C": forward-word +"\e\e[D": backward-word +"\e[1;5C": forward-word +"\e[1;5D": backward-word + +# for non RH/Debian xterm, can't hurt for RH/Debian xterm +"\eOH": beginning-of-line +"\eOF": end-of-line + +# for Konsole and freebsd console +"\e[H": beginning-of-line +"\e[F": end-of-line diff --git a/install-files/essential-files/etc/os-release b/install-files/essential-files/etc/os-release new file mode 100644 index 00000000..b729c75f --- /dev/null +++ b/install-files/essential-files/etc/os-release @@ -0,0 +1,5 @@ +NAME="Baserock" +ID=baserock +HOME_URL="http://wiki.baserock.org" +SUPPORT_URL="http://wiki.baserock.org/mailinglist" +BUG_REPORT_URL="http://wiki.baserock.org/mailinglist" diff --git a/install-files/essential-files/etc/profile b/install-files/essential-files/etc/profile new file mode 100644 index 00000000..b306a132 --- /dev/null +++ b/install-files/essential-files/etc/profile @@ -0,0 +1,13 @@ +# /etc/profile + +# Set our default path +PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" +export PATH + +# Source global bash config +if test "$PS1" && test "$BASH" && test -r /etc/bash.bashrc; then + . /etc/bash.bashrc +fi + +# Set default pager to less +export MANPAGER='less -R' diff --git a/install-files/essential-files/manifest b/install-files/essential-files/manifest new file mode 100644 index 00000000..2b77c237 --- /dev/null +++ b/install-files/essential-files/manifest @@ -0,0 +1,8 @@ +0040755 0 0 /etc +overwrite 0100644 0 0 /etc/os-release +overwrite 0100644 0 0 /etc/profile +overwrite 0100644 0 0 /etc/inputrc +0040755 0 0 /usr +0040755 0 0 /usr/lib +0040755 0 0 /usr/lib/tmpfiles.d +0100644 0 0 /usr/lib/tmpfiles.d/shutdownramfs.conf diff --git a/install-files/essential-files/usr/lib/tmpfiles.d/shutdownramfs.conf b/install-files/essential-files/usr/lib/tmpfiles.d/shutdownramfs.conf new file mode 100644 index 00000000..174f1f03 --- /dev/null +++ b/install-files/essential-files/usr/lib/tmpfiles.d/shutdownramfs.conf @@ -0,0 +1,4 @@ +# If /usr/lib/shutdownramfs has been populated, copy it into /run/initramfs so +# /run/initramfs/shutdown will be executed on shut-down, so that it may unmount +# the rootfs.
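[Editor's gloss (hypothetical sketch, not part of the patch): the C line of shutdownramfs.conf, which follows, uses the systemd-tmpfiles field layout Type Path Mode User Group Age Argument, with - meaning the default; at boot it behaves roughly like this Python.]

    import os
    import shutil

    def populate_shutdownramfs(src='/usr/lib/shutdownramfs',
                               dst='/run/initramfs'):
        # Type 'C' copies Argument to Path only when Path does not yet exist,
        # so an initramfs already unpacked into /run is not overwritten.
        if os.path.isdir(src) and not os.path.exists(dst):
            shutil.copytree(src, dst)
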
+C /run/initramfs - - - - /usr/lib/shutdownramfs diff --git a/install-files/genivi-devel-system-armv7/etc/morph.conf b/install-files/genivi-devel-system-armv7/etc/morph.conf new file mode 120000 index 00000000..8f384049 --- /dev/null +++ b/install-files/genivi-devel-system-armv7/etc/morph.conf @@ -0,0 +1 @@ +/src/morph.conf \ No newline at end of file diff --git a/install-files/genivi-devel-system-armv7/manifest b/install-files/genivi-devel-system-armv7/manifest new file mode 100644 index 00000000..31980633 --- /dev/null +++ b/install-files/genivi-devel-system-armv7/manifest @@ -0,0 +1,5 @@ +0040755 0 0 /src +0040755 0 0 /src/tmp +0100666 0 0 /src/morph.conf +0040755 0 0 /etc +0120666 0 0 /etc/morph.conf diff --git a/install-files/genivi-devel-system-armv7/src/morph.conf b/install-files/genivi-devel-system-armv7/src/morph.conf new file mode 100644 index 00000000..76b6fde9 --- /dev/null +++ b/install-files/genivi-devel-system-armv7/src/morph.conf @@ -0,0 +1,5 @@ +[config] +log = /src/morph.log +cachedir = /src/cache +tempdir = /src/tmp +staging-chroot = true diff --git a/install-files/moonshot/boot/m400-1003.dtb b/install-files/moonshot/boot/m400-1003.dtb new file mode 100644 index 00000000..d6fd83ee Binary files /dev/null and b/install-files/moonshot/boot/m400-1003.dtb differ diff --git a/install-files/moonshot/manifest b/install-files/moonshot/manifest new file mode 100644 index 00000000..dd80fe49 --- /dev/null +++ b/install-files/moonshot/manifest @@ -0,0 +1,2 @@ +0040755 0 0 /boot +0100744 0 0 /boot/m400-1003.dtb diff --git a/install-files/openstack/etc/horizon/apache-horizon.conf b/install-files/openstack/etc/horizon/apache-horizon.conf new file mode 100644 index 00000000..ea88897a --- /dev/null +++ b/install-files/openstack/etc/horizon/apache-horizon.conf @@ -0,0 +1,34 @@ +<VirtualHost *:80> + WSGIScriptAlias /horizon /var/lib/horizon/openstack_dashboard/django.wsgi + WSGIDaemonProcess horizon user=horizon group=horizon processes=3 threads=10 home=/var/lib/horizon display-name=horizon + WSGIApplicationGroup %{GLOBAL} + + RedirectMatch ^/$ /horizon/ + + SetEnv APACHE_RUN_USER apache + SetEnv APACHE_RUN_GROUP apache + WSGIProcessGroup horizon + + DocumentRoot /var/lib/horizon/.blackhole + Alias /static /var/lib/horizon/openstack_dashboard/static + + <Directory /var/lib/horizon> + Options Indexes FollowSymLinks MultiViews + AllowOverride None + # Apache 2.4 uses mod_authz_host for access control now (instead of + # "Allow") + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + </Directory> + + ErrorLog /var/log/httpd/horizon_error.log + LogLevel warn + CustomLog /var/log/httpd/horizon_access.log combined +</VirtualHost> + +WSGISocketPrefix /var/run/httpd diff --git a/install-files/openstack/etc/horizon/openstack_dashboard/local_settings.py b/install-files/openstack/etc/horizon/openstack_dashboard/local_settings.py new file mode 100644 index 00000000..febc3e70 --- /dev/null +++ b/install-files/openstack/etc/horizon/openstack_dashboard/local_settings.py @@ -0,0 +1,551 @@ +import os + +from django.utils.translation import ugettext_lazy as _ + +from openstack_dashboard import exceptions + +DEBUG = True +TEMPLATE_DEBUG = DEBUG + +STATIC_ROOT = "/var/lib/horizon/openstack_dashboard/static" + +# Required for Django 1.5. +# If horizon is running in production (DEBUG is False), set this +# with the list of host/domain names that the application can serve.
+# For more information see: +# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts +#ALLOWED_HOSTS = ['horizon.example.com', ] +ALLOWED_HOSTS = ['*'] + +# Set SSL proxy settings: +# For Django 1.4+ pass this header from the proxy after terminating the SSL, +# and don't forget to strip it from the client's request. +# For more information see: +# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header +# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https') + +# If Horizon is being served through SSL, then uncomment the following two +# settings to better secure the cookies from security exploits +#CSRF_COOKIE_SECURE = True +#SESSION_COOKIE_SECURE = True + +# Overrides for OpenStack API versions. Use this setting to force the +# OpenStack dashboard to use a specific API version for a given service API. +# NOTE: The version should be formatted as it appears in the URL for the +# service API. For example, the identity service APIs have inconsistent +# use of the decimal point, so valid options would be "2.0" or "3". +# OPENSTACK_API_VERSIONS = { +# "data_processing": 1.1, +# "identity": 3, +# "volume": 2 +# } + +# Set this to True if running on a multi-domain model. When this is enabled, it +# will require the user to enter the Domain name in addition to username for login. +# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False + +# Overrides the default domain used when running on a single-domain model +# with Keystone V3. All entities will be created in the default domain. +# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default' + +# Set Console type: +# valid options would be "AUTO" (default), "VNC", "SPICE", "RDP" or None +# Set to None explicitly if you want to deactivate the console. +# CONSOLE_TYPE = "AUTO" + +# Default OpenStack Dashboard configuration. +HORIZON_CONFIG = { + 'user_home': 'openstack_dashboard.views.get_user_home', + 'ajax_queue_limit': 10, + 'auto_fade_alerts': { + 'delay': 3000, + 'fade_duration': 1500, + 'types': ['alert-success', 'alert-info'] + }, + 'help_url': "http://docs.openstack.org", + 'exceptions': {'recoverable': exceptions.RECOVERABLE, + 'not_found': exceptions.NOT_FOUND, + 'unauthorized': exceptions.UNAUTHORIZED}, + 'modal_backdrop': 'static', + 'angular_modules': [], + 'js_files': [], +} + +# Specify a regular expression to validate user passwords. +# HORIZON_CONFIG["password_validator"] = { +# "regex": '.*', +# "help_text": _("Your password does not meet the requirements.") +# } + +# Disable simplified floating IP address management for deployments with +# multiple floating IP pools or complex network requirements. +# HORIZON_CONFIG["simple_ip_management"] = False + +# Turn off browser autocompletion for forms including the login form and +# the database creation workflow if so desired. +# HORIZON_CONFIG["password_autocomplete"] = "off" + +# Setting this to True will disable the reveal button for password fields, +# including on the login form. +# HORIZON_CONFIG["disable_password_reveal"] = False + +#LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) + +LOCAL_PATH = "/var/lib/horizon" + +# Set custom secret key: +# You can either set it to a specific value or you can let horizon generate a +# default secret key that is unique on this machine, i.e. regardless of the +# number of Python WSGI workers (if used behind Apache+mod_wsgi). However, there +# may be situations where you would want to set this explicitly, e.g. when +# multiple dashboard instances are distributed on different machines (usually +# behind a load-balancer).
Either you have to make sure that a session gets all +# requests routed to the same dashboard instance or you set the same SECRET_KEY +# for all of them. +from horizon.utils import secret_key +SECRET_KEY = secret_key.generate_or_read_from_file( + os.path.join(LOCAL_PATH, '.secret_key_store')) + +# We recommend you use memcached for development; otherwise after every reload +# of the django development server, you will have to log in again. To use +# memcached, set CACHES to something like +CACHES = { + 'default': { + 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', + 'LOCATION': '127.0.0.1:11211', + } +} + +#CACHES = { +# 'default': { +# 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache' +# } +#} + +# Send email to the console by default +EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' +# Or send them to /dev/null +#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' + +# Configure these for your outgoing email host +# EMAIL_HOST = 'smtp.my-company.com' +# EMAIL_PORT = 25 +# EMAIL_HOST_USER = 'djangomail' +# EMAIL_HOST_PASSWORD = 'top-secret!' + +# For multiple regions uncomment this configuration, and add (endpoint, title). +# AVAILABLE_REGIONS = [ +# ('http://cluster1.example.com:5000/v2.0', 'cluster1'), +# ('http://cluster2.example.com:5000/v2.0', 'cluster2'), +# ] + +OPENSTACK_HOST = "127.0.0.1" +OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST +OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_" + +# Disable SSL certificate checks (useful for self-signed certificates): +# OPENSTACK_SSL_NO_VERIFY = True + +# The CA certificate to use to verify SSL connections +# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem' + +# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the +# capabilities of the auth backend for Keystone. +# If Keystone has been configured to use LDAP as the auth backend then set +# can_edit_user to False and name to 'ldap'. +# +# TODO(tres): Remove these once Keystone has an API to identify auth backend. +OPENSTACK_KEYSTONE_BACKEND = { + 'name': 'native', + 'can_edit_user': True, + 'can_edit_group': True, + 'can_edit_project': True, + 'can_edit_domain': True, + 'can_edit_role': True +} + +#Setting this to True will add a new "Retrieve Password" action on instances, +#allowing Admin session password retrieval/decryption. +#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False + +# The Xen Hypervisor has the ability to set the mount point for volumes +# attached to instances (other Hypervisors currently do not). Setting +# can_set_mount_point to True will add the option to set the mount point +# from the UI. +OPENSTACK_HYPERVISOR_FEATURES = { + 'can_set_mount_point': False, + 'can_set_password': False, +} + +# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional +# services provided by cinder that are not exposed by its extension API. +OPENSTACK_CINDER_FEATURES = { + 'enable_backup': False, +} + +# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional +# services provided by neutron. Options currently available are load +# balancer service, security groups, quotas, VPN service. +OPENSTACK_NEUTRON_NETWORK = { + 'enable_router': True, + 'enable_quotas': True, + 'enable_ipv6': True, + 'enable_distributed_router': False, + 'enable_ha_router': False, + 'enable_lb': True, + 'enable_firewall': True, + 'enable_vpn': True, + # The profile_support option is used to detect if an external router can be + # configured via the dashboard.
When using specific plugins the + # profile_support can be turned on if needed. + 'profile_support': None, + #'profile_support': 'cisco', + # Set which provider network types are supported. Only the network types + # in this list will be available to choose from when creating a network. + # Network types include local, flat, vlan, gre, and vxlan. + 'supported_provider_types': ['*'], +} + +# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features +# in the OpenStack Dashboard related to the Image service, such as the list +# of supported image formats. +# OPENSTACK_IMAGE_BACKEND = { +# 'image_formats': [ +# ('', _('Select format')), +# ('aki', _('AKI - Amazon Kernel Image')), +# ('ami', _('AMI - Amazon Machine Image')), +# ('ari', _('ARI - Amazon Ramdisk Image')), +# ('iso', _('ISO - Optical Disk Image')), +# ('qcow2', _('QCOW2 - QEMU Emulator')), +# ('raw', _('Raw')), +# ('vdi', _('VDI')), +# ('vhd', _('VHD')), +# ('vmdk', _('VMDK')) +# ] +# } + +# The IMAGE_CUSTOM_PROPERTY_TITLES setting is used to customize the titles for +# image custom property attributes that appear on image detail pages. +IMAGE_CUSTOM_PROPERTY_TITLES = { + "architecture": _("Architecture"), + "kernel_id": _("Kernel ID"), + "ramdisk_id": _("Ramdisk ID"), + "image_state": _("Euca2ools state"), + "project_id": _("Project ID"), + "image_type": _("Image Type") +} + +# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image +# custom properties should not be displayed in the Image Custom Properties +# table. +IMAGE_RESERVED_CUSTOM_PROPERTIES = [] + +# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is 'publicURL'. +#OPENSTACK_ENDPOINT_TYPE = "publicURL" + +# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the +# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is None. This +# value should differ from OPENSTACK_ENDPOINT_TYPE if used. +#SECONDARY_ENDPOINT_TYPE = "publicURL" + +# The number of objects (Swift containers/objects or images) to display +# on a single page before providing a paging element (a "more" link) +# to paginate results. +API_RESULT_LIMIT = 1000 +API_RESULT_PAGE_SIZE = 20 + +# Specify a maximum number of items to display in a dropdown. +DROPDOWN_MAX_ITEMS = 30 + +# The timezone of the server. This should correspond with the timezone +# of your entire OpenStack installation, and hopefully be in UTC. +TIME_ZONE = "UTC" + +# When launching an instance, the menu of available flavors is +# sorted by RAM usage, ascending. If you would like a different sort order, +# you can provide another flavor attribute as the sorting key. Alternatively, you +# can provide a custom callback method to use for sorting. You can also provide +# a flag for reverse sort. For more info, see +# http://docs.python.org/2/library/functions.html#sorted +# CREATE_INSTANCE_FLAVOR_SORT = { +# 'key': 'name', +# # or +# 'key': my_awesome_callback_method, +# 'reverse': False, +# } + +# The Horizon Policy Enforcement engine uses these values to load per-service +# policy rule files. The content of these files should match the files the +# OpenStack services are using to determine role-based access control in the +# target installation.
+ +# Path to directory containing policy.json files +#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf") +# Map of local copy of service policy files +#POLICY_FILES = { +# 'identity': 'keystone_policy.json', +# 'compute': 'nova_policy.json', +# 'volume': 'cinder_policy.json', +# 'image': 'glance_policy.json', +# 'orchestration': 'heat_policy.json', +# 'network': 'neutron_policy.json', +#} + +# Trove user and database extension support. By default support for +# creating users and databases on database instances is turned on. +# To disable these extensions set the permission here to something +# unusable such as ["!"]. +# TROVE_ADD_USER_PERMS = [] +# TROVE_ADD_DATABASE_PERMS = [] + +LOGGING = { + 'version': 1, + # When set to True this will disable all logging except + # for loggers specified in this configuration dictionary. Note that + # if nothing is specified here and disable_existing_loggers is True, + # django.db.backends will still log unless it is disabled explicitly. + 'disable_existing_loggers': False, + 'handlers': { + 'null': { + 'level': 'DEBUG', + 'class': 'django.utils.log.NullHandler', + }, + 'console': { + # Set the level to "DEBUG" for verbose output logging. + 'level': 'INFO', + 'class': 'logging.StreamHandler', + }, + }, + 'loggers': { + # Logging from django.db.backends is VERY verbose, send to null + # by default. + 'django.db.backends': { + 'handlers': ['null'], + 'propagate': False, + }, + 'requests': { + 'handlers': ['null'], + 'propagate': False, + }, + 'horizon': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'openstack_dashboard': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'novaclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'cinderclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'keystoneclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'glanceclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'neutronclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'heatclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'ceilometerclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'troveclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'swiftclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'openstack_auth': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'nose.plugins.manager': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'django': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'iso8601': { + 'handlers': ['null'], + 'propagate': False, + }, + 'scss': { + 'handlers': ['null'], + 'propagate': False, + }, + } +} + +# 'direction' should not be specified for all_tcp/udp/icmp. +# It is specified in the form. 
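+# (That is, the user chooses the direction in the rule-creation form.)
+#
+# Extra entries follow the same shape as the defaults below; e.g. a
+# hypothetical PostgreSQL rule (not part of this default set) would be:
+#
+# 'postgresql': {
+#     'name': 'PostgreSQL',
+#     'ip_protocol': 'tcp',
+#     'from_port': '5432',
+#     'to_port': '5432',
+# },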
+SECURITY_GROUP_RULES = { + 'all_tcp': { + 'name': _('All TCP'), + 'ip_protocol': 'tcp', + 'from_port': '1', + 'to_port': '65535', + }, + 'all_udp': { + 'name': _('All UDP'), + 'ip_protocol': 'udp', + 'from_port': '1', + 'to_port': '65535', + }, + 'all_icmp': { + 'name': _('All ICMP'), + 'ip_protocol': 'icmp', + 'from_port': '-1', + 'to_port': '-1', + }, + 'ssh': { + 'name': 'SSH', + 'ip_protocol': 'tcp', + 'from_port': '22', + 'to_port': '22', + }, + 'smtp': { + 'name': 'SMTP', + 'ip_protocol': 'tcp', + 'from_port': '25', + 'to_port': '25', + }, + 'dns': { + 'name': 'DNS', + 'ip_protocol': 'tcp', + 'from_port': '53', + 'to_port': '53', + }, + 'http': { + 'name': 'HTTP', + 'ip_protocol': 'tcp', + 'from_port': '80', + 'to_port': '80', + }, + 'pop3': { + 'name': 'POP3', + 'ip_protocol': 'tcp', + 'from_port': '110', + 'to_port': '110', + }, + 'imap': { + 'name': 'IMAP', + 'ip_protocol': 'tcp', + 'from_port': '143', + 'to_port': '143', + }, + 'ldap': { + 'name': 'LDAP', + 'ip_protocol': 'tcp', + 'from_port': '389', + 'to_port': '389', + }, + 'https': { + 'name': 'HTTPS', + 'ip_protocol': 'tcp', + 'from_port': '443', + 'to_port': '443', + }, + 'smtps': { + 'name': 'SMTPS', + 'ip_protocol': 'tcp', + 'from_port': '465', + 'to_port': '465', + }, + 'imaps': { + 'name': 'IMAPS', + 'ip_protocol': 'tcp', + 'from_port': '993', + 'to_port': '993', + }, + 'pop3s': { + 'name': 'POP3S', + 'ip_protocol': 'tcp', + 'from_port': '995', + 'to_port': '995', + }, + 'ms_sql': { + 'name': 'MS SQL', + 'ip_protocol': 'tcp', + 'from_port': '1433', + 'to_port': '1433', + }, + 'mysql': { + 'name': 'MYSQL', + 'ip_protocol': 'tcp', + 'from_port': '3306', + 'to_port': '3306', + }, + 'rdp': { + 'name': 'RDP', + 'ip_protocol': 'tcp', + 'from_port': '3389', + 'to_port': '3389', + }, +} + +# Deprecation Notice: +# +# The setting FLAVOR_EXTRA_KEYS has been deprecated. +# Please load extra spec metadata into the Glance Metadata Definition Catalog. +# +# The sample quota definitions can be found in: +# /etc/metadefs/compute-quota.json +# +# The metadata definition catalog supports CLI and API: +# $glance --os-image-api-version 2 help md-namespace-import +# $glance-manage db_load_metadefs +# +# See Metadata Definitions on: http://docs.openstack.org/developer/glance/ + +# Indicate to the Sahara data processing service whether or not +# automatic floating IP allocation is in effect. If it is not +# in effect, the user will be prompted to choose a floating IP +# pool for use in their cluster. False by default. You would want +# to set this to True if you were running Nova Networking with +# auto_assign_floating_ip = True. +# SAHARA_AUTO_IP_ALLOCATION_ENABLED = False + +# The hash algorithm to use for authentication tokens. This must +# match the hash algorithm that the identity server and the +# auth_token middleware are using. Allowed values are the +# algorithms supported by Python's hashlib library. +# OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5' +LOGIN_URL='/horizon/auth/login/' +LOGOUT_URL='/horizon/auth/logout/' +LOGIN_REDIRECT_URL='/horizon/' diff --git a/install-files/openstack/etc/tempest/tempest.conf b/install-files/openstack/etc/tempest/tempest.conf new file mode 100644 index 00000000..05f0eca1 --- /dev/null +++ b/install-files/openstack/etc/tempest/tempest.conf @@ -0,0 +1,1116 @@ +[DEFAULT] + +# +# From tempest.config +# + +# Whether to disable inter-process locks (boolean value) +#disable_process_locking = false + +# Directory to use for lock files. 
(string value) +lock_path = /run/lock + +# +# From tempest.config +# + +# Print debugging output (set logging level to DEBUG instead of +# default WARNING level). (boolean value) +#debug = false + +# Print more verbose output (set logging level to INFO instead of +# default WARNING level). (boolean value) +#verbose = false + +# +# From tempest.config +# + +# The name of a logging configuration file. This file is appended to +# any existing logging configuration files. For details about logging +# configuration files, see the Python logging module documentation. +# (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append = + +# Format string for %%(asctime)s in log records. Default: %(default)s +# . (string value) +#log_date_format = %Y-%m-%d %H:%M:%S + +# (Optional) The base directory used for relative --log-file paths. +# (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir = + +# (Optional) Name of log file to output to. If no default is set, +# logging will go to stdout. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file = + +# DEPRECATED. A logging.Formatter log message format string which may +# use any of the available logging.LogRecord attributes. This option +# is deprecated. Please use logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format = + +# Syslog facility to receive log lines. (string value) +#syslog_log_facility = LOG_USER + +# Use syslog for logging. Existing syslog format is DEPRECATED during +# I, and will change in J to honor RFC5424. (boolean value) +use_syslog = true + +# (Optional) Enables or disables syslog rfc5424 format for logging. If +# enabled, prefixes the MSG part of the syslog message with APP-NAME +# (RFC5424). The format without the APP-NAME is deprecated in I, and +# will be removed in J. (boolean value) +#use_syslog_rfc_format = false + +# +# From tempest.config +# + +# Log output to standard error. (boolean value) +#use_stderr = true + +# +# From tempest.config +# + +# List of logger=LEVEL pairs. (list value) +#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN + +# Enables or disables fatal status of deprecations. (boolean value) +#fatal_deprecations = false + +# The format for an instance that is passed with the log message. +# (string value) +#instance_format = "[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log message. +# (string value) +#instance_uuid_format = "[instance: %(uuid)s] " + +# Format string to use for log messages with context. (string value) +#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG. (string value) +#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + +# Format string to use for log messages without context. (string +# value) +#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Prefix each line of exception output with this format. (string +# value) +#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# Enables or disables publication of error events. 
(boolean value) +#publish_errors = false + + +[auth] + +# +# From tempest.config +# + +# Allows test cases to create/destroy tenants and users. This option +# requires that OpenStack Identity API admin credentials are known. If +# false, isolated test cases and parallel execution, can still be +# achieved configuring a list of test accounts (boolean value) +# Deprecated group/name - [compute]/allow_tenant_isolation +# Deprecated group/name - [orchestration]/allow_tenant_isolation +allow_tenant_isolation = true + +# If set to True it enables the Accounts provider, which locks +# credentials to allow for parallel execution with pre-provisioned +# accounts. It can only be used to run tests that ensure credentials +# cleanup happens. It requires at least `2 * CONC` distinct accounts +# configured in `test_accounts_file`, with CONC == the number of +# concurrent test processes. (boolean value) +#locking_credentials_provider = false + +# Path to the yaml file that contains the list of credentials to use +# for running tests (string value) +#test_accounts_file = etc/accounts.yaml + + +[baremetal] + +# +# From tempest.config +# + +# Timeout for Ironic node to completely provision (integer value) +#active_timeout = 300 + +# Timeout for association of Nova instance and Ironic node (integer +# value) +#association_timeout = 30 + +# Catalog type of the baremetal provisioning service (string value) +#catalog_type = baremetal + +# Driver name which Ironic uses (string value) +#driver = fake + +# Whether the Ironic nova-compute driver is enabled (boolean value) +#driver_enabled = false + +# The endpoint type to use for the baremetal provisioning service +# (string value) +#endpoint_type = publicURL + +# Timeout for Ironic power transitions. (integer value) +#power_timeout = 60 + +# Timeout for unprovisioning an Ironic node. (integer value) +#unprovision_timeout = 60 + + +[boto] + +# +# From tempest.config +# + +# AKI Kernel Image manifest (string value) +#aki_manifest = cirros-0.3.0-x86_64-vmlinuz.manifest.xml + +# AMI Machine Image manifest (string value) +#ami_manifest = cirros-0.3.0-x86_64-blank.img.manifest.xml + +# ARI Ramdisk Image manifest (string value) +#ari_manifest = cirros-0.3.0-x86_64-initrd.manifest.xml + +# AWS Access Key (string value) +#aws_access = + +# AWS Secret Key (string value) +#aws_secret = + +# AWS Zone for EC2 tests (string value) +#aws_zone = nova + +# Status Change Test Interval (integer value) +#build_interval = 1 + +# Status Change Timeout (integer value) +#build_timeout = 60 + +# EC2 URL (string value) +#ec2_url = http://localhost:8773/services/Cloud + +# boto Http socket timeout (integer value) +#http_socket_timeout = 3 + +# Instance type (string value) +#instance_type = m1.tiny + +# boto num_retries on error (integer value) +#num_retries = 1 + +# S3 Materials Path (string value) +#s3_materials_path = /opt/stack/devstack/files/images/s3-materials/cirros-0.3.0 + +# S3 URL (string value) +#s3_url = http://localhost:8080 + + +[cli] + +# +# From tempest.config +# + +# directory where python client binaries are located (string value) +cli_dir = /usr/bin + +# enable cli tests (boolean value) +#enabled = true + +# Whether the tempest run location has access to the *-manage +# commands. In a pure blackbox environment it will not. (boolean +# value) +#has_manage = true + +# Number of seconds to wait on a CLI timeout (integer value) +#timeout = 15 + + +[compute] + +# +# From tempest.config +# + +# Time in seconds between build status checks. 
(integer value)
+#build_interval = 1
+
+# Timeout in seconds to wait for an instance to build. (integer value)
+#build_timeout = 300
+
+# Catalog type of the Compute service. (string value)
+#catalog_type = compute
+
+# Catalog type of the Compute v3 service. (string value)
+#catalog_v3_type = computev3
+
+# The endpoint type to use for the compute service. (string value)
+#endpoint_type = publicURL
+
+# Visible fixed network name (string value)
+#fixed_network_name = private
+
+# Valid primary flavor to use in tests. (string value)
+#flavor_ref = 1
+
+# Valid secondary flavor to be used in tests. (string value)
+#flavor_ref_alt = 2
+
+# Unallocated floating IP range, which will be used to test the
+# floating IP bulk feature for CRUD operation. (string value)
+#floating_ip_range = 10.0.0.0/29
+
+# Password used to authenticate to an instance using the alternate
+# image. (string value)
+#image_alt_ssh_password = password
+
+# User name used to authenticate to an instance using the alternate
+# image. (string value)
+#image_alt_ssh_user = root
+
+# Valid primary image reference to be used in tests. This is a
+# required option (string value)
+#image_ref =
+
+# Valid secondary image reference to be used in tests. This is a
+# required option, but if only one image is available duplicate the
+# value of image_ref above (string value)
+#image_ref_alt =
+
+# Password used to authenticate to an instance. (string value)
+#image_ssh_password = password
+
+# User name used to authenticate to an instance. (string value)
+#image_ssh_user = root
+
+# IP version used for SSH connections. (integer value)
+#ip_version_for_ssh = 4
+
+# Network used for SSH connections. (string value)
+#network_for_ssh = public
+
+# Path to a private key file for SSH access to remote hosts (string
+# value)
+#path_to_private_key =
+
+# Timeout in seconds to wait for ping to succeed. (integer value)
+#ping_timeout = 120
+
+# Additional wait time for clean state, when there is no OS-EXT-STS
+# extension available (integer value)
+#ready_wait = 0
+
+# The compute region name to use. If empty, the value of
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
+
+# Should the tests ssh to instances? (boolean value)
+#run_ssh = false
+
+# Time in seconds before a shelved instance is eligible for removal
+# from a host. -1: never offload; 0: offload when shelved. This should
+# match the corresponding time in nova.conf, and some tests will run
+# for as long as that time. (integer value)
+#shelved_offload_time = 0
+
+# Auth method used to authenticate to the instance. Valid choices
+# are: keypair, configured, adminpass. keypair: start the servers with
+# an ssh keypair. configured: use the configured user and password.
+# adminpass: use the injected adminPass. disabled: avoid using ssh
+# when it is an option. (string value)
+#ssh_auth_method = keypair
+
+# Timeout in seconds to wait for output from ssh channel. (integer
+# value)
+#ssh_channel_timeout = 60
+
+# How to connect to the instance? fixed: use the first IP belonging to
+# the fixed network; floating: create and use a floating IP. (string
+# value)
+#ssh_connect_method = fixed
+
+# Timeout in seconds to wait for authentication to succeed. (integer
+# value)
+#ssh_timeout = 300
+
+# User name used to authenticate to an instance. (string value)
+#ssh_user = root
+
+# Does SSH use Floating IPs?
(boolean value) +#use_floatingip_for_ssh = true + +# Expected device name when a volume is attached to an instance +# (string value) +#volume_device_name = vdb + + +[compute-admin] + +# +# From tempest.config +# + +# Domain name for authentication as admin (Keystone V3).The same +# domain applies to user and project (string value) +#domain_name = + +# API key to use when authenticating as admin. (string value) +password = {{ NOVA_SERVICE_PASSWORD }} + +# Administrative Tenant name to use for Nova API requests. (string +# value) +tenant_name = service + +# Administrative Username to use for Nova API requests. (string value) +username = {{ NOVA_SERVICE_USER }} + + +[compute-feature-enabled] + +# +# From tempest.config +# + +# A list of enabled compute extensions with a special entry all which +# indicates every extension is enabled. Each extension should be +# specified with alias name. Empty list indicates all extensions are +# disabled (list value) +#api_extensions = all + +# If false, skip all nova v3 tests. (boolean value) +api_v3 = false + +# A list of enabled v3 extensions with a special entry all which +# indicates every extension is enabled. Each extension should be +# specified with alias name. Empty list indicates all extensions are +# disabled (list value) +#api_v3_extensions = all + +# Does the test environment block migration support cinder iSCSI +# volumes (boolean value) +#block_migrate_cinder_iscsi = false + +# Does the test environment use block devices for live migration +# (boolean value) +#block_migration_for_live_migration = false + +# Does the test environment support changing the admin password? +# (boolean value) +#change_password = false + +# Does the test environment support obtaining instance serial console +# output? (boolean value) +#console_output = true + +# If false, skip disk config tests (boolean value) +#disk_config = true + +# Enables returning of the instance password by the relevant server +# API calls such as create, rebuild or rescue. (boolean value) +#enable_instance_password = true + +# Does the test environment support dynamic network interface +# attachment? (boolean value) +#interface_attach = true + +# Does the test environment support live migration available? (boolean +# value) +#live_migration = false + +# Does the test environment support pausing? (boolean value) +#pause = true + +# Enable RDP console. This configuration value should be same as +# [nova.rdp]->enabled in nova.conf (boolean value) +#rdp_console = false + +# Does the test environment support instance rescue mode? (boolean +# value) +#rescue = true + +# Does the test environment support resizing? (boolean value) +#resize = false + +# Does the test environment support shelving/unshelving? (boolean +# value) +#shelve = true + +# Does the test environment support creating snapshot images of +# running instances? (boolean value) +snapshot = true + +# Enable Spice console. This configuration value should be same as +# [nova.spice]->enabled in nova.conf (boolean value) +spice_console = false + +# Does the test environment support suspend/resume? (boolean value) +#suspend = true + +# Enable VNC console. 
This configuration value should be same as +# [nova.vnc]->vnc_enabled in nova.conf (boolean value) +vnc_console = true + +# If false skip all v2 api tests with xml (boolean value) +#xml_api_v2 = true + + +[dashboard] + +# +# From tempest.config +# + +# Where the dashboard can be found (string value) +dashboard_url = http://{{ CONTROLLER_HOST_ADDRESS }}/horizon + +# Login page for the dashboard (string value) +login_url = http://{{ CONTROLLER_HOST_ADDRESS }}/horizon/auth/login/ + + +[data_processing] + +# +# From tempest.config +# + +# Catalog type of the data processing service. (string value) +#catalog_type = data_processing + +# The endpoint type to use for the data processing service. (string +# value) +#endpoint_type = publicURL + + +[database] + +# +# From tempest.config +# + +# Catalog type of the Database service. (string value) +#catalog_type = database + +# Current database version to use in database tests. (string value) +#db_current_version = v1.0 + +# Valid primary flavor to use in database tests. (string value) +#db_flavor_ref = 1 + + +[debug] + +# +# From tempest.config +# + +# Enable diagnostic commands (boolean value) +#enable = true + +# A regex to determine which requests should be traced. This is a +# regex to match the caller for rest client requests to be able to +# selectively trace calls out of specific classes and methods. It +# largely exists for test development, and is not expected to be used +# in a real deploy of tempest. This will be matched against the +# discovered ClassName:method in the test environment. Expected +# values for this field are: * ClassName:test_method_name - traces +# one test_method * ClassName:setUp(Class) - traces specific setup +# functions * ClassName:tearDown(Class) - traces specific teardown +# functions * ClassName:_run_cleanups - traces the cleanup functions +# If nothing is specified, this feature is not enabled. To trace +# everything specify .* as the regex. (string value) +#trace_requests = + + +[identity] + +# +# From tempest.config +# + +# Admin domain name for authentication (Keystone V3).The same domain +# applies to user and project (string value) +#admin_domain_name = + +# API key to use when authenticating as admin. (string value) +admin_password = {{ KEYSTONE_ADMIN_PASSWORD }} + +# Role required to administrate keystone. (string value) +admin_role = admin + +# Administrative Tenant name to use for Keystone API requests. (string +# value) +admin_tenant_name = admin + +# Administrative Username to use for Keystone API requests. (string +# value) +admin_username = admin + +# Alternate domain name for authentication (Keystone V3).The same +# domain applies to user and project (string value) +#alt_domain_name = + +# API key to use when authenticating as alternate user. (string value) +#alt_password = + +# Alternate user's Tenant name to use for Nova API requests. (string +# value) +#alt_tenant_name = + +# Username of alternate user to use for Nova API requests. (string +# value) +#alt_username = + +# Identity API version to be used for authentication for API tests. +# (string value) +auth_version = v2 + +# Catalog type of the Identity service. (string value) +catalog_type = identity + +# Set to True if using self-signed SSL certificates. (boolean value) +#disable_ssl_certificate_validation = false + +# Domain name for authentication (Keystone V3).The same domain applies +# to user and project (string value) +#domain_name = + +# The endpoint type to use for the identity service. 
(string value)
+#endpoint_type = publicURL
+
+# API key to use when authenticating. (string value)
+password = {{ NOVA_SERVICE_PASSWORD }}
+
+# The identity region name to use. Also used as the other services'
+# region name unless they are set explicitly. If no such region is
+# found in the service catalog, the first found one is used. (string
+# value)
+#region = RegionOne
+
+# Tenant name to use for Nova API requests. (string value)
+tenant_name = service
+
+# Full URI of the OpenStack Identity API (Keystone), v2 (string value)
+uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0/
+
+# Full URI of the OpenStack Identity API (Keystone), v3 (string value)
+#
+# Tempest complains if we don't set any uri_v3, even if it's disabled.
+uri_v3 =
+
+# Username to use for Nova API requests. (string value)
+username = {{ NOVA_SERVICE_USER }}
+
+
+[identity-feature-enabled]
+
+#
+# From tempest.config
+#
+
+# Is the v2 identity API enabled (boolean value)
+api_v2 = true
+
+# Is the v3 identity API enabled (boolean value)
+api_v3 = false
+
+# Does the identity service have delegation and impersonation enabled
+# (boolean value)
+#trust = true
+
+
+[image]
+
+#
+# From tempest.config
+#
+
+# Catalog type of the Image service. (string value)
+catalog_type = image
+
+# The endpoint type to use for the image service. (string value)
+endpoint_type = publicURL
+
+# http accessible image (string value)
+http_image = http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz
+
+# The image region name to use. If empty, the value of identity.region
+# is used instead. If no such region is found in the service catalog,
+# the first found one is used. (string value)
+#region =
+
+
+[image-feature-enabled]
+
+#
+# From tempest.config
+#
+
+# Is the v1 image API enabled (boolean value)
+#api_v1 = true
+
+# Is the v2 image API enabled (boolean value)
+api_v2 = true
+
+
+[input-scenario]
+
+#
+# From tempest.config
+#
+
+# Matching flavors become parameters for scenario tests (string value)
+#flavor_regex = ^m1.nano$
+
+# Matching images become parameters for scenario tests (string value)
+#image_regex = ^cirros-0.3.1-x86_64-uec$
+
+# SSH verification in tests is skipped for matching images (string
+# value)
+#non_ssh_image_regex = ^.*[Ww]in.*$
+
+# List of users mapped to regexes matching image names. (string value)
+#ssh_user_regex = [["^.*[Cc]irros.*$", "root"]]
+
+
+[messaging]
+
+#
+# From tempest.config
+#
+
+# Catalog type of the Messaging service.
(string value)
+#catalog_type = messaging
+
+# The maximum grace period for a claim (integer value)
+#max_claim_grace = 43200
+
+# The maximum ttl for a claim (integer value)
+#max_claim_ttl = 43200
+
+# The maximum size of a message body (integer value)
+#max_message_size = 262144
+
+# The maximum ttl for a message (integer value)
+#max_message_ttl = 1209600
+
+# The maximum number of messages per claim (integer value)
+#max_messages_per_claim = 20
+
+# The maximum number of queue messages per page when listing (or
+# posting) messages (integer value)
+#max_messages_per_page = 20
+
+# The maximum metadata size for a queue (integer value)
+#max_queue_metadata = 65536
+
+# The maximum number of queue records per page when listing queues
+# (integer value)
+#max_queues_per_page = 20
+
+
+[negative]
+
+#
+# From tempest.config
+#
+
+# Test generator class for all negative tests (string value)
+#test_generator = tempest.common.generator.negative_generator.NegativeTestGenerator
+
+
+[network]
+
+#
+# From tempest.config
+#
+
+# Time in seconds between network operation status checks. (integer
+# value)
+#build_interval = 1
+
+# Timeout in seconds to wait for network operation to complete.
+# (integer value)
+#build_timeout = 300
+
+# Catalog type of the Neutron service. (string value)
+#catalog_type = network
+
+# List of dns servers which should be used for subnet creation (list
+# value)
+#dns_servers = 8.8.8.8,8.8.4.4
+
+# The endpoint type to use for the network service. (string value)
+#endpoint_type = publicURL
+
+# Id of the public network that provides external connectivity (string
+# value)
+#public_network_id =
+
+# Id of the public router that provides external connectivity (string
+# value)
+#public_router_id =
+
+# The network region name to use. If empty, the value of
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
+
+# The cidr block to allocate tenant ipv4 subnets from (string value)
+#tenant_network_cidr = 10.100.0.0/16
+
+# The mask bits for tenant ipv4 subnets (integer value)
+#tenant_network_mask_bits = 28
+
+# The cidr block to allocate tenant ipv6 subnets from (string value)
+#tenant_network_v6_cidr = 2003::/48
+
+# The mask bits for tenant ipv6 subnets (integer value)
+#tenant_network_v6_mask_bits = 64
+
+# Whether tenant network connectivity should be evaluated directly
+# (boolean value)
+#tenant_networks_reachable = false
+
+
+[network-feature-enabled]
+
+#
+# From tempest.config
+#
+
+# A list of enabled network extensions with a special entry all which
+# indicates every extension is enabled. Empty list indicates all
+# extensions are disabled (list value)
+#api_extensions = all
+
+# Allow the execution of IPv6 tests (boolean value)
+#ipv6 = true
+
+# Allow the execution of IPv6 subnet tests that use the extended IPv6
+# attributes ipv6_ra_mode and ipv6_address_mode (boolean value)
+#ipv6_subnet_attributes = false
+
+
+[object-storage]
+
+#
+# From tempest.config
+#
+
+# Catalog type of the Object-Storage service. (string value)
+#catalog_type = object-store
+
+# Number of seconds to wait while looping to check the status of a
+# container to container synchronization (integer value)
+#container_sync_interval = 5
+
+# Number of seconds to time out while waiting for a container-to-container
+# synchronization to complete. (integer value)
+#container_sync_timeout = 120
+
+# The endpoint type to use for the object-store service.
(string +# value) +#endpoint_type = publicURL + +# Role to add to users created for swift tests to enable creating +# containers (string value) +#operator_role = Member + +# The object-storage region name to use. If empty, the value of +# identity.region is used instead. If no such region is found in the +# service catalog, the first found one is used. (string value) +#region = + +# User role that has reseller admin (string value) +#reseller_admin_role = ResellerAdmin + + +[object-storage-feature-enabled] + +# +# From tempest.config +# + +# Execute (old style) container-sync tests (boolean value) +#container_sync = true + +# Execute discoverability tests (boolean value) +#discoverability = true + +# A list of the enabled optional discoverable apis. A single entry, +# all, indicates that all of these features are expected to be enabled +# (list value) +#discoverable_apis = all + +# Execute object-versioning tests (boolean value) +#object_versioning = true + + +[orchestration] + +# +# From tempest.config +# + +# Time in seconds between build status checks. (integer value) +#build_interval = 1 + +# Timeout in seconds to wait for a stack to build. (integer value) +#build_timeout = 1200 + +# Catalog type of the Orchestration service. (string value) +#catalog_type = orchestration + +# The endpoint type to use for the orchestration service. (string +# value) +#endpoint_type = publicURL + +# Name of heat-cfntools enabled image to use when launching test +# instances. (string value) +#image_ref = + +# Instance type for tests. Needs to be big enough for a full OS plus +# the test workload (string value) +#instance_type = m1.micro + +# Name of existing keypair to launch servers with. (string value) +#keypair_name = + +# Value must match heat configuration of the same name. (integer +# value) +#max_resources_per_stack = 1000 + +# Value must match heat configuration of the same name. (integer +# value) +#max_template_size = 524288 + +# The orchestration region name to use. If empty, the value of +# identity.region is used instead. If no such region is found in the +# service catalog, the first found one is used. (string value) +#region = + + +[scenario] + +# +# From tempest.config +# + +# AKI image file name (string value) +#aki_img_file = cirros-0.3.1-x86_64-vmlinuz + +# AMI image file name (string value) +#ami_img_file = cirros-0.3.1-x86_64-blank.img + +# ARI image file name (string value) +#ari_img_file = cirros-0.3.1-x86_64-initrd + +# Image container format (string value) +#img_container_format = bare + +# Directory containing image files (string value) +#img_dir = /opt/stack/new/devstack/files/images/cirros-0.3.1-x86_64-uec + +# Image disk format (string value) +#img_disk_format = qcow2 + +# Image file name (string value) +# Deprecated group/name - [DEFAULT]/qcow2_img_file +#img_file = cirros-0.3.1-x86_64-disk.img + +# specifies how many resources to request at once. Used for large +# operations testing. 
(integer value)
+#large_ops_number = 0
+
+# ssh username for the image file (string value)
+#ssh_user = cirros
+
+
+[service_available]
+
+#
+# From tempest.config
+#
+
+# Whether or not Ceilometer is expected to be available (boolean
+# value)
+ceilometer = false
+
+# Whether or not cinder is expected to be available (boolean value)
+cinder = true
+
+# Whether or not glance is expected to be available (boolean value)
+glance = true
+
+# Whether or not Heat is expected to be available (boolean value)
+heat = false
+
+# Whether or not Horizon is expected to be available (boolean value)
+horizon = true
+
+# Whether or not Ironic is expected to be available (boolean value)
+ironic = false
+
+# Whether or not neutron is expected to be available (boolean value)
+neutron = true
+
+# Whether or not nova is expected to be available (boolean value)
+nova = true
+
+# Whether or not Sahara is expected to be available (boolean value)
+sahara = false
+
+# Whether or not swift is expected to be available (boolean value)
+swift = false
+
+# Whether or not Trove is expected to be available (boolean value)
+trove = false
+
+# Whether or not Zaqar is expected to be available (boolean value)
+zaqar = false
+
+
+[stress]
+
+#
+# From tempest.config
+#
+
+# Controller host. (string value)
+#controller =
+
+# The number of threads created during a stress test. (integer value)
+#default_thread_number_per_action = 4
+
+# Allows a full cleaning process after a stress test. Caution: this
+# cleanup will remove every object of every tenant. (boolean value)
+#full_clean_stack = false
+
+# Prevent the cleaning (tearDownClass()) between each stress test run
+# if an exception occurs during this run. (boolean value)
+#leave_dirty_stack = false
+
+# Time (in seconds) between log file error checks. (integer value)
+#log_check_interval = 60
+
+# Maximum number of instances to create during test. (integer value)
+#max_instances = 16
+
+# Directory containing log files on the compute nodes (string value)
+#nova_logdir =
+
+# Controller host. (string value)
+#target_controller =
+
+# Regexp for the list of log files. (string value)
+#target_logfiles =
+
+# Path to private key. (string value)
+#target_private_key_path =
+
+# SSH user. (string value)
+#target_ssh_user =
+
+
+[telemetry]
+
+#
+# From tempest.config
+#
+
+# Catalog type of the Telemetry service. (string value)
+#catalog_type = metering
+
+# The endpoint type to use for the telemetry service. (string value)
+#endpoint_type = publicURL
+
+# This variable is used as a flag to enable notification tests (boolean
+# value)
+#too_slow_to_test = true
+
+
+[volume]
+
+#
+# From tempest.config
+#
+
+# Name of the backend1 (must be declared in cinder.conf) (string
+# value)
+backend1_name = LVM_iSCSI
+
+# Name of the backend2 (must be declared in cinder.conf) (string
+# value)
+#backend2_name = BACKEND_2
+
+# Time in seconds between volume availability checks. (integer value)
+#build_interval = 1
+
+# Timeout in seconds to wait for a volume to become available.
+# (integer value)
+#build_timeout = 300
+
+# Catalog type of the Volume Service (string value)
+catalog_type = volume
+
+# Disk format to use when copying a volume to image (string value)
+disk_format = raw
+
+# The endpoint type to use for the volume service. (string value)
+endpoint_type = publicURL
+
+# The volume region name to use. If empty, the value of
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used.
(string value) +#region = + +# Backend protocol to target when creating volume types (string value) +storage_protocol = iSCSI + +# Backend vendor to target when creating volume types (string value) +#vendor_name = Open Source + +# Default size in GB for volumes created by volumes tests (integer +# value) +volume_size = 1 + + +[volume-feature-enabled] + +# +# From tempest.config +# + +# A list of enabled volume extensions with a special entry all which +# indicates every extension is enabled. Empty list indicates all +# extensions are disabled (list value) +#api_extensions = all + +# Is the v1 volume API enabled (boolean value) +api_v1 = true + +# Is the v2 volume API enabled (boolean value) +api_v2 = true + +# Runs Cinder volumes backup test (boolean value) +backup = true + +# Runs Cinder multi-backend test (requires 2 backends) (boolean value) +multi_backend = false + +# Runs Cinder volume snapshot test (boolean value) +snapshot = true diff --git a/install-files/openstack/manifest b/install-files/openstack/manifest new file mode 100644 index 00000000..aa4d5430 --- /dev/null +++ b/install-files/openstack/manifest @@ -0,0 +1,190 @@ +0040755 0 0 /etc/horizon +0100644 0 0 /etc/horizon/apache-horizon.conf +0040755 0 0 /etc/horizon/openstack_dashboard +0100644 0 0 /etc/horizon/openstack_dashboard/local_settings.py +template 0100644 0 0 /etc/tempest/tempest.conf +0040755 0 0 /usr/share/openstack +0100644 0 0 /usr/share/openstack/hosts +0040755 0 0 /usr/share/openstack/ceilometer +0100644 0 0 /usr/share/openstack/ceilometer-config.yml +0100644 0 0 /usr/share/openstack/ceilometer-db.yml +0100644 0 0 /usr/share/openstack/ceilometer/ceilometer.conf +0040755 0 0 /usr/share/openstack/cinder +0100644 0 0 /usr/share/openstack/cinder-config.yml +0100644 0 0 /usr/share/openstack/cinder-db.yml +0100644 0 0 /usr/share/openstack/cinder-lvs.yml +0100644 0 0 /usr/share/openstack/cinder/cinder.conf +0100644 0 0 /usr/share/openstack/cinder/api-paste.ini +0100644 0 0 /usr/share/openstack/cinder/policy.json +0040755 0 0 /usr/share/openstack/extras +0100644 0 0 /usr/share/openstack/extras/00-disable-device.network +0100644 0 0 /usr/share/openstack/extras/60-device-dhcp.network +0100644 0 0 /usr/share/openstack/glance.yml +0040755 0 0 /usr/share/openstack/glance +0100644 0 0 /usr/share/openstack/glance/logging.conf +0100644 0 0 /usr/share/openstack/glance/glance-api.conf +0100644 0 0 /usr/share/openstack/glance/glance-registry.conf +0100644 0 0 /usr/share/openstack/glance/glance-scrubber.conf +0100644 0 0 /usr/share/openstack/glance/glance-cache.conf +0100644 0 0 /usr/share/openstack/glance/schema-image.json +0100644 0 0 /usr/share/openstack/glance/policy.json +0100644 0 0 /usr/share/openstack/glance/glance-api-paste.ini +0100644 0 0 /usr/share/openstack/glance/glance-registry-paste.ini +0100644 0 0 /usr/share/openstack/horizon.yml +0040755 0 0 /usr/share/openstack/ironic +0100644 0 0 /usr/share/openstack/ironic.yml +0100644 0 0 /usr/share/openstack/ironic/ironic.conf +0100644 0 0 /usr/share/openstack/ironic/policy.json +0100644 0 0 /usr/share/openstack/iscsi.yml +0100644 0 0 /usr/share/openstack/keystone.yml +0040755 0 0 /usr/share/openstack/keystone +0100644 0 0 /usr/share/openstack/keystone/logging.conf +0100644 0 0 /usr/share/openstack/keystone/keystone.conf +0100644 0 0 /usr/share/openstack/keystone/policy.json +0100644 0 0 /usr/share/openstack/keystone/keystone-paste.ini +0100644 0 0 /usr/share/openstack/network.yml +0040755 0 0 /usr/share/openstack/neutron +0100644 0 0 /usr/share/openstack/neutron-config.yml 
+0100644 0 0 /usr/share/openstack/neutron-db.yml +0100644 0 0 /usr/share/openstack/neutron/neutron.conf +0100644 0 0 /usr/share/openstack/neutron/api-paste.ini +0100644 0 0 /usr/share/openstack/neutron/policy.json +0100644 0 0 /usr/share/openstack/neutron/l3_agent.ini +0100644 0 0 /usr/share/openstack/neutron/dhcp_agent.ini +0100644 0 0 /usr/share/openstack/neutron/lbaas_agent.ini +0100644 0 0 /usr/share/openstack/neutron/metadata_agent.ini +0100644 0 0 /usr/share/openstack/neutron/fwaas_driver.ini +0100644 0 0 /usr/share/openstack/neutron/metering_agent.ini +0100644 0 0 /usr/share/openstack/neutron/vpn_agent.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/ +0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch +0100644 0 0 /usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl +0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs +0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs +0100644 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README +0100644 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README +0040755 0 0 /usr/share/openstack/neutron/plugins/brocade +0100644 0 0 /usr/share/openstack/neutron/plugins/brocade/brocade.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/cisco +0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini +0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini +0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini +0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/embrane +0100644 0 0 /usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/hyperv +0100644 0 0 /usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/ibm +0100644 0 0 /usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/linuxbridge +0100644 0 0 /usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/metaplugin +0100644 0 0 /usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/midonet +0100644 0 0 /usr/share/openstack/neutron/plugins/midonet/midonet.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/ml2 +0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini +0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini +0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini +0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini +0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini +0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini +0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini +0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini +0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini +0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/mlnx +0100644 0 0 /usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/nec +0100644 0 0 /usr/share/openstack/neutron/plugins/nec/nec.ini +0040755 0 0 
/usr/share/openstack/neutron/plugins/nuage +0100644 0 0 /usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/oneconvergence +0100644 0 0 /usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/opencontrail +0100644 0 0 /usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/openvswitch +0100644 0 0 /usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/plumgrid +0100644 0 0 /usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini +0040755 0 0 /usr/share/openstack/neutron/plugins/vmware +0100644 0 0 /usr/share/openstack/neutron/plugins/vmware/nsx.ini +0040755 0 0 /usr/share/openstack/nova +0100644 0 0 /usr/share/openstack/nova-config.yml +0100644 0 0 /usr/share/openstack/nova-db.yml +0100644 0 0 /usr/share/openstack/nova/logging.conf +0100644 0 0 /usr/share/openstack/nova/nova.conf +0100644 0 0 /usr/share/openstack/nova/nova-compute.conf +0100644 0 0 /usr/share/openstack/nova/policy.json +0100644 0 0 /usr/share/openstack/nova/cells.json +0100644 0 0 /usr/share/openstack/nova/api-paste.ini +0100644 0 0 /usr/share/openstack/openvswitch.yml +0040755 0 0 /usr/share/openstack/postgres +0100644 0 0 /usr/share/openstack/postgres.yml +0100644 0 0 /usr/share/openstack/postgres/pg_hba.conf +0100644 0 0 /usr/share/openstack/postgres/postgresql.conf +0040755 0 0 /usr/share/openstack/rabbitmq +0100644 0 0 /usr/share/openstack/rabbitmq/rabbitmq-env.conf +0100644 0 0 /usr/share/openstack/rabbitmq/rabbitmq.config +0040755 0 0 /usr/lib/sysctl.d +0100644 0 0 /usr/lib/sysctl.d/neutron.conf +0100644 0 0 /usr/lib/systemd/system/apache-httpd.service +0100644 0 0 /usr/lib/systemd/system/iscsi-setup.service +0100644 0 0 /usr/lib/systemd/system/openstack-keystone.service +0100644 0 0 /usr/lib/systemd/system/openstack-keystone-setup.service +0100644 0 0 /usr/lib/systemd/system/openstack-glance-setup.service +0100644 0 0 /usr/lib/systemd/system/openstack-glance-api.service +0100644 0 0 /usr/lib/systemd/system/openstack-glance-registry.service +0100644 0 0 /usr/lib/systemd/system/openstack-horizon-setup.service +0100644 0 0 /usr/lib/systemd/system/openstack-ironic-setup.service +0100644 0 0 /usr/lib/systemd/system/openstack-ironic-api.service +0100644 0 0 /usr/lib/systemd/system/openstack-ironic-conductor.service +0100644 0 0 /usr/lib/systemd/system/openstack-network-setup.service +0100644 0 0 /usr/lib/systemd/system/openstack-neutron-config-setup.service +0100644 0 0 /usr/lib/systemd/system/openstack-neutron-db-setup.service +0100644 0 0 /usr/lib/systemd/system/openstack-neutron-server.service +0100644 0 0 /usr/lib/systemd/system/openstack-neutron-metadata-agent.service +0100644 0 0 /usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service +0100644 0 0 /usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service +0100644 0 0 /usr/lib/systemd/system/openstack-neutron-dhcp-agent.service +0100644 0 0 /usr/lib/systemd/system/openstack-neutron-l3-agent.service +0100644 0 0 /usr/lib/systemd/system/openstack-nova-config-setup.service +0100644 0 0 /usr/lib/systemd/system/openstack-nova-db-setup.service +0100644 0 0 /usr/lib/systemd/system/openstack-nova-compute.service +0100644 0 0 /usr/lib/systemd/system/openstack-nova-conductor.service +0100644 0 0 /usr/lib/systemd/system/openstack-nova-api.service +0100644 0 0 
/usr/lib/systemd/system/openstack-nova-scheduler.service +0100644 0 0 /usr/lib/systemd/system/openstack-nova-consoleauth.service +0100644 0 0 /usr/lib/systemd/system/openstack-nova-novncproxy.service +0100644 0 0 /usr/lib/systemd/system/openstack-nova-cert.service +0100644 0 0 /usr/lib/systemd/system/openstack-nova-serialproxy.service +0100644 0 0 /usr/lib/systemd/system/rabbitmq-server.service +0100644 0 0 /usr/lib/systemd/system/openstack-cinder-config-setup.service +0100644 0 0 /usr/lib/systemd/system/openstack-cinder-db-setup.service +0100644 0 0 /usr/lib/systemd/system/openstack-cinder-lv-setup.service +0100644 0 0 /usr/lib/systemd/system/openstack-cinder-api.service +0100644 0 0 /usr/lib/systemd/system/openstack-cinder-scheduler.service +0100644 0 0 /usr/lib/systemd/system/openstack-cinder-volume.service +0100644 0 0 /usr/lib/systemd/system/openstack-cinder-backup.service +0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-config-setup.service +0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-db-setup.service +0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service +0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service +0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-api.service +0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-central.service +0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-collector.service +0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-compute.service +0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-notification.service +0100644 0 0 /usr/lib/systemd/system/openvswitch-setup.service +0100644 0 0 /usr/lib/systemd/system/openvswitch-db-server.service +0100644 0 0 /usr/lib/systemd/system/openvswitch.service +0100644 0 0 /usr/lib/systemd/system/postgres-server.service +0100644 0 0 /usr/lib/systemd/system/postgres-server-setup.service +0100644 0 0 /usr/share/openstack/swift-controller.yml +0100644 0 0 /usr/lib/systemd/system/swift-controller-setup.service +0100644 0 0 /usr/lib/systemd/system/swift-proxy.service +0040755 0 0 /usr/share/swift +0040755 0 0 /usr/share/swift/etc +0040755 0 0 /usr/share/swift/etc/swift +0100644 0 0 /usr/share/swift/etc/swift/proxy-server.j2 diff --git a/install-files/openstack/usr/lib/sysctl.d/neutron.conf b/install-files/openstack/usr/lib/sysctl.d/neutron.conf new file mode 100644 index 00000000..644ca116 --- /dev/null +++ b/install-files/openstack/usr/lib/sysctl.d/neutron.conf @@ -0,0 +1,3 @@ +# Disable rp filtering, enabling forwarding is handled by networkd +net.ipv4.conf.all.rp_filter=0 +net.ipv4.conf.default.rp_filter=0 diff --git a/install-files/openstack/usr/lib/systemd/system/apache-httpd.service b/install-files/openstack/usr/lib/systemd/system/apache-httpd.service new file mode 100644 index 00000000..e2a840c6 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/apache-httpd.service @@ -0,0 +1,16 @@ +[Unit] +Description=Apache Web Server +After=network.target remote-fs.target nss-lookup.target +Wants=network.target + +[Service] +Type=forking +PIDFile=/var/run/httpd.pid +ExecStart=/usr/sbin/apachectl start +ExecStop=/usr/sbin/apachectl graceful-stop +ExecReload=/usr/sbin/apachectl graceful +PrivateTmp=true +LimitNOFILE=infinity + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/iscsi-setup.service b/install-files/openstack/usr/lib/systemd/system/iscsi-setup.service new file mode 100644 index 00000000..4cb10045 --- /dev/null +++ 
b/install-files/openstack/usr/lib/systemd/system/iscsi-setup.service @@ -0,0 +1,12 @@ +[Unit] +Description=Run iscsi-setup Ansible scripts +Before=iscsid.service target.service +Wants=iscsid.service target.service + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/iscsi.yml + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service new file mode 100644 index 00000000..6e3ada59 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack ceilometer alarm evaluation service +ConditionPathExists=/etc/ceilometer/ceilometer.conf +After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service +Wants=network-online.target + +[Service] +Type=simple +User=ceilometer +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/ceilometer-alarm-evaluator --config-file /etc/ceilometer/ceilometer.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service new file mode 100644 index 00000000..7a3e1c91 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack ceilometer alarm notification service +ConditionPathExists=/etc/ceilometer/ceilometer.conf +After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service +Wants=network-online.target + +[Service] +Type=simple +User=ceilometer +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/ceilometer-alarm-notifier --config-file /etc/ceilometer/ceilometer.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-api.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-api.service new file mode 100644 index 00000000..eb0293bf --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-api.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack ceilometer API service +ConditionPathExists=/etc/ceilometer/ceilometer.conf +After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service +Wants=network-online.target + +[Service] +Type=simple +User=ceilometer +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/ceilometer-api --config-file /etc/ceilometer/ceilometer.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-central.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-central.service new file mode 100644 index 00000000..a1bc11ee --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-central.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack ceilometer central agent +ConditionPathExists=/etc/ceilometer/ceilometer.conf +After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service openstack-ceilometer-collector.service 
+Wants=network-online.target + +[Service] +Type=simple +User=ceilometer +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/ceilometer-agent-central --config-file /etc/ceilometer/ceilometer.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-collector.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-collector.service new file mode 100644 index 00000000..dafc3ac7 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-collector.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack ceilometer collection service +ConditionPathExists=/etc/ceilometer/ceilometer.conf +After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service +Wants=network-online.target + +[Service] +Type=simple +User=ceilometer +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/ceilometer-collector --config-file /etc/ceilometer/ceilometer.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-compute.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-compute.service new file mode 100644 index 00000000..9fe8a1e6 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-compute.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack ceilometer compute agent +ConditionPathExists=/etc/ceilometer/ceilometer.conf +After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service +Wants=network-online.target + +[Service] +Type=simple +User=ceilometer +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/ceilometer-agent-compute --config-file /etc/ceilometer/ceilometer.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-config-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-config-setup.service new file mode 100644 index 00000000..c3e809d7 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-config-setup.service @@ -0,0 +1,11 @@ +[Unit] +Description=Run ceilometer-config-setup Ansible scripts +ConditionPathExists=/etc/openstack/ceilometer.conf + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/ceilometer-config.yml + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-db-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-db-setup.service new file mode 100644 index 00000000..7a785227 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-db-setup.service @@ -0,0 +1,13 @@ +[Unit] +Description=Run ceilometer-db-setup Ansible scripts +ConditionPathExists=/etc/openstack/ceilometer.conf +After=network-online.target postgres-server-setup.service openstack-keystone-setup.service openstack-ceilometer-config-setup.service +Wants=network-online.target + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/ceilometer-db.yml + +[Install] +WantedBy=multi-user.target diff --git 
a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-notification.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-notification.service new file mode 100644 index 00000000..6696116e --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-notification.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack ceilometer notification agent +ConditionPathExists=/etc/ceilometer/ceilometer.conf +After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service openstack-ceilometer-collector.service +Wants=network-online.target + +[Service] +Type=simple +User=ceilometer +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/ceilometer-agent-notification --config-file /etc/ceilometer/ceilometer.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-cinder-api.service b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-api.service new file mode 100644 index 00000000..a284f31d --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-api.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Volume Service (code-named Cinder) API server +ConditionPathExists=/etc/cinder/cinder.conf +After=network-online.target openstack-cinder-config-setup.service openstack-cinder-db-setup.service +Wants=network-online.target + +[Service] +Type=simple +User=cinder +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/cinder-api --config-file /etc/cinder/cinder.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-cinder-backup.service b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-backup.service new file mode 100644 index 00000000..c14e13aa --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-backup.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Cinder backup server +ConditionPathExists=/etc/cinder/cinder.conf +After=network-online.target openstack-cinder-config-setup.service openstack-cinder-db-setup.service openstack-cinder-volume.service +Wants=network-online.target + +[Service] +Type=simple +User=cinder +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/cinder-backup --config-file /etc/cinder/cinder.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-cinder-config-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-config-setup.service new file mode 100644 index 00000000..1c966933 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-config-setup.service @@ -0,0 +1,11 @@ +[Unit] +Description=Run cinder-config-setup Ansible scripts +ConditionPathExists=/etc/openstack/cinder.conf + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/cinder-config.yml + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-cinder-db-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-db-setup.service new file mode 100644 index 00000000..a3c66d67 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-db-setup.service @@ -0,0 +1,13 @@ +[Unit] +Description=Run cinder-db-setup Ansible scripts 
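+# ConditionPathExists (next line) makes systemd skip this one-shot job cleanly when no /etc/openstack/cinder.conf has been deployed on the node.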
+ConditionPathExists=/etc/openstack/cinder.conf +After=network-online.target postgres-server-setup.service openstack-keystone-setup.service openstack-cinder-config-setup.service +Wants=network-online.target + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/cinder-db.yml + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-cinder-lv-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-lv-setup.service new file mode 100644 index 00000000..82e9b08d --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-lv-setup.service @@ -0,0 +1,12 @@ +[Unit] +Description=Run cinder-lvs-setup Ansible scripts +ConditionPathExists=/etc/openstack/cinder.conf +Wants=lvm2-lvmetad.service + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/cinder-lvs.yml + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-cinder-scheduler.service b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-scheduler.service new file mode 100644 index 00000000..f205aaff --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-scheduler.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Cinder scheduler server +ConditionPathExists=/etc/cinder/cinder.conf +After=network-online.target openstack-cinder-config-setup.service openstack-cinder-db-setup.service openstack-cinder-volume.service +Wants=network-online.target + +[Service] +Type=simple +User=cinder +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/cinder-scheduler --config-file /etc/cinder/cinder.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-cinder-volume.service b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-volume.service new file mode 100644 index 00000000..c56ee693 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-volume.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Cinder volume server +ConditionPathExists=/etc/cinder/cinder.conf +After=network-online.target openstack-cinder-config-setup.service openstack-cinder-db-setup.service openstack-cinder-lv-setup.service lvm2-lvmetad.service iscsid.service target.service +Wants=network-online.target + +[Service] +Type=simple +User=cinder +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/cinder-volume --config-file /etc/cinder/cinder.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-glance-api.service b/install-files/openstack/usr/lib/systemd/system/openstack-glance-api.service new file mode 100644 index 00000000..4c34ff10 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-glance-api.service @@ -0,0 +1,16 @@ +[Unit] +Description=OpenStack Image Service (code-named Glance) API server +ConditionPathExists=/etc/glance/glance-api.conf +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +User=glance +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/glance-api --config-file /etc/glance/glance-api.conf + +[Install] +WantedBy=multi-user.target + diff --git 
a/install-files/openstack/usr/lib/systemd/system/openstack-glance-registry.service b/install-files/openstack/usr/lib/systemd/system/openstack-glance-registry.service new file mode 100644 index 00000000..d53c8b33 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-glance-registry.service @@ -0,0 +1,16 @@ +[Unit] +Description=OpenStack Image Service (code-named Glance) Registry server +ConditionPathExists=/etc/glance/glance-registry.conf +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +User=glance +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/glance-registry --config-file /etc/glance/glance-registry.conf + +[Install] +WantedBy=multi-user.target + diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-glance-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-glance-setup.service new file mode 100644 index 00000000..43810797 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-glance-setup.service @@ -0,0 +1,11 @@ +[Unit] +Description=Run glance-setup Ansible scripts +ConditionPathExists=/etc/openstack/glance.conf +After=local-fs.target network-online.target postgres-server-setup.service openstack-keystone-setup.service +Wants=network-online.target + +[Service] +ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/glance.yml + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-horizon-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-horizon-setup.service new file mode 100644 index 00000000..9ec3197a --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-horizon-setup.service @@ -0,0 +1,10 @@ +[Unit] +Description=Run horizon-setup Ansible scripts +After=local-fs.target +Before=apache-httpd.service + +[Service] +ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/horizon.yml + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ironic-api.service b/install-files/openstack/usr/lib/systemd/system/openstack-ironic-api.service new file mode 100644 index 00000000..5a286a95 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-ironic-api.service @@ -0,0 +1,16 @@ +[Unit] +Description=OpenStack Bare Metal Provisioning Service (code-named Ironic) API server +ConditionPathExists=/etc/ironic/ironic.conf +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +User=ironic +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/ironic-api --config-file /etc/ironic/ironic.conf + +[Install] +WantedBy=multi-user.target + diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ironic-conductor.service b/install-files/openstack/usr/lib/systemd/system/openstack-ironic-conductor.service new file mode 100644 index 00000000..b3b226e0 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-ironic-conductor.service @@ -0,0 +1,16 @@ +[Unit] +Description=OpenStack Bare Metal Provisioning Service (code-named Ironic) Conductor server +ConditionPathExists=/etc/ironic/ironic.conf +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +User=ironic +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/ironic-conductor --config-file 
/etc/ironic/ironic.conf + +[Install] +WantedBy=multi-user.target + diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ironic-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-ironic-setup.service new file mode 100644 index 00000000..e3a58eb5 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-ironic-setup.service @@ -0,0 +1,12 @@ +[Unit] +Description=Run ironic-setup Ansible scripts +ConditionPathExists=/etc/openstack/ironic.conf +After=local-fs.target network-online.target postgres-server-setup.service openstack-keystone-setup.service +Wants=network-online.target + +[Service] +ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/ironic.yml + +[Install] +WantedBy=multi-user.target + diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-keystone-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-keystone-setup.service new file mode 100644 index 00000000..db9d0b2b --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-keystone-setup.service @@ -0,0 +1,14 @@ +[Unit] +Description=Run keystone-setup Ansible scripts +ConditionPathExists=/etc/openstack/keystone.conf +After=local-fs.target network-online.target postgres-server-setup.service +Wants=network-online.target + +[Service] +# Oneshot, since other setup services have to wait until this service finishes +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/keystone.yml + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-keystone.service b/install-files/openstack/usr/lib/systemd/system/openstack-keystone.service new file mode 100644 index 00000000..6f6ff644 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-keystone.service @@ -0,0 +1,16 @@ +[Unit] +Description=OpenStack Identity Service (code-named Keystone) +ConditionPathExists=/etc/keystone/keystone.conf +After=network-online.target +Wants=network-online.target + +[Service] +Type=notify +Restart=always +User=keystone +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/keystone-all --config-file /etc/keystone/keystone.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-network-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-network-setup.service new file mode 100644 index 00000000..021370d9 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-network-setup.service @@ -0,0 +1,12 @@ +[Unit] +Description=Run Ansible scripts to configure internal network for OpenStack +After=openvswitch-setup.service openvswitch.service +Before=systemd-networkd.service + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/network.yml + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-neutron-config-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-config-setup.service new file mode 100644 index 00000000..b74f44ab --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-config-setup.service @@ -0,0 +1,13 @@ +[Unit] +Description=Run
neutron-config-setup Ansible scripts +ConditionPathExists=/etc/openstack/neutron.conf +After=network-online.target openstack-keystone-setup.service +Wants=network-online.target + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/neutron-config.yml + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-neutron-db-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-db-setup.service new file mode 100644 index 00000000..5d07da2e --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-db-setup.service @@ -0,0 +1,13 @@ +[Unit] +Description=Run neutron-db-setup Ansible scripts +ConditionPathExists=/etc/openstack/neutron.conf +After=network-online.target postgres-server-setup.service openstack-keystone-setup.service openstack-neutron-config-setup.service +Wants=network-online.target + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/neutron-db.yml + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-neutron-dhcp-agent.service b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-dhcp-agent.service new file mode 100644 index 00000000..9080f3c1 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-dhcp-agent.service @@ -0,0 +1,17 @@ +[Unit] +Description=Neutron DHCP Agent +ConditionPathExists=/etc/neutron/neutron.conf +After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service openstack-neutron-ovs-cleanup.service +Wants=network-online.target + +[Service] +Type=simple +User=neutron +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/neutron-dhcp-agent \ + --config-file=/etc/neutron/neutron.conf \ + --config-file=/etc/neutron/dhcp_agent.ini + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-neutron-l3-agent.service b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-l3-agent.service new file mode 100644 index 00000000..76efea5c --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-l3-agent.service @@ -0,0 +1,18 @@ +[Unit] +Description=Neutron Layer 3 Agent +ConditionPathExists=/etc/neutron/neutron.conf +After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service openstack-neutron-ovs-cleanup.service +Wants=network-online.target + +[Service] +Type=simple +User=neutron +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/neutron-l3-agent \ + --config-file=/etc/neutron/neutron.conf \ + --config-file=/etc/neutron/l3_agent.ini \ + --config-file=/etc/neutron/fwaas_driver.ini + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-neutron-metadata-agent.service b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-metadata-agent.service new file mode 100644 index 00000000..20540e4c --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-metadata-agent.service @@ -0,0 +1,17 @@ +[Unit] +Description=Neutron Metadata Plugin Agent +ConditionPathExists=/etc/neutron/neutron.conf +After=network-online.target 
openstack-neutron-config-setup.service openstack-neutron-db-setup.service +Wants=network-online.target + +[Service] +Type=simple +User=neutron +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/neutron-metadata-agent \ + --config-file=/etc/neutron/neutron.conf \ + --config-file=/etc/neutron/metadata_agent.ini + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service new file mode 100644 index 00000000..f5709028 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service @@ -0,0 +1,18 @@ +[Unit] +Description=Neutron OVS cleanup +ConditionPathExists=/etc/neutron/neutron.conf +ConditionFileIsExecutable=/usr/bin/neutron-ovs-cleanup +After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service openvswitch.service +Before=openstack-neutron-plugin-openvswitch-agent.service +Wants=network-online.target + +[Service] +Type=oneshot +RemainAfterExit=yes +StandardOutput=null +StandardError=null +User=neutron +ExecStart=/usr/bin/neutron-ovs-cleanup --config-file /etc/neutron/neutron.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service new file mode 100644 index 00000000..6c579a62 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service @@ -0,0 +1,17 @@ +[Unit] +Description=Neutron OpenvSwitch Plugin Agent +ConditionPathExists=/etc/neutron/neutron.conf +After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service +Wants=network-online.target + +[Service] +Type=simple +User=neutron +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/neutron-openvswitch-agent \ + --config-file=/etc/neutron/neutron.conf \ + --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-neutron-server.service b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-server.service new file mode 100644 index 00000000..6376c3d8 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-server.service @@ -0,0 +1,17 @@ +[Unit] +Description=Neutron Api Server +ConditionPathExists=/etc/neutron/neutron.conf +After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service +Wants=network-online.target + +[Service] +Type=simple +User=neutron +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/neutron-server \ + --config-file=/etc/neutron/neutron.conf \ + --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-api.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-api.service new file mode 100644 index 00000000..521353db --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-api.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Compute Service (code-named Nova) API server +ConditionPathExists=/etc/nova/nova.conf +After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service 
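+# As in the other units above, After= is ordering-only: nova-api does not start the setup services itself; they are wanted by multi-user.target via their own [Install] sections.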
+Wants=network-online.target + +[Service] +Type=simple +User=nova +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/nova-api --config-file /etc/nova/nova.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-cert.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-cert.service new file mode 100644 index 00000000..b3733816 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-cert.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Nova Cert +ConditionPathExists=/etc/nova/nova.conf +After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service +Wants=network-online.target + +[Service] +Type=simple +User=nova +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/nova-cert --config-file /etc/nova/nova.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-compute.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-compute.service new file mode 100644 index 00000000..4f9b8196 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-compute.service @@ -0,0 +1,16 @@ +[Unit] +Description=OpenStack Compute Service (code-named Nova) compute server +ConditionPathExists=/etc/nova/nova.conf +After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service libvirtd.service +Wants=network-online.target +Requires=libvirtd.service + +[Service] +Type=simple +User=nova +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-conductor.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-conductor.service new file mode 100644 index 00000000..4c0d7d43 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-conductor.service @@ -0,0 +1,16 @@ +[Unit] +Description=Database-access support for Compute nodes (nova-conductor) +ConditionPathExists=/etc/nova/nova.conf +After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service libvirtd.service +Wants=network-online.target +Requires=libvirtd.service + +[Service] +Type=simple +User=nova +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/nova-conductor --config-file /etc/nova/nova.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-config-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-config-setup.service new file mode 100644 index 00000000..df669aa9 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-config-setup.service @@ -0,0 +1,11 @@ +[Unit] +Description=Run nova-config-setup Ansible scripts +ConditionPathExists=/etc/openstack/nova.conf + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/nova-config.yml + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-consoleauth.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-consoleauth.service new file mode 100644 index 00000000..e22780a9 --- /dev/null +++ 
b/install-files/openstack/usr/lib/systemd/system/openstack-nova-consoleauth.service @@ -0,0 +1,15 @@ +[Unit] +Description=Openstack Console Auth (nova-consoleauth) +ConditionPathExists=/etc/nova/nova.conf +After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service +Wants=network-online.target + +[Service] +Type=simple +User=nova +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/nova-consoleauth --config-file /etc/nova/nova.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-db-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-db-setup.service new file mode 100644 index 00000000..8e004327 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-db-setup.service @@ -0,0 +1,13 @@ +[Unit] +Description=Run nova-db-setup Ansible scripts +ConditionPathExists=/etc/openstack/nova.conf +After=network-online.target postgres-server-setup.service openstack-keystone-setup.service openstack-nova-config-setup.service +Wants=network-online.target + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/nova-db.yml + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-novncproxy.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-novncproxy.service new file mode 100644 index 00000000..8cbb20fd --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-novncproxy.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Nova NoVNC proxy +ConditionPathExists=/etc/nova/nova.conf +After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service +Wants=network-online.target + +[Service] +Type=simple +User=nova +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/nova-novncproxy --config-file /etc/nova/nova.conf --web /usr/share/novnc + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-scheduler.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-scheduler.service new file mode 100644 index 00000000..e89f0d3e --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-scheduler.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Nova Scheduler +ConditionPathExists=/etc/nova/nova.conf +After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service +Wants=network-online.target + +[Service] +Type=simple +User=nova +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/nova-scheduler --config-file /etc/nova/nova.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-serialproxy.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-serialproxy.service new file mode 100644 index 00000000..30af8305 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-serialproxy.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Nova Serial Proxy +ConditionPathExists=/etc/nova/nova.conf +After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service +Wants=network-online.target + +[Service] +Type=simple +User=nova +StandardOutput=null +StandardError=null +ExecStart=/usr/bin/nova-serialproxy --config-file 
/etc/nova/nova.conf + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openvswitch-db-server.service b/install-files/openstack/usr/lib/systemd/system/openvswitch-db-server.service new file mode 100644 index 00000000..34a7c812 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openvswitch-db-server.service @@ -0,0 +1,12 @@ +[Unit] +Description=Open vSwitch Database Server Daemon +After=local-fs.target + +[Service] +Type=forking +ExecStartPre=-/usr/bin/mkdir -p /var/run/openvswitch +ExecStart=/usr/sbin/ovsdb-server --remote=punix:/var/run/openvswitch/db.sock --remote=db:Open_vSwitch,Open_vSwitch,manager_options --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --pidfile --detach + +[Install] +WantedBy=multi-user.target + diff --git a/install-files/openstack/usr/lib/systemd/system/openvswitch-setup.service b/install-files/openstack/usr/lib/systemd/system/openvswitch-setup.service new file mode 100644 index 00000000..8393ebbc --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openvswitch-setup.service @@ -0,0 +1,11 @@ +[Unit] +Description=Run openvswitch-setup Ansible scripts +After=local-fs.target + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/ansible-playbook -v -i /usr/share/openstack/hosts /usr/share/openstack/openvswitch.yml + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/openvswitch.service b/install-files/openstack/usr/lib/systemd/system/openvswitch.service new file mode 100644 index 00000000..113911f6 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/openvswitch.service @@ -0,0 +1,12 @@ +[Unit] +Description=Open vSwitch Daemon +Before=network-pre.target +Wants=network-pre.target + +[Service] +Type=forking +ExecStartPre=-/usr/bin/mkdir -p /var/run/openvswitch +ExecStart=/usr/sbin/ovs-vswitchd --pidfile --detach + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/postgres-server-setup.service b/install-files/openstack/usr/lib/systemd/system/postgres-server-setup.service new file mode 100644 index 00000000..202c0636 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/postgres-server-setup.service @@ -0,0 +1,12 @@ +[Unit] +Description=Run postgres-setup Ansible scripts +ConditionPathExists=/etc/openstack/postgres.conf +After=local-fs.target + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/ansible-playbook -v -i /usr/share/openstack/hosts /usr/share/openstack/postgres.yml + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/postgres-server.service b/install-files/openstack/usr/lib/systemd/system/postgres-server.service new file mode 100644 index 00000000..9e11f26d --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/postgres-server.service @@ -0,0 +1,26 @@ +[Unit] +Description=PostgreSQL database server +After=network-online.target +Wants=network-online.target + +[Service] +Type=forking +TimeoutSec=120 +User=postgres +Group=postgres + +Environment=PGROOT=/var/lib/pgsql + +SyslogIdentifier=postgres +PIDFile=/var/lib/pgsql/data/postmaster.pid + +ExecStart=/usr/bin/pg_ctl -s -D ${PGROOT}/data start -w -t 120 +ExecReload=/usr/bin/pg_ctl -s -D ${PGROOT}/data reload +ExecStop=/usr/bin/pg_ctl -s -D ${PGROOT}/data stop -m fast + +# Due to PostgreSQL's use of shared memory, OOM killer is often 
overzealous in +# killing Postgres, so adjust it downward +OOMScoreAdjust=-200 + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/rabbitmq-server.service b/install-files/openstack/usr/lib/systemd/system/rabbitmq-server.service new file mode 100644 index 00000000..1a20f3e4 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/rabbitmq-server.service @@ -0,0 +1,16 @@ +[Unit] +Description=RabbitMQ broker +After=network-online.target +Wants=network-online.target + +[Service] +Type=notify +User=rabbitmq +Group=rabbitmq +Environment=HOME=/var/lib/rabbitmq +WorkingDirectory=/var/lib/rabbitmq +ExecStart=/usr/sbin/rabbitmq-server +ExecStop=/usr/sbin/rabbitmqctl stop + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/swift-controller-setup.service b/install-files/openstack/usr/lib/systemd/system/swift-controller-setup.service new file mode 100644 index 00000000..ccfbcbe6 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/swift-controller-setup.service @@ -0,0 +1,13 @@ +[Unit] +Description=Run swift-controller-setup (once) +After=local-fs.target network-online.target postgres-server-setup.service openstack-keystone-setup.service +Wants=network-online.target + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/swift-controller.yml +Restart=no + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/lib/systemd/system/swift-proxy.service b/install-files/openstack/usr/lib/systemd/system/swift-proxy.service new file mode 100644 index 00000000..7b0a2e17 --- /dev/null +++ b/install-files/openstack/usr/lib/systemd/system/swift-proxy.service @@ -0,0 +1,14 @@ +[Unit] +Description=OpenStack Swift Proxy Server +After=network-online.target swift-controller-setup.service memcached.service +Wants=network-online.target + +[Service] +Type=forking +PIDFile=/var/run/swift/proxy-server.pid +Restart=on-failure +ExecStart=/usr/bin/swift-init proxy-server start +ExecStop=/usr/bin/swift-init proxy-server stop + +[Install] +WantedBy=multi-user.target diff --git a/install-files/openstack/usr/share/openstack/ceilometer-config.yml b/install-files/openstack/usr/share/openstack/ceilometer-config.yml new file mode 100644 index 00000000..9850d84d --- /dev/null +++ b/install-files/openstack/usr/share/openstack/ceilometer-config.yml @@ -0,0 +1,36 @@ +--- +- hosts: localhost + vars_files: + - "/etc/openstack/ceilometer.conf" + tasks: +# Configure ceilometer + - name: Create the ceilometer user. 
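+    # The nologin shell and fixed home below make this a pure service account for the ceilometer daemons.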
+ user: + name: ceilometer + comment: Openstack Ceilometer Daemons + shell: /sbin/nologin + home: /var/lib/ceilometer + + - name: Create the /var folders for ceilometer + file: + path: "{{ item }}" + state: directory + owner: ceilometer + group: ceilometer + with_items: + - /var/run/ceilometer + - /var/lock/ceilometer + - /var/log/ceilometer + - /var/lib/ceilometer + + - name: Create /etc/ceilometer directory + file: + path: /etc/ceilometer + state: directory + + - name: Add the configuration needed for ceilometer in /etc/ceilometer using templates + template: + src: /usr/share/openstack/ceilometer/{{ item }} + dest: /etc/ceilometer/{{ item }} + with_lines: + - cd /usr/share/openstack/ceilometer && find -type f diff --git a/install-files/openstack/usr/share/openstack/ceilometer-db.yml b/install-files/openstack/usr/share/openstack/ceilometer-db.yml new file mode 100644 index 00000000..717c7d7d --- /dev/null +++ b/install-files/openstack/usr/share/openstack/ceilometer-db.yml @@ -0,0 +1,50 @@ +--- +- hosts: localhost + vars_files: + - "/etc/openstack/ceilometer.conf" + tasks: + - name: Create ceilometer service user in service tenant + keystone_user: + user: "{{ CEILOMETER_SERVICE_USER }}" + password: "{{ CEILOMETER_SERVICE_PASSWORD }}" + tenant: service + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Assign admin role to the ceilometer service user in the service tenant + keystone_user: + role: admin + user: "{{ CEILOMETER_SERVICE_USER }}" + tenant: service + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Add ceilometer endpoint + keystone_service: + name: ceilometer + type: metering + description: Openstack Metering Service + publicurl: http://{{ CONTROLLER_HOST_ADDRESS }}:8777 + internalurl: http://{{ CONTROLLER_HOST_ADDRESS }}:8777 + adminurl: http://{{ CONTROLLER_HOST_ADDRESS }}:8777 + region: regionOne + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Create postgresql user for ceilometer + postgresql_user: + name: "{{ CEILOMETER_DB_USER }}" + login_host: "{{ CONTROLLER_HOST_ADDRESS }}" + password: "{{ CEILOMETER_DB_PASSWORD }}" + sudo: yes + sudo_user: ceilometer + + - name: Create database for ceilometer services + postgresql_db: + name: ceilometer + owner: "{{ CEILOMETER_DB_USER }}" + login_host: "{{ CONTROLLER_HOST_ADDRESS }}" + sudo: yes + sudo_user: ceilometer + + - name: Initialise the ceilometer database + command: ceilometer-dbsync + sudo: yes + sudo_user: ceilometer diff --git a/install-files/openstack/usr/share/openstack/ceilometer/ceilometer.conf b/install-files/openstack/usr/share/openstack/ceilometer/ceilometer.conf new file mode 100644 index 00000000..b572d40f --- /dev/null +++ b/install-files/openstack/usr/share/openstack/ceilometer/ceilometer.conf @@ -0,0 +1,1023 @@ +[DEFAULT] + +# +# Options defined in ceilometer.middleware +# + +# Exchange names to listen for notifications. (multi valued) +#http_control_exchanges=nova +#http_control_exchanges=glance +#http_control_exchanges=neutron +#http_control_exchanges=cinder + + +# +# Options defined in ceilometer.pipeline +# + +# Configuration file for pipeline definition. (string value) +#pipeline_cfg_file=pipeline.yaml + + +# +# Options defined in ceilometer.sample +# + +# Source for samples emitted on this instance. (string value) +# Deprecated group/name - [DEFAULT]/counter_source +#sample_source=openstack + + +# +# Options defined in ceilometer.service +# + +# Name of this node, which must be valid in an AMQP key. Can +# be an opaque identifier.
For ZeroMQ only, must be a valid +# host name, FQDN, or IP address. (string value) +#host=ceilometer + +# Dispatcher to process data. (multi valued) +#dispatcher=database + +# Number of workers for collector service. A single +# collector is enabled by default. (integer value) +#collector_workers=1 + +# Number of workers for notification service. A single +# notification agent is enabled by default. (integer value) +#notification_workers=1 + + +# +# Options defined in ceilometer.api.app +# + +# The strategy to use for auth: noauth or keystone. (string +# value) +auth_strategy=keystone + +# Deploy the deprecated v1 API. (boolean value) +#enable_v1_api=true + + +# +# Options defined in ceilometer.compute.notifications +# + +# Exchange name for Nova notifications. (string value) +#nova_control_exchange=nova + + +# +# Options defined in ceilometer.compute.util +# + +# List of metadata prefixes reserved for metering use. (list +# value) +#reserved_metadata_namespace=metering. + +# Limit on length of reserved metadata values. (integer value) +#reserved_metadata_length=256 + + +# +# Options defined in ceilometer.compute.virt.inspector +# + +# Inspector to use for inspecting the hypervisor layer. +# (string value) +#hypervisor_inspector=libvirt + + +# +# Options defined in ceilometer.compute.virt.libvirt.inspector +# + +# Libvirt domain type (valid options are: kvm, lxc, qemu, uml, +# xen). (string value) +#libvirt_type=kvm + +# Override the default libvirt URI (which is dependent on +# libvirt_type). (string value) +#libvirt_uri= + + +# +# Options defined in ceilometer.image.notifications +# + +# Exchange name for Glance notifications. (string value) +#glance_control_exchange=glance + + +# +# Options defined in ceilometer.network.notifications +# + +# Exchange name for Neutron notifications. (string value) +# Deprecated group/name - [DEFAULT]/quantum_control_exchange +#neutron_control_exchange=neutron + + +# +# Options defined in ceilometer.objectstore.swift +# + +# Swift reseller prefix. Must be on par with reseller_prefix +# in proxy-server.conf. (string value) +#reseller_prefix=AUTH_ + + +# +# Options defined in ceilometer.openstack.common.db.sqlalchemy.session +# + +# The file name to use with SQLite (string value) +#sqlite_db=ceilometer.sqlite + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous=true + + +# +# Options defined in ceilometer.openstack.common.eventlet_backdoor +# + +# Enable eventlet backdoor. Acceptable values are 0, <port>, +# and <start>:<end>, where 0 results in listening on a random +# tcp port number; <port> results in listening on the +# specified port number (and not enabling backdoor if that +# port is in use); and <start>:<end> results in listening on +# the smallest unused port number within the specified range +# of port numbers. The chosen port is displayed in the +# service's log file. (string value) +#backdoor_port= + + +# +# Options defined in ceilometer.openstack.common.lockutils +# + +# Whether to disable inter-process locks. (boolean value) +#disable_process_locking=false + +# Directory to use for lock files. (string value) +#lock_path= + + +# +# Options defined in ceilometer.openstack.common.log +# + +# Print debugging output (set logging level to DEBUG instead +# of default WARNING level). (boolean value) +#debug=false + +# Print more verbose output (set logging level to INFO instead +# of default WARNING level).
(boolean value) +#verbose=false + +# Log output to standard error (boolean value) +#use_stderr=true + +# Format string to use for log messages with context (string +# value) +#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages without context +# (string value) +#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG (string +# value) +#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format +# (string value) +#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# List of logger=LEVEL pairs (list value) +#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN + +# Publish error events (boolean value) +#publish_errors=false + +# Make deprecations fatal (boolean value) +#fatal_deprecations=false + +# If an instance is passed with the log message, format it +# like this (string value) +#instance_format="[instance: %(uuid)s] " + +# If an instance UUID is passed with the log message, format +# it like this (string value) +#instance_uuid_format="[instance: %(uuid)s] " + +# The name of logging configuration file. It does not disable +# existing loggers, but just appends specified logging +# configuration to any other existing logging options. Please +# see the Python logging module documentation for details on +# logging configuration files. (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append= + +# DEPRECATED. A logging.Formatter log message format string +# which may use any of the available logging.LogRecord +# attributes. This option is deprecated. Please use +# logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format= + +# Format string for %%(asctime)s in log records. Default: +# %(default)s (string value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If no default is +# set, logging will go to stdout. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file= + +# (Optional) The base directory used for relative --log-file +# paths (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir= + +# Use syslog for logging. Existing syslog format is DEPRECATED +# during I, and then will be changed in J to honor RFC5424 +# (boolean value) +use_syslog=true + +# (Optional) Use syslog rfc5424 format for logging. If +# enabled, will add APP-NAME (RFC5424) before the MSG part of +# the syslog message. The old format without APP-NAME is +# deprecated in I, and will be removed in J. 
(boolean value) +#use_syslog_rfc_format=false + +# Syslog facility to receive log lines (string value) +#syslog_log_facility=LOG_USER + + +# +# Options defined in ceilometer.openstack.common.middleware.sizelimit +# + +# The maximum body size per request, in bytes (integer value) +# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size +#max_request_body_size=114688 + + +# +# Options defined in ceilometer.openstack.common.notifier.api +# + +# Driver or drivers to handle sending notifications (multi +# valued) +#notification_driver= + +# Default notification level for outgoing notifications +# (string value) +#default_notification_level=INFO + +# Default publisher_id for outgoing notifications (string +# value) +#default_publisher_id= + + +# +# Options defined in ceilometer.openstack.common.notifier.rpc_notifier +# + +# AMQP topic used for OpenStack notifications (list value) +#notification_topics=notifications + + +# +# Options defined in ceilometer.openstack.common.policy +# + +# JSON file containing policy (string value) +#policy_file=policy.json + +# Rule enforced when requested rule is not found (string +# value) +#policy_default_rule=default + + +# +# Options defined in ceilometer.openstack.common.rpc +# + +# The messaging module to use, defaults to kombu. (string +# value) +rpc_backend=rabbit + +# Size of RPC thread pool (integer value) +#rpc_thread_pool_size=64 + +# Size of RPC connection pool (integer value) +#rpc_conn_pool_size=30 + +# Seconds to wait for a response from call or multicall +# (integer value) +#rpc_response_timeout=60 + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. (list value) +#allowed_rpc_exception_modules=nova.exception,cinder.exception,exceptions + +# If passed, use a fake RabbitMQ provider (boolean value) +#fake_rabbit=false + +# AMQP exchange to connect to if using RabbitMQ or Qpid +# (string value) +#control_exchange=openstack + + +# +# Options defined in ceilometer.openstack.common.rpc.amqp +# + +# Use durable queues in amqp. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + + +# +# Options defined in ceilometer.openstack.common.rpc.impl_kombu +# + +# If SSL is enabled, the SSL version to use. Valid values are +# TLSv1, SSLv23 and SSLv3. SSLv2 might be available on some +# distributions. 
(string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled) (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled) (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL enabled) +# (string value) +#kombu_ssl_ca_certs= + +# The RabbitMQ broker address where a single node is used +# (string value) +rabbit_host = {{ RABBITMQ_HOST }} + + +# The RabbitMQ broker port where a single node is used +# (integer value) +rabbit_port= {{ RABBITMQ_PORT }} + +# RabbitMQ HA cluster host:port pairs (list value) +#rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ (boolean value) +rabbit_use_ssl=false + +# The RabbitMQ userid (string value) +rabbit_userid= {{ RABBITMQ_USER }} + +# The RabbitMQ password (string value) +rabbit_password = {{ RABBITMQ_PASSWORD }} + + +# The RabbitMQ virtual host (string value) +rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count) (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + + +# +# Options defined in ceilometer.openstack.common.rpc.impl_qpid +# + +# Qpid broker hostname (string value) +#qpid_hostname=localhost + +# Qpid broker port (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for qpid connection (string value) +#qpid_username= + +# Password for qpid connection (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl' (string value) +#qpid_protocol=tcp + +# Disable Nagle algorithm (boolean value) +#qpid_tcp_nodelay=true + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + + +# +# Options defined in ceilometer.openstack.common.rpc.impl_zmq +# + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver (string value) +#rpc_zmq_matchmaker=ceilometer.openstack.common.rpc.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1 (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. 
(string +# value) +#rpc_zmq_host=ceilometer + + +# +# Options defined in ceilometer.openstack.common.rpc.matchmaker +# + +# Heartbeat frequency (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + + +# +# Options defined in ceilometer.orchestration.notifications +# + +# Exchange name for Heat notifications (string value) +#heat_control_exchange=heat + + +# +# Options defined in ceilometer.storage +# + +# DEPRECATED - Database connection string. (string value) +#database_connection= + + +# +# Options defined in ceilometer.storage.sqlalchemy.models +# + +# MySQL engine to use. (string value) +#mysql_engine=InnoDB + + +# +# Options defined in ceilometer.volume.notifications +# + +# Exchange name for Cinder notifications. (string value) +cinder_control_exchange=cinder + + +[alarm] + +# +# Options defined in ceilometer.cli +# + +# Class to launch as alarm evaluation service. (string value) +#evaluation_service=ceilometer.alarm.service.SingletonAlarmService + + +# +# Options defined in ceilometer.alarm.notifier.rest +# + +# SSL Client certificate for REST notifier. (string value) +#rest_notifier_certificate_file= + +# SSL Client private key for REST notifier. (string value) +#rest_notifier_certificate_key= + +# Whether to verify the SSL Server certificate when calling +# alarm action. (boolean value) +#rest_notifier_ssl_verify=true + + +# +# Options defined in ceilometer.alarm.rpc +# + +# The topic that ceilometer uses for alarm notifier messages. +# (string value) +#notifier_rpc_topic=alarm_notifier + +# The topic that ceilometer uses for alarm partition +# coordination messages. (string value) +#partition_rpc_topic=alarm_partition_coordination + + +# +# Options defined in ceilometer.alarm.service +# + +# Period of evaluation cycle, should be >= than configured +# pipeline interval for collection of underlying metrics. +# (integer value) +# Deprecated group/name - [alarm]/threshold_evaluation_interval +#evaluation_interval=60 + + +# +# Options defined in ceilometer.api.controllers.v2 +# + +# Record alarm change events. (boolean value) +#record_history=true + + +[api] + +# +# Options defined in ceilometer.api +# + +# The port for the ceilometer API server. (integer value) +# Deprecated group/name - [DEFAULT]/metering_api_port +#port=8777 + +# The listen IP for the ceilometer API server. (string value) +#host=0.0.0.0 + + +[collector] + +# +# Options defined in ceilometer.collector +# + +# Address to which the UDP socket is bound. Set to an empty +# string to disable. (string value) +#udp_address=0.0.0.0 + +# Port to which the UDP socket is bound. 
(integer value) +#udp_port=4952 + + +[database] + +# +# Options defined in ceilometer.openstack.common.db.api +# + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend=sqlalchemy + + +# +# Options defined in ceilometer.openstack.common.db.sqlalchemy.session +# + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +connection=postgresql://{{ CEILOMETER_DB_USER }}:{{ CEILOMETER_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/ceilometer + +# The SQLAlchemy connection string used to connect to the +# slave database (string value) +#slave_connection= + +# Timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# Maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries=10 + +# Interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information. 0=None, +# 100=Everything (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug=0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace=false + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + + +# +# Options defined in ceilometer.storage +# + +# Number of seconds that samples are kept in the database for +# (<= 0 means forever). (integer value) +#time_to_live=-1 + + +[dispatcher_file] + +# +# Options defined in ceilometer.dispatcher.file +# + +# Name and the location of the file to record meters. (string +# value) +#file_path= + +# The max size of the file. (integer value) +#max_bytes=0 + +# The max number of the files to keep. (integer value) +#backup_count=0 + + +[event] + +# +# Options defined in ceilometer.event.converter +# + +# Configuration file for event definitions. (string value) +#definitions_cfg_file=event_definitions.yaml + +# Drop notifications if no event definition matches. 
+# (Otherwise, we convert them with just the default traits) +# (boolean value) +#drop_unmatched_notifications=false + + +[keystone_authtoken] + +# +# Options defined in keystoneclient.middleware.auth_token +# + +# Prefix to prepend at the beginning of the path (string +# value) +#auth_admin_prefix= + +# Host providing the admin Identity API endpoint (string +# value) +#auth_host=127.0.0.1 + +# Port of the admin Identity API endpoint (integer value) +#auth_port=35357 + +# Protocol of the admin Identity API endpoint(http or https) +# (string value) +#auth_protocol=https + +# Complete public Identity API endpoint (string value) +auth_uri= http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 +identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357 + + +# API version of the admin Identity API endpoint (string +# value) +#auth_version= + +# Do not handle authorization requests within the middleware, +# but delegate the authorization decision to downstream WSGI +# components (boolean value) +#delay_auth_decision=false + +# Request timeout value for communicating with Identity API +# server. (boolean value) +#http_connect_timeout= + +# How many times are we trying to reconnect when communicating +# with Identity API Server. (integer value) +#http_request_max_retries=3 + +# Allows to pass in the name of a fake http_handler callback +# function used instead of httplib.HTTPConnection or +# httplib.HTTPSConnection. Useful for unit testing where +# network is not available. (string value) +#http_handler= + +# Single shared secret with the Keystone configuration used +# for bootstrapping a Keystone installation, or otherwise +# bypassing the normal authentication process. (string value) +#admin_token= + +# Keystone account username (string value) +admin_user = {{ CEILOMETER_SERVICE_USER }} + +# Keystone account password (string value) +admin_password = {{ CEILOMETER_SERVICE_PASSWORD }} + +# Keystone service account tenant name to validate user tokens +# (string value) +admin_tenant_name = service + +# Env key for the swift cache (string value) +#cache= + +# Required if Keystone server requires client certificate +# (string value) +#certfile= + +# Required if Keystone server requires client certificate +# (string value) +#keyfile= + +# A PEM encoded Certificate Authority to use when verifying +# HTTPS connections. Defaults to system CAs. (string value) +#cafile= + +# Verify HTTPS connections. (boolean value) +#insecure=false + +# Directory used to cache files related to PKI tokens (string +# value) +#signing_dir= + +# If defined, the memcache server(s) to use for caching (list +# value) +# Deprecated group/name - [DEFAULT]/memcache_servers +#memcached_servers= + +# In order to prevent excessive requests and validations, the +# middleware uses an in-memory cache for the tokens the +# Keystone API returns. This is only valid if memcache_servers +# is defined. Set to -1 to disable caching completely. +# (integer value) +#token_cache_time=300 + +# Value only used for unit testing (integer value) +#revocation_cache_time=1 + +# (optional) if defined, indicate whether token data should be +# authenticated or authenticated and encrypted. Acceptable +# values are MAC or ENCRYPT. If MAC, token data is +# authenticated (with HMAC) in the cache. If ENCRYPT, token +# data is encrypted and authenticated in the cache. If the +# value is not one of these options or empty, auth_token will +# raise an exception on initialization. 
(string value) +#memcache_security_strategy= + +# (optional, mandatory if memcache_security_strategy is +# defined) this string is used for key derivation. (string +# value) +#memcache_secret_key= + +# (optional) indicate whether to set the X-Service-Catalog +# header. If False, middleware will not ask for service +# catalog on token validation and will not set the X-Service- +# Catalog header. (boolean value) +#include_service_catalog=true + +# Used to control the use and type of token binding. Can be +# set to: "disabled" to not check token binding. "permissive" +# (default) to validate binding information if the bind type +# is of a form known to the server and ignore it if not. +# "strict" like "permissive" but if the bind type is unknown +# the token will be rejected. "required" any form of token +# binding is needed to be allowed. Finally the name of a +# binding method that must be present in tokens. (string +# value) +#enforce_token_bind=permissive + + +[matchmaker_redis] + +# +# Options defined in ceilometer.openstack.common.rpc.matchmaker_redis +# + +# Host to locate redis (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server. (optional) (string value) +#password= + + +[matchmaker_ring] + +# +# Options defined in ceilometer.openstack.common.rpc.matchmaker_ring +# + +# Matchmaker ring file (JSON) (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + +[notification] + +# +# Options defined in ceilometer.notification +# + +# Acknowledge message when event persistence fails. (boolean +# value) +#ack_on_event_error=true + +# Save event details. (boolean value) +#store_events=false + + +[publisher] + +# +# Options defined in ceilometer.publisher.utils +# + +# Secret value for signing metering messages. (string value) +# Deprecated group/name - [DEFAULT]/metering_secret +# Deprecated group/name - [publisher_rpc]/metering_secret +# It should be set to some random value +metering_secret = {{ METERING_SECRET }} + +[publisher_rpc] + +# +# Options defined in ceilometer.publisher.rpc +# + +# The topic that ceilometer uses for metering messages. +# (string value) +#metering_topic=metering + + +[rpc_notifier2] + +# +# Options defined in ceilometer.openstack.common.notifier.rpc_notifier2 +# + +# AMQP topic(s) used for OpenStack notifications (list value) +#topics=notifications + + +[service_credentials] + +# +# Options defined in ceilometer.service +# + +# User name to use for OpenStack service access. (string +# value) +os_username = {{ CEILOMETER_SERVICE_USER }} + +# Password to use for OpenStack service access. (string value) +os_password = {{ CEILOMETER_SERVICE_PASSWORD }} + +# Tenant ID to use for OpenStack service access. (string +# value) +#os_tenant_id= + +# Tenant name to use for OpenStack service access. (string +# value) +os_tenant_name = service + +# Certificate chain for SSL validation. (string value) +#os_cacert= + +# Auth URL to use for OpenStack service access. (string value) +os_auth_url = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 + +# Region name to use for OpenStack service endpoints. (string +# value) +os_region_name=regionOne + +# Type of endpoint in Identity service catalog to use for +# communication with OpenStack services. (string value) +os_endpoint_type=internalURL + +# Disables X.509 certificate validation when an SSL connection +# to Identity Service is established. 
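The metering_secret set in [publisher] above is a shared secret used to sign metering messages so consumers can detect tampering. A toy illustration of that kind of HMAC signing, not ceilometer's exact implementation:

    import hashlib
    import hmac

    def sign(message, secret):
        # Hash the sorted key/value pairs so field order cannot change the
        # digest, and skip the signature field itself.
        digest = hmac.new(secret.encode('utf-8'), digestmod=hashlib.sha256)
        for name, value in sorted(message.items()):
            if name != 'message_signature':
                digest.update(('%s:%s' % (name, value)).encode('utf-8'))
        return digest.hexdigest()

    sample = {'counter_name': 'cpu', 'counter_volume': 1.0,
              'resource_id': 'instance-0001'}
    sample['message_signature'] = sign(sample, 'METERING_SECRET')
    print(sample['message_signature'])
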
(boolean value) +#insecure=false + + +[ssl] + +# +# Options defined in ceilometer.openstack.common.sslutils +# + +# CA certificate file to use to verify connecting clients +# (string value) +#ca_file= + +# Certificate file to use when starting the server securely +# (string value) +#cert_file= + +# Private key file to use when starting the server securely +# (string value) +#key_file= + + +[vmware] + +# +# Options defined in ceilometer.compute.virt.vmware.inspector +# + +# IP address of the VMware Vsphere host (string value) +#host_ip= + +# Username of VMware Vsphere (string value) +#host_username= + +# Password of VMware Vsphere (string value) +#host_password= + +# Number of times a VMware Vsphere API must be retried +# (integer value) +#api_retry_count=10 + +# Sleep time in seconds for polling an ongoing async task +# (floating point value) +#task_poll_interval=0.5 diff --git a/install-files/openstack/usr/share/openstack/cinder-config.yml b/install-files/openstack/usr/share/openstack/cinder-config.yml new file mode 100644 index 00000000..fd3e2cd0 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/cinder-config.yml @@ -0,0 +1,37 @@ +--- +- hosts: localhost + vars_files: + - "/etc/openstack/cinder.conf" + tasks: +# Configure cinder + - name: Create the cinder user. + user: + name: cinder + comment: Openstack Cinder Daemons + shell: /sbin/nologin + home: /var/lib/cinder + + - name: Create the /var folders for cinder + file: + path: "{{ item }}" + state: directory + owner: cinder + group: cinder + with_items: + - /var/run/cinder + - /var/lock/cinder + - /var/log/cinder + - /var/lib/cinder + - /var/lib/cinder/volumes + + - name: Create /etc/cinder directory + file: + path: /etc/cinder + state: directory + + - name: Add the configuration needed for cinder in /etc/cinder using templates + template: + src: /usr/share/openstack/cinder/{{ item }} + dest: /etc/cinder/{{ item }} + with_lines: + - cd /usr/share/openstack/cinder && find -type f diff --git a/install-files/openstack/usr/share/openstack/cinder-db.yml b/install-files/openstack/usr/share/openstack/cinder-db.yml new file mode 100644 index 00000000..2a211720 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/cinder-db.yml @@ -0,0 +1,60 @@ +--- +- hosts: localhost + vars_files: + - "/etc/openstack/cinder.conf" + tasks: + - name: Create cinder service user in service tenant + keystone_user: + user: "{{ CINDER_SERVICE_USER }}" + password: "{{ CINDER_SERVICE_PASSWORD }}" + tenant: service + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Assign admin role to cinder service user in the service tenant + keystone_user: + role: admin + user: "{{ CINDER_SERVICE_USER }}" + tenant: service + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Add cinder endpoint + keystone_service: + name: cinder + type: volume + description: Openstack Block Storage + publicurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v1/%(tenant_id)s' + internalurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v1/%(tenant_id)s' + adminurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v1/%(tenant_id)s' + region: regionOne + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Add cinderv2 endpoint + keystone_service: + name: cinderv2 + type: volumev2 + description: Openstack Block Storage + publicurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v2/%(tenant_id)s' + internalurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v2/%(tenant_id)s' + adminurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v2/%(tenant_id)s' + region: regionOne + token: "{{ 
KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Create postgresql user for cinder + postgresql_user: + name: "{{ CINDER_DB_USER }}" + password: "{{ CINDER_DB_PASSWORD }}" + sudo: yes + sudo_user: cinder + + - name: Create database for cinder services + postgresql_db: + name: cinder + owner: "{{ CINDER_DB_USER }}" + sudo: yes + sudo_user: cinder + + - name: Initiate cinder database + cinder_manage: + action: dbsync + sudo: yes + sudo_user: cinder diff --git a/install-files/openstack/usr/share/openstack/cinder-lvs.yml b/install-files/openstack/usr/share/openstack/cinder-lvs.yml new file mode 100644 index 00000000..7a91a306 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/cinder-lvs.yml @@ -0,0 +1,21 @@ +--- +- hosts: localhost + vars_files: + - "/etc/openstack/cinder.conf" + tasks: + - name: Check that CINDER_DEVICE exists + stat: + path: "{{ CINDER_DEVICE }}" + register: cinder_device_stats + failed_when: cinder_device_stats.stat.exists == false + + - name: Configure LVM group for cinder + lvg: + vg: cinder-volumes + pvs: "{{ CINDER_DEVICE }}" + + - lineinfile: + dest: /etc/lvm/lvm.conf + regexp: '# filter = \[ \"a\/\.\*/\" \]' + line: ' filter = [ "a|{{ CINDER_DEVICE }}|", "r/.*/" ]' + backrefs: yes diff --git a/install-files/openstack/usr/share/openstack/cinder/api-paste.ini b/install-files/openstack/usr/share/openstack/cinder/api-paste.ini new file mode 100644 index 00000000..ba922d5f --- /dev/null +++ b/install-files/openstack/usr/share/openstack/cinder/api-paste.ini @@ -0,0 +1,60 @@ +############# +# OpenStack # +############# + +[composite:osapi_volume] +use = call:cinder.api:root_app_factory +/: apiversions +/v1: openstack_volume_api_v1 +/v2: openstack_volume_api_v2 + +[composite:openstack_volume_api_v1] +use = call:cinder.api.middleware.auth:pipeline_factory +noauth = request_id faultwrap sizelimit osprofiler noauth apiv1 +keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 +keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 + +[composite:openstack_volume_api_v2] +use = call:cinder.api.middleware.auth:pipeline_factory +noauth = request_id faultwrap sizelimit osprofiler noauth apiv2 +keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2 +keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2 + +[filter:request_id] +paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory + +[filter:faultwrap] +paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory + +[filter:osprofiler] +paste.filter_factory = osprofiler.web:WsgiMiddleware.factory +hmac_keys = SECRET_KEY +enabled = yes + +[filter:noauth] +paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory + +[filter:sizelimit] +paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory + +[app:apiv1] +paste.app_factory = cinder.api.v1.router:APIRouter.factory + +[app:apiv2] +paste.app_factory = cinder.api.v2.router:APIRouter.factory + +[pipeline:apiversions] +pipeline = faultwrap osvolumeversionapp + +[app:osvolumeversionapp] +paste.app_factory = cinder.api.versions:Versions.factory + +########## +# Shared # +########## + +[filter:keystonecontext] +paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory diff --git 
a/install-files/openstack/usr/share/openstack/cinder/cinder.conf b/install-files/openstack/usr/share/openstack/cinder/cinder.conf new file mode 100644 index 00000000..a58004b5 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/cinder/cinder.conf @@ -0,0 +1,2825 @@ +[DEFAULT] + +# +# Options defined in oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in AMQP. (boolean value) +#amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size=30 + +# Qpid broker hostname. (string value) +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The number of prefetched messages held by receiver. (integer +# value) +#qpid_receiver_capacity=1 + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1 and SSLv23. SSLv2 and SSLv3 may be available on +# some distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +rabbit_host={{ RABBITMQ_HOST }} + +# The RabbitMQ broker port where a single node is used. +# (integer value) +rabbit_port={{ RABBITMQ_PORT }} + +# RabbitMQ HA cluster host:port pairs. (list value) +#rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +#rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +rabbit_userid={{ RABBITMQ_USER }} + +# The RabbitMQ password. (string value) +rabbit_password={{ RABBITMQ_PASSWORD }} + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). 
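The rabbit_* options above describe a single AMQP broker. oslo.messaging's rabbit driver is built on kombu, so a quick connectivity check can be sketched with the URL those settings amount to; the broker credentials here are hypothetical stand-ins for the RABBITMQ_* template variables:

    from kombu import Connection

    # Trailing '//' selects the default '/' virtual host.
    amqp_url = 'amqp://cinder:secret@192.0.2.10:5672//'

    conn = Connection(amqp_url)
    try:
        conn.connect()   # failures here usually mean bad rabbit_* settings
        print('connected: %s' % conn.as_uri())
    finally:
        conn.release()
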
If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake +# (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=cinder + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Size of RPC greenthread pool. (integer value) +#rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +notification_driver=messagingv2 + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. (string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other +# drivers include qpid and zmq. (string value) +rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the +# transport_url option. (string value) +control_exchange=cinder + + +# +# Options defined in cinder.exception +# + +# Make exception message format errors fatal. (boolean value) +#fatal_exception_format_errors=false + + +# +# Options defined in cinder.quota +# + +# Number of volumes allowed per project (integer value) +#quota_volumes=10 + +# Number of volume snapshots allowed per project (integer +# value) +#quota_snapshots=10 + +# Number of consistencygroups allowed per project (integer +# value) +#quota_consistencygroups=10 + +# Total amount of storage, in gigabytes, allowed for volumes +# and snapshots per project (integer value) +#quota_gigabytes=1000 + +# Number of volume backups allowed per project (integer value) +#quota_backups=10 + +# Total amount of storage, in gigabytes, allowed for backups +# per project (integer value) +#quota_backup_gigabytes=1000 + +# Number of seconds until a reservation expires (integer +# value) +#reservation_expire=86400 + +# Count of reservations until usage is refreshed (integer +# value) +#until_refresh=0 + +# Number of seconds between subsequent usage refreshes +# (integer value) +#max_age=0 + +# Default driver to use for quota checks (string value) +#quota_driver=cinder.quota.DbQuotaDriver + +# Enables or disables use of default quota class with default +# quota. 
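The quota options above all feed the same basic check: a request is allowed only if current usage plus pending reservations plus the request stays within the limit. A toy version of that arithmetic, independent of Cinder's actual DbQuotaDriver:

    def quota_allows(requested, in_use, reserved, limit):
        # A negative limit is the usual "unlimited" convention for quotas.
        if limit < 0:
            return True
        return in_use + reserved + requested <= limit

    # quota_volumes defaults to 10: with 8 volumes in use and 1 reserved,
    # there is room for exactly one more.
    print(quota_allows(1, 8, 1, 10))   # True
    print(quota_allows(2, 8, 1, 10))   # False
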
(boolean value) +#use_default_quota_class=true + + +# +# Options defined in cinder.service +# + +# Interval, in seconds, between nodes reporting state to +# datastore (integer value) +#report_interval=10 + +# Interval, in seconds, between running periodic tasks +# (integer value) +#periodic_interval=60 + +# Range, in seconds, to randomly delay when starting the +# periodic task scheduler to reduce stampeding. (Disable by +# setting to 0) (integer value) +#periodic_fuzzy_delay=60 + +# IP address on which OpenStack Volume API listens (string +# value) +#osapi_volume_listen=0.0.0.0 + +# Port on which OpenStack Volume API listens (integer value) +#osapi_volume_listen_port=8776 + +# Number of workers for OpenStack Volume API service. The +# default is equal to the number of CPUs available. (integer +# value) +#osapi_volume_workers= + + +# +# Options defined in cinder.ssh_utils +# + +# Option to enable strict host key checking. When set to +# "True" Cinder will only connect to systems with a host key +# present in the configured "ssh_hosts_key_file". When set to +# "False" the host key will be saved upon first connection and +# used for subsequent connections. Default=False (boolean +# value) +#strict_ssh_host_key_policy=false + +# File containing SSH host keys for the systems with which +# Cinder needs to communicate. OPTIONAL: +# Default=$state_path/ssh_known_hosts (string value) +#ssh_hosts_key_file=$state_path/ssh_known_hosts + + +# +# Options defined in cinder.test +# + +# File name of clean sqlite db (string value) +#sqlite_clean_db=clean.sqlite + + +# +# Options defined in cinder.wsgi +# + +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large +# tokens (typically those generated by the Keystone v3 API +# with big service catalogs). (integer value) +#max_header_line=16384 + +# If False, closes the client socket connection explicitly. +# Setting it to True to maintain backward compatibility. +# Recommended setting is set it to False. (boolean value) +#wsgi_keep_alive=true + +# Sets the value of TCP_KEEPALIVE (True/False) for each server +# socket. (boolean value) +#tcp_keepalive=true + +# Sets the value of TCP_KEEPIDLE in seconds for each server +# socket. Not supported on OS X. (integer value) +#tcp_keepidle=600 + +# Sets the value of TCP_KEEPINTVL in seconds for each server +# socket. Not supported on OS X. (integer value) +#tcp_keepalive_interval= + +# Sets the value of TCP_KEEPCNT for each server socket. Not +# supported on OS X. (integer value) +#tcp_keepalive_count= + +# CA certificate file to use to verify connecting clients +# (string value) +#ssl_ca_file= + +# Certificate file to use when starting the server securely +# (string value) +#ssl_cert_file= + +# Private key file to use when starting the server securely +# (string value) +#ssl_key_file= + + +# +# Options defined in cinder.api.common +# + +# The maximum number of items that a collection resource +# returns in a single response (integer value) +#osapi_max_limit=1000 + +# Base URL that will be presented to users in links to the +# OpenStack Volume API (string value) +# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix +#osapi_volume_base_URL= + + +# +# Options defined in cinder.api.middleware.auth +# + +# Treat X-Forwarded-For as the canonical remote address. Only +# enable this if you have a sanitizing proxy. 
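The tcp_keepalive* options above map directly onto standard socket options; a sketch of how a server socket would apply them on Linux (the TCP_KEEP* constants are missing on OS X, as the comments note):

    import socket

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)  # tcp_keepalive
    if hasattr(socket, 'TCP_KEEPIDLE'):      # tcp_keepidle
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600)
    if hasattr(socket, 'TCP_KEEPINTVL'):     # tcp_keepalive_interval
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15)
    if hasattr(socket, 'TCP_KEEPCNT'):       # tcp_keepalive_count
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5)
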
(boolean value) +#use_forwarded_for=false + + +# +# Options defined in cinder.api.middleware.sizelimit +# + +# Max size for body of a request (integer value) +#osapi_max_request_body_size=114688 + + +# +# Options defined in cinder.backup.driver +# + +# Backup metadata version to be used when backing up volume +# metadata. If this number is bumped, make sure the service +# doing the restore supports the new version. (integer value) +#backup_metadata_version=1 + + +# +# Options defined in cinder.backup.drivers.ceph +# + +# Ceph configuration file to use. (string value) +#backup_ceph_conf=/etc/ceph/ceph.conf + +# The Ceph user to connect with. Default here is to use the +# same user as for Cinder volumes. If not using cephx this +# should be set to None. (string value) +#backup_ceph_user=cinder + +# The chunk size, in bytes, that a backup is broken into +# before transfer to the Ceph object store. (integer value) +#backup_ceph_chunk_size=134217728 + +# The Ceph pool where volume backups are stored. (string +# value) +#backup_ceph_pool=backups + +# RBD stripe unit to use when creating a backup image. +# (integer value) +#backup_ceph_stripe_unit=0 + +# RBD stripe count to use when creating a backup image. +# (integer value) +#backup_ceph_stripe_count=0 + +# If True, always discard excess bytes when restoring volumes +# i.e. pad with zeroes. (boolean value) +#restore_discard_excess_bytes=true + + +# +# Options defined in cinder.backup.drivers.swift +# + +# The URL of the Swift endpoint (string value) +#backup_swift_url= + +# Info to match when looking for swift in the service catalog. +# Format is: separated values of the form: +# :: - Only used if +# backup_swift_url is unset (string value) +#swift_catalog_info=object-store:swift:publicURL + +# Swift authentication mechanism (string value) +#backup_swift_auth=per_user + +# Swift authentication version. Specify "1" for auth 1.0, or +# "2" for auth 2.0 (string value) +#backup_swift_auth_version=1 + +# Swift tenant/account name. Required when connecting to an +# auth 2.0 system (string value) +#backup_swift_tenant= + +# Swift user name (string value) +#backup_swift_user= + +# Swift key for authentication (string value) +#backup_swift_key= + +# The default Swift container to use (string value) +#backup_swift_container=volumebackups + +# The size in bytes of Swift backup objects (integer value) +#backup_swift_object_size=52428800 + +# The number of retries to make for Swift operations (integer +# value) +#backup_swift_retry_attempts=3 + +# The backoff time in seconds between Swift retries (integer +# value) +#backup_swift_retry_backoff=2 + +# Compression algorithm (None to disable) (string value) +#backup_compression_algorithm=zlib + + +# +# Options defined in cinder.backup.drivers.tsm +# + +# Volume prefix for the backup id when backing up to TSM +# (string value) +#backup_tsm_volume_prefix=backup + +# TSM password for the running username (string value) +#backup_tsm_password=password + +# Enable or Disable compression for backups (boolean value) +#backup_tsm_compression=true + + +# +# Options defined in cinder.backup.manager +# + +# Driver to use for backups. 
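backup_swift_object_size and backup_compression_algorithm above together determine what actually lands in Swift: the volume is cut into fixed-size chunks and each chunk is compressed before upload. A sketch of just that slicing, not the driver's real code path:

    import os
    import tempfile
    import zlib

    CHUNK = 52428800  # backup_swift_object_size default: 50 MiB per object

    def backup_chunks(path, chunk_size=CHUNK):
        # Yield compressed, fixed-size pieces, one per Swift object.
        with open(path, 'rb') as f:
            while True:
                data = f.read(chunk_size)
                if not data:
                    return
                yield zlib.compress(data)

    # Demo on a small throwaway file rather than a real volume.
    fd, path = tempfile.mkstemp()
    os.write(fd, b'\0' * 1024)
    os.close(fd)
    print('%d object(s)' % sum(1 for _ in backup_chunks(path)))
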
(string value) +# Deprecated group/name - [DEFAULT]/backup_service +#backup_driver=cinder.backup.drivers.swift + + +# +# Options defined in cinder.common.config +# + +# File name for the paste.deploy config for cinder-api (string +# value) +api_paste_config=api-paste.ini + +# Top-level directory for maintaining cinder's state (string +# value) +# Deprecated group/name - [DEFAULT]/pybasedir +state_path=/var/lib/cinder + +# IP address of this host (string value) +my_ip={{ MANAGEMENT_INTERFACE_IP_ADDRESS }} + +# Default glance host name or IP (string value) +glance_host={{ CONTROLLER_HOST_ADDRESS }} + +# Default glance port (integer value) +#glance_port=9292 + +# A list of the glance API servers available to cinder +# ([hostname|ip]:port) (list value) +#glance_api_servers=$glance_host:$glance_port + +# Version of the glance API to use (integer value) +#glance_api_version=1 + +# Number retries when downloading an image from glance +# (integer value) +#glance_num_retries=0 + +# Allow to perform insecure SSL (https) requests to glance +# (boolean value) +#glance_api_insecure=false + +# Enables or disables negotiation of SSL layer compression. In +# some cases disabling compression can improve data +# throughput, such as when high network bandwidth is available +# and you use compressed image formats like qcow2. (boolean +# value) +#glance_api_ssl_compression=false + +# Location of ca certificates file to use for glance client +# requests. (string value) +#glance_ca_certificates_file= + +# http/https timeout value for glance operations. If no value +# (None) is supplied here, the glanceclient default value is +# used. (integer value) +#glance_request_timeout= + +# The topic that scheduler nodes listen on (string value) +#scheduler_topic=cinder-scheduler + +# The topic that volume nodes listen on (string value) +#volume_topic=cinder-volume + +# The topic that volume backup nodes listen on (string value) +#backup_topic=cinder-backup + +# DEPRECATED: Deploy v1 of the Cinder API. (boolean value) +#enable_v1_api=true + +# Deploy v2 of the Cinder API. (boolean value) +#enable_v2_api=true + +# Enables or disables rate limit of the API. (boolean value) +#api_rate_limit=true + +# Specify list of extensions to load when using +# osapi_volume_extension option with +# cinder.api.contrib.select_extensions (list value) +#osapi_volume_ext_list= + +# osapi volume extension to load (multi valued) +#osapi_volume_extension=cinder.api.contrib.standard_extensions + +# Full class name for the Manager for volume (string value) +#volume_manager=cinder.volume.manager.VolumeManager + +# Full class name for the Manager for volume backup (string +# value) +#backup_manager=cinder.backup.manager.BackupManager + +# Full class name for the Manager for scheduler (string value) +#scheduler_manager=cinder.scheduler.manager.SchedulerManager + +# Name of this node. This can be an opaque identifier. It is +# not necessarily a host name, FQDN, or IP address. (string +# value) +#host=cinder + +# Availability zone of this node (string value) +#storage_availability_zone=nova + +# Default availability zone for new volumes. If not set, the +# storage_availability_zone option value is used as the +# default for new volumes. (string value) +#default_availability_zone= + +# Default volume type to use (string value) +#default_volume_type= + +# Time period for which to generate volume usages. The options +# are hour, day, month, or year. 
(string value) +#volume_usage_audit_period=month + +# Path to the rootwrap configuration file to use for running +# commands as root (string value) +rootwrap_config=/etc/cinder/rootwrap.conf + +# Enable monkey patching (boolean value) +#monkey_patch=false + +# List of modules/decorators to monkey patch (list value) +#monkey_patch_modules= + +# Maximum time since last check-in for a service to be +# considered up (integer value) +#service_down_time=60 + +# The full class name of the volume API class to use (string +# value) +#volume_api_class=cinder.volume.api.API + +# The full class name of the volume backup API class (string +# value) +#backup_api_class=cinder.backup.api.API + +# The strategy to use for auth. Supports noauth, keystone, and +# deprecated. (string value) +auth_strategy=keystone + +# A list of backend names to use. These backend names should +# be backed by a unique [CONFIG] group with its options (list +# value) +#enabled_backends= + +# Whether snapshots count against GigaByte quota (boolean +# value) +#no_snapshot_gb_quota=false + +# The full class name of the volume transfer API class (string +# value) +#transfer_api_class=cinder.transfer.api.API + +# The full class name of the volume replication API class +# (string value) +#replication_api_class=cinder.replication.api.API + +# The full class name of the consistencygroup API class +# (string value) +#consistencygroup_api_class=cinder.consistencygroup.api.API + + +# +# Options defined in cinder.compute +# + +# The full class name of the compute API class to use (string +# value) +#compute_api_class=cinder.compute.nova.API + + +# +# Options defined in cinder.compute.nova +# + +# Match this value when searching for nova in the service +# catalog. Format is: separated values of the form: +# :: (string value) +#nova_catalog_info=compute:nova:publicURL + +# Same as nova_catalog_info, but for admin endpoint. (string +# value) +#nova_catalog_admin_info=compute:nova:adminURL + +# Override service catalog lookup with template for nova +# endpoint e.g. http://localhost:8774/v2/%(project_id)s +# (string value) +#nova_endpoint_template= + +# Same as nova_endpoint_template, but for admin endpoint. +# (string value) +#nova_endpoint_admin_template= + +# Region name of this node (string value) +#os_region_name= + +# Location of ca certificates file to use for nova client +# requests. (string value) +#nova_ca_certificates_file= + +# Allow to perform insecure SSL requests to nova (boolean +# value) +#nova_api_insecure=false + + +# +# Options defined in cinder.db.api +# + +# The backend to use for db (string value) +#db_backend=sqlalchemy + +# Services to be added to the available pool on create +# (boolean value) +#enable_new_services=true + +# Template string to be used to generate volume names (string +# value) +volume_name_template=volume-%s + +# Template string to be used to generate snapshot names +# (string value) +#snapshot_name_template=snapshot-%s + +# Template string to be used to generate backup names (string +# value) +#backup_name_template=backup-%s + + +# +# Options defined in cinder.db.base +# + +# Driver to use for database access (string value) +#db_driver=cinder.db + + +# +# Options defined in cinder.image.glance +# + +# Default core properties of image (list value) +#glance_core_properties=checksum,container_format,disk_format,image_name,image_id,min_disk,min_ram,name,size + +# A list of url schemes that can be downloaded directly via +# the direct_url. Currently supported schemes: [file]. 
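The *_name_template options above are plain %-format strings that Cinder fills with the resource's ID, which is why backing volumes show up as volume-<uuid>:

    import uuid

    volume_name_template = 'volume-%s'
    snapshot_name_template = 'snapshot-%s'

    volume_id = str(uuid.uuid4())
    print(volume_name_template % volume_id)     # e.g. volume-6f3c9a0e-...
    print(snapshot_name_template % volume_id)
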
(list +# value) +#allowed_direct_url_schemes= + + +# +# Options defined in cinder.image.image_utils +# + +# Directory used for temporary storage during image conversion +# (string value) +#image_conversion_dir=$state_path/conversion + + +# +# Options defined in cinder.openstack.common.eventlet_backdoor +# + +# Enable eventlet backdoor. Acceptable values are 0, , +# and :, where 0 results in listening on a random +# tcp port number; results in listening on the +# specified port number (and not enabling backdoor if that +# port is in use); and : results in listening on +# the smallest unused port number within the specified range +# of port numbers. The chosen port is displayed in the +# service's log file. (string value) +#backdoor_port= + + +# +# Options defined in cinder.openstack.common.lockutils +# + +# Whether to disable inter-process locks (boolean value) +#disable_process_locking=false + +# Directory to use for lock files. Default to a temp directory +# (string value) +lock_path=/var/lock/cinder + + +# +# Options defined in cinder.openstack.common.log +# + +# Print debugging output (set logging level to DEBUG instead +# of default WARNING level). (boolean value) +#debug=false + +# Print more verbose output (set logging level to INFO instead +# of default WARNING level). (boolean value) +#verbose=false + +# Log output to standard error. (boolean value) +#use_stderr=true + +# Format string to use for log messages with context. (string +# value) +#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages without context. +# (string value) +#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG. (string +# value) +#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. +# (string value) +#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# List of logger=LEVEL pairs. (list value) +#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN + +# Enables or disables publication of error events. (boolean +# value) +#publish_errors=false + +# Enables or disables fatal status of deprecations. (boolean +# value) +#fatal_deprecations=false + +# The format for an instance that is passed with the log +# message. (string value) +#instance_format="[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log +# message. (string value) +#instance_uuid_format="[instance: %(uuid)s] " + +# The name of a logging configuration file. This file is +# appended to any existing logging configuration files. For +# details about logging configuration files, see the Python +# logging module documentation. (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append= + +# DEPRECATED. A logging.Formatter log message format string +# which may use any of the available logging.LogRecord +# attributes. This option is deprecated. Please use +# logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format= + +# Format string for %%(asctime)s in log records. 
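The logging_context_format_string above is an ordinary logging format string whose request_id and user_identity fields come from the request context. A sketch of the same layout with the standard library, supplying the context fields via extra (the %(instance)s prefix is left out for brevity):

    import logging

    fmt = ('%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s '
           '[%(request_id)s %(user_identity)s] %(message)s')
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S'))
    log = logging.getLogger('cinder.demo')
    log.addHandler(handler)

    # The context fields become LogRecord attributes, supplied per call.
    log.warning('volume created',
                extra={'request_id': 'req-1234',
                       'user_identity': 'admin admin'})
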
Default: +# %(default)s . (string value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If no default is +# set, logging will go to stdout. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file= + +# (Optional) The base directory used for relative --log-file +# paths. (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir= + +# Use syslog for logging. Existing syslog format is DEPRECATED +# during I, and will change in J to honor RFC5424. (boolean +# value) +use_syslog = True + +# (Optional) Enables or disables syslog rfc5424 format for +# logging. If enabled, prefixes the MSG part of the syslog +# message with APP-NAME (RFC5424). The format without the APP- +# NAME is deprecated in I, and will be removed in J. (boolean +# value) +#use_syslog_rfc_format=false + +# Syslog facility to receive log lines. (string value) +#syslog_log_facility=LOG_USER + + +# +# Options defined in cinder.openstack.common.periodic_task +# + +# Some periodic tasks can be run in a separate process. Should +# we run them here? (boolean value) +#run_external_periodic_tasks=true + + +# +# Options defined in cinder.openstack.common.policy +# + +# The JSON file that defines policies. (string value) +#policy_file=policy.json + +# Default rule. Enforced when a requested rule is not found. +# (string value) +#policy_default_rule=default + + +# +# Options defined in cinder.scheduler.driver +# + +# The scheduler host manager class to use (string value) +#scheduler_host_manager=cinder.scheduler.host_manager.HostManager + +# Maximum number of attempts to schedule an volume (integer +# value) +#scheduler_max_attempts=3 + + +# +# Options defined in cinder.scheduler.host_manager +# + +# Which filter class names to use for filtering hosts when not +# specified in the request. (list value) +#scheduler_default_filters=AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter + +# Which weigher class names to use for weighing hosts. (list +# value) +#scheduler_default_weighers=CapacityWeigher + + +# +# Options defined in cinder.scheduler.manager +# + +# Default scheduler driver to use (string value) +#scheduler_driver=cinder.scheduler.filter_scheduler.FilterScheduler + + +# +# Options defined in cinder.scheduler.scheduler_options +# + +# Absolute path to scheduler configuration JSON file. (string +# value) +#scheduler_json_config_location= + + +# +# Options defined in cinder.scheduler.simple +# + +# This configure option has been deprecated along with the +# SimpleScheduler. New scheduler is able to gather capacity +# information for each host, thus setting the maximum number +# of volume gigabytes for host is no longer needed. It's safe +# to remove this configure from cinder.conf. (integer value) +#max_gigabytes=10000 + + +# +# Options defined in cinder.scheduler.weights.capacity +# + +# Multiplier used for weighing volume capacity. Negative +# numbers mean to stack vs spread. (floating point value) +#capacity_weight_multiplier=1.0 + +# Multiplier used for weighing volume capacity. Negative +# numbers mean to stack vs spread. (floating point value) +#allocated_capacity_weight_multiplier=-1.0 + + +# +# Options defined in cinder.scheduler.weights.volume_number +# + +# Multiplier used for weighing volume number. Negative numbers +# mean to spread vs stack. (floating point value) +#volume_number_multiplier=-1.0 + + +# +# Options defined in cinder.transfer.api +# + +# The number of characters in the salt. 
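The sign convention of capacity_weight_multiplier above is easiest to see in a toy weigher: with a positive multiplier the emptiest host wins (spread), with a negative one the fullest host wins (stack):

    capacity_weight_multiplier = 1.0

    def weigh(hosts, multiplier=capacity_weight_multiplier):
        # Highest weight wins; flipping the multiplier's sign flips the order.
        return sorted(hosts, key=lambda h: h['free_gb'] * multiplier,
                      reverse=True)

    hosts = [{'name': 'host1', 'free_gb': 500},
             {'name': 'host2', 'free_gb': 120}]
    print([h['name'] for h in weigh(hosts)])                   # spread: host1
    print([h['name'] for h in weigh(hosts, multiplier=-1.0)])  # stack: host2
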
(integer value) +#volume_transfer_salt_length=8 + +# The number of characters in the autogenerated auth key. +# (integer value) +#volume_transfer_key_length=16 + + +# +# Options defined in cinder.volume.api +# + +# Cache volume availability zones in memory for the provided +# duration in seconds (integer value) +#az_cache_duration=3600 + +# Create volume from snapshot at the host where snapshot +# resides (boolean value) +#snapshot_same_host=true + +# Ensure that the new volumes are the same AZ as snapshot or +# source volume (boolean value) +#cloned_volume_same_az=true + + +# +# Options defined in cinder.volume.driver +# + +# The maximum number of times to rescan iSER targetto find +# volume (integer value) +#num_iser_scan_tries=3 + +# The maximum number of iSER target IDs per host (integer +# value) +#iser_num_targets=100 + +# Prefix for iSER volumes (string value) +#iser_target_prefix=iqn.2010-10.org.iser.openstack: + +# The IP address that the iSER daemon is listening on (string +# value) +#iser_ip_address=$my_ip + +# The port that the iSER daemon is listening on (integer +# value) +#iser_port=3260 + +# The name of the iSER target user-land tool to use (string +# value) +#iser_helper=tgtadm + +# Number of times to attempt to run flakey shell commands +# (integer value) +#num_shell_tries=3 + +# The percentage of backend capacity is reserved (integer +# value) +#reserved_percentage=0 + +# The maximum number of iSCSI target IDs per host (integer +# value) +#iscsi_num_targets=100 + +# Prefix for iSCSI volumes (string value) +#iscsi_target_prefix=iqn.2010-10.org.openstack: + +# The IP address that the iSCSI daemon is listening on (string +# value) +iscsi_ip_address={{ MANAGEMENT_INTERFACE_IP_ADDRESS }} + +# The port that the iSCSI daemon is listening on (integer +# value) +#iscsi_port=3260 + +# The maximum number of times to rescan targets to find volume +# (integer value) +# Deprecated group/name - [DEFAULT]/num_iscsi_scan_tries +#num_volume_device_scan_tries=3 + +# The backend name for a given driver implementation (string +# value) +volume_backend_name=LVM_iSCSI + +# Do we attach/detach volumes in cinder using multipath for +# volume to image and image to volume transfers? (boolean +# value) +#use_multipath_for_image_xfer=false + +# Method used to wipe old volumes (valid options are: none, +# zero, shred) (string value) +#volume_clear=zero + +# Size in MiB to wipe at start of old volumes. 0 => all +# (integer value) +#volume_clear_size=0 + +# The flag to pass to ionice to alter the i/o priority of the +# process used to zero a volume after deletion, for example +# "-c3" for idle only priority. (string value) +#volume_clear_ionice= + +# iSCSI target user-land tool to use. tgtadm is default, use +# lioadm for LIO iSCSI support, iseradm for the ISER protocol, +# or fake for testing. (string value) +iscsi_helper=lioadm + +# Volume configuration file storage directory (string value) +volumes_dir=$state_path/volumes + +# IET configuration file (string value) +#iet_conf=/etc/iet/ietd.conf + +# Comma-separated list of initiator IQNs allowed to connect to +# the iSCSI target. (From Nova compute nodes.) 
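volume_transfer_salt_length and volume_transfer_key_length above only fix how many characters get generated. A sketch of such an autogenerated transfer auth key; Cinder's own generator may differ in alphabet and RNG:

    import random
    import string

    volume_transfer_key_length = 16   # matches the option above

    def generate_auth_key(length=volume_transfer_key_length):
        chars = string.ascii_letters + string.digits
        return ''.join(random.choice(chars) for _ in range(length))

    print(generate_auth_key())
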
(string value) +#lio_initiator_iqns= + +# Sets the behavior of the iSCSI target to either perform +# blockio or fileio optionally, auto can be set and Cinder +# will autodetect type of backing device (string value) +#iscsi_iotype=fileio + +# The default block size used when copying/clearing volumes +# (string value) +#volume_dd_blocksize=1M + +# The blkio cgroup name to be used to limit bandwidth of +# volume copy (string value) +#volume_copy_blkio_cgroup_name=cinder-volume-copy + +# The upper limit of bandwidth of volume copy. 0 => unlimited +# (integer value) +#volume_copy_bps_limit=0 + +# Sets the behavior of the iSCSI target to either perform +# write-back(on) or write-through(off). This parameter is +# valid if iscsi_helper is set to tgtadm or iseradm. (string +# value) +#iscsi_write_cache=on + +# The path to the client certificate key for verification, if +# the driver supports it. (string value) +#driver_client_cert_key= + +# The path to the client certificate for verification, if the +# driver supports it. (string value) +#driver_client_cert= + + +# +# Options defined in cinder.volume.drivers.block_device +# + +# List of all available devices (list value) +#available_devices= + + +# +# Options defined in cinder.volume.drivers.coraid +# + +# IP address of Coraid ESM (string value) +#coraid_esm_address= + +# User name to connect to Coraid ESM (string value) +#coraid_user=admin + +# Name of group on Coraid ESM to which coraid_user belongs +# (must have admin privilege) (string value) +#coraid_group=admin + +# Password to connect to Coraid ESM (string value) +#coraid_password=password + +# Volume Type key name to store ESM Repository Name (string +# value) +#coraid_repository_key=coraid_repository + + +# +# Options defined in cinder.volume.drivers.datera +# + +# Datera API token. (string value) +#datera_api_token= + +# Datera API port. (string value) +#datera_api_port=7717 + +# Datera API version. (string value) +#datera_api_version=1 + +# Number of replicas to create of an inode. (string value) +#datera_num_replicas=3 + + +# +# Options defined in cinder.volume.drivers.emc.emc_vmax_common +# + +# use this file for cinder emc plugin config data (string +# value) +#cinder_emc_config_file=/etc/cinder/cinder_emc_config.xml + + +# +# Options defined in cinder.volume.drivers.emc.emc_vnx_cli +# + +# VNX authentication scope type. (string value) +#storage_vnx_authentication_type=global + +# Directory path that contains the VNX security file. Make +# sure the security file is generated first. (string value) +#storage_vnx_security_file_dir= + +# Naviseccli Path. (string value) +#naviseccli_path= + +# Storage pool name. (string value) +#storage_vnx_pool_name= + +# VNX secondary SP IP Address. (string value) +#san_secondary_ip= + +# Default timeout for CLI operations in minutes. For example, +# LUN migration is a typical long running operation, which +# depends on the LUN size and the load of the array. An upper +# bound in the specific deployment can be set to avoid +# unnecessary long wait. By default, it is 365 days long. +# (integer value) +#default_timeout=525600 + +# Default max number of LUNs in a storage group. By default, +# the value is 255. (integer value) +#max_luns_per_storage_group=255 + +# To destroy storage group when the last LUN is removed from +# it. By default, the value is False. (boolean value) +#destroy_empty_storage_group=false + +# Mapping between hostname and its iSCSI initiator IP +# addresses. (string value) +#iscsi_initiators= + +# Automatically register initiators. 
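volume_clear, volume_clear_size, volume_clear_ionice and volume_dd_blocksize above combine into the wipe command run when an old volume is deleted. A sketch of that command assembly, not Cinder's exact invocation:

    def clear_volume_cmd(path, blocksize='1M', ionice=None, clear_size_mib=0):
        cmd = ['dd', 'if=/dev/zero', 'of=%s' % path, 'bs=%s' % blocksize]
        if clear_size_mib:               # 0 means wipe the whole device
            cmd.append('count=%d' % clear_size_mib)   # MiB, given bs=1M
        if ionice:                       # e.g. '-c3' for idle-only priority
            cmd = ['ionice', ionice] + cmd
        return cmd

    print(' '.join(clear_volume_cmd('/dev/cinder-volumes/volume-x',
                                    ionice='-c3')))
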
By default, the value is +# False. (boolean value) +#initiator_auto_registration=false + + +# +# Options defined in cinder.volume.drivers.eqlx +# + +# Group name to use for creating volumes (string value) +#eqlx_group_name=group-0 + +# Timeout for the Group Manager cli command execution (integer +# value) +#eqlx_cli_timeout=30 + +# Maximum retry count for reconnection (integer value) +#eqlx_cli_max_retries=5 + +# Use CHAP authentication for targets? (boolean value) +#eqlx_use_chap=false + +# Existing CHAP account name (string value) +#eqlx_chap_login=admin + +# Password for specified CHAP account name (string value) +#eqlx_chap_password=password + +# Pool in which volumes will be created (string value) +#eqlx_pool=default + + +# +# Options defined in cinder.volume.drivers.fujitsu_eternus_dx_common +# + +# The configuration file for the Cinder SMI-S driver (string +# value) +#cinder_smis_config_file=/etc/cinder/cinder_fujitsu_eternus_dx.xml + + +# +# Options defined in cinder.volume.drivers.fusionio.ioControl +# + +# amount of time wait for iSCSI target to come online (integer +# value) +#fusionio_iocontrol_targetdelay=5 + +# number of retries for GET operations (integer value) +#fusionio_iocontrol_retry=3 + +# verify the array certificate on each transaction (boolean +# value) +#fusionio_iocontrol_verify_cert=true + + +# +# Options defined in cinder.volume.drivers.glusterfs +# + +# File with the list of available gluster shares (string +# value) +#glusterfs_shares_config=/etc/cinder/glusterfs_shares + +# Create volumes as sparsed files which take no space.If set +# to False volume is created as regular file.In such case +# volume creation takes a lot of time. (boolean value) +#glusterfs_sparsed_volumes=true + +# Create volumes as QCOW2 files rather than raw files. +# (boolean value) +#glusterfs_qcow2_volumes=false + +# Base dir containing mount points for gluster shares. 
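"Sparsed" volume files, as in glusterfs_sparsed_volumes above, are created by extending the file without writing any data, so they consume no space until used; fully allocated files must be written block by block, which is the slow path the comment mentions. A sketch of both modes:

    import os

    def create_volume_file(path, size_bytes, sparsed=True):
        with open(path, 'wb') as f:
            if sparsed:
                f.truncate(size_bytes)   # extends the file, writes nothing
            else:
                for _ in range(size_bytes // 4096):
                    f.write(b'\0' * 4096)   # fully allocated: the slow path

    create_volume_file('/tmp/volume-demo', 1024 * 1024 * 1024)
    st = os.stat('/tmp/volume-demo')
    print(st.st_size)     # 1 GiB apparent size
    print(st.st_blocks)   # close to zero blocks actually allocated (Linux)
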
(string +# value) +#glusterfs_mount_point_base=$state_path/mnt + + +# +# Options defined in cinder.volume.drivers.hds.hds +# + +# The configuration file for the Cinder HDS driver for HUS +# (string value) +#hds_cinder_config_file=/opt/hds/hus/cinder_hus_conf.xml + + +# +# Options defined in cinder.volume.drivers.hds.iscsi +# + +# Configuration file for HDS iSCSI cinder plugin (string +# value) +#hds_hnas_iscsi_config_file=/opt/hds/hnas/cinder_iscsi_conf.xml + + +# +# Options defined in cinder.volume.drivers.hds.nfs +# + +# Configuration file for HDS NFS cinder plugin (string value) +#hds_hnas_nfs_config_file=/opt/hds/hnas/cinder_nfs_conf.xml + + +# +# Options defined in cinder.volume.drivers.hitachi.hbsd_common +# + +# Serial number of storage system (string value) +#hitachi_serial_number= + +# Name of an array unit (string value) +#hitachi_unit_name= + +# Pool ID of storage system (integer value) +#hitachi_pool_id= + +# Thin pool ID of storage system (integer value) +#hitachi_thin_pool_id= + +# Range of logical device of storage system (string value) +#hitachi_ldev_range= + +# Default copy method of storage system (string value) +#hitachi_default_copy_method=FULL + +# Copy speed of storage system (integer value) +#hitachi_copy_speed=3 + +# Interval to check copy (integer value) +#hitachi_copy_check_interval=3 + +# Interval to check copy asynchronously (integer value) +#hitachi_async_copy_check_interval=10 + +# Control port names for HostGroup or iSCSI Target (string +# value) +#hitachi_target_ports= + +# Range of group number (string value) +#hitachi_group_range= + +# Request for creating HostGroup or iSCSI Target (boolean +# value) +#hitachi_group_request=false + + +# +# Options defined in cinder.volume.drivers.hitachi.hbsd_fc +# + +# Request for FC Zone creating HostGroup (boolean value) +#hitachi_zoning_request=false + + +# +# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm +# + +# Instance numbers for HORCM (string value) +#hitachi_horcm_numbers=200,201 + +# Username of storage system for HORCM (string value) +#hitachi_horcm_user= + +# Password of storage system for HORCM (string value) +#hitachi_horcm_password= + +# Add to HORCM configuration (boolean value) +#hitachi_horcm_add_conf=true + + +# +# Options defined in cinder.volume.drivers.hitachi.hbsd_iscsi +# + +# Add CHAP user (boolean value) +#hitachi_add_chap_user=false + +# iSCSI authentication method (string value) +#hitachi_auth_method= + +# iSCSI authentication username (string value) +#hitachi_auth_user=HBSD-CHAP-user + +# iSCSI authentication password (string value) +#hitachi_auth_password=HBSD-CHAP-password + + +# +# Options defined in cinder.volume.drivers.huawei +# + +# The configuration file for the Cinder Huawei driver (string +# value) +#cinder_huawei_conf_file=/etc/cinder/cinder_huawei_conf.xml + + +# +# Options defined in cinder.volume.drivers.ibm.gpfs +# + +# Specifies the path of the GPFS directory where Block Storage +# volume and snapshot files are stored. (string value) +#gpfs_mount_point_base= + +# Specifies the path of the Image service repository in GPFS. +# Leave undefined if not storing images in GPFS. (string +# value) +#gpfs_images_dir= + +# Specifies the type of image copy to be used. Set this when +# the Image service repository also uses GPFS so that image +# files can be transferred efficiently from the Image service +# to the Block Storage service. 
There are two valid values: +# "copy" specifies that a full copy of the image is made; +# "copy_on_write" specifies that copy-on-write optimization +# strategy is used and unmodified blocks of the image file are +# shared efficiently. (string value) +#gpfs_images_share_mode= + +# Specifies an upper limit on the number of indirections +# required to reach a specific block due to snapshots or +# clones. A lengthy chain of copy-on-write snapshots or +# clones can have a negative impact on performance, but +# improves space utilization. 0 indicates unlimited clone +# depth. (integer value) +#gpfs_max_clone_depth=0 + +# Specifies that volumes are created as sparse files which +# initially consume no space. If set to False, the volume is +# created as a fully allocated file, in which case, creation +# may take a significantly longer time. (boolean value) +#gpfs_sparse_volumes=true + +# Specifies the storage pool that volumes are assigned to. By +# default, the system storage pool is used. (string value) +#gpfs_storage_pool=system + + +# +# Options defined in cinder.volume.drivers.ibm.ibmnas +# + +# IP address or Hostname of NAS system. (string value) +#nas_ip= + +# User name to connect to NAS system. (string value) +#nas_login=admin + +# Password to connect to NAS system. (string value) +#nas_password= + +# SSH port to use to connect to NAS system. (integer value) +#nas_ssh_port=22 + +# Filename of private key to use for SSH authentication. +# (string value) +#nas_private_key= + +# IBMNAS platform type to be used as backend storage; valid +# values are - v7ku : for using IBM Storwize V7000 Unified, +# sonas : for using IBM Scale Out NAS, gpfs-nas : for using +# NFS based IBM GPFS deployments. (string value) +#ibmnas_platform_type=v7ku + + +# +# Options defined in cinder.volume.drivers.ibm.storwize_svc +# + +# Storage system storage pool for volumes (string value) +#storwize_svc_volpool_name=volpool + +# Storage system space-efficiency parameter for volumes +# (percentage) (integer value) +#storwize_svc_vol_rsize=2 + +# Storage system threshold for volume capacity warnings +# (percentage) (integer value) +#storwize_svc_vol_warning=0 + +# Storage system autoexpand parameter for volumes (True/False) +# (boolean value) +#storwize_svc_vol_autoexpand=true + +# Storage system grain size parameter for volumes +# (32/64/128/256) (integer value) +#storwize_svc_vol_grainsize=256 + +# Storage system compression option for volumes (boolean +# value) +#storwize_svc_vol_compression=false + +# Enable Easy Tier for volumes (boolean value) +#storwize_svc_vol_easytier=true + +# The I/O group in which to allocate volumes (integer value) +#storwize_svc_vol_iogrp=0 + +# Maximum number of seconds to wait for FlashCopy to be +# prepared. Maximum value is 600 seconds (10 minutes) (integer +# value) +#storwize_svc_flashcopy_timeout=120 + +# Connection protocol (iSCSI/FC) (string value) +#storwize_svc_connection_protocol=iSCSI + +# Configure CHAP authentication for iSCSI connections +# (Default: Enabled) (boolean value) +#storwize_svc_iscsi_chap_enabled=true + +# Connect with multipath (FC only; iSCSI multipath is +# controlled by Nova) (boolean value) +#storwize_svc_multipath_enabled=false + +# Allows vdisk to multi host mapping (boolean value) +#storwize_svc_multihostmap_enabled=true + +# Indicate whether svc driver is compatible for NPIV setup. 
If +# it is compatible, it will allow no wwpns being returned on +# get_conn_fc_wwpns during initialize_connection (boolean +# value) +#storwize_svc_npiv_compatibility_mode=false + +# Allow tenants to specify QOS on create (boolean value) +#storwize_svc_allow_tenant_qos=false + +# If operating in stretched cluster mode, specify the name of +# the pool in which mirrored copies are stored.Example: +# "pool2" (string value) +#storwize_svc_stretched_cluster_partner= + + +# +# Options defined in cinder.volume.drivers.ibm.xiv_ds8k +# + +# Proxy driver that connects to the IBM Storage Array (string +# value) +#xiv_ds8k_proxy=xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy + +# Connection type to the IBM Storage Array +# (fibre_channel|iscsi) (string value) +#xiv_ds8k_connection_type=iscsi + +# CHAP authentication mode, effective only for iscsi +# (disabled|enabled) (string value) +#xiv_chap=disabled + + +# +# Options defined in cinder.volume.drivers.lvm +# + +# Name for the VG that will contain exported volumes (string +# value) +volume_group=cinder-volumes + +# If >0, create LVs with multiple mirrors. Note that this +# requires lvm_mirrors + 2 PVs with available space (integer +# value) +#lvm_mirrors=0 + +# Type of LVM volumes to deploy; (default or thin) (string +# value) +#lvm_type=default + + +# +# Options defined in cinder.volume.drivers.netapp.options +# + +# The vFiler unit on which provisioning of block storage +# volumes will be done. This option is only used by the driver +# when connecting to an instance with a storage family of Data +# ONTAP operating in 7-Mode and the storage protocol selected +# is iSCSI. Only use this option when utilizing the MultiStore +# feature on the NetApp storage system. (string value) +#netapp_vfiler= + +# Administrative user account name used to access the storage +# system or proxy server. (string value) +#netapp_login= + +# Password for the administrative user account specified in +# the netapp_login option. (string value) +#netapp_password= + +# This option specifies the virtual storage server (Vserver) +# name on the storage cluster on which provisioning of block +# storage volumes should occur. If using the NFS storage +# protocol, this parameter is mandatory for storage service +# catalog support (utilized by Cinder volume type extra_specs +# support). If this option is specified, the exports belonging +# to the Vserver will only be used for provisioning in the +# future. Block storage volumes on exports not belonging to +# the Vserver specified by this option will continue to +# function normally. (string value) +#netapp_vserver= + +# The hostname (or IP address) for the storage system or proxy +# server. (string value) +#netapp_server_hostname= + +# The TCP port to use for communication with the storage +# system or proxy server. Traditionally, port 80 is used for +# HTTP and port 443 is used for HTTPS; however, this value +# should be changed if an alternate port has been configured +# on the storage system or proxy server. (integer value) +#netapp_server_port=80 + +# This option is used to specify the path to the E-Series +# proxy application on a proxy server. The value is combined +# with the value of the netapp_transport_type, +# netapp_server_hostname, and netapp_server_port options to +# create the URL used by the driver to connect to the proxy +# application. (string value) +#netapp_webservice_path=/devmgr/v2 + +# This option is only utilized when the storage family is +# configured to eseries. 
This option is used to restrict +# provisioning to the specified controllers. Specify the value +# of this option to be a comma separated list of controller +# hostnames or IP addresses to be used for provisioning. +# (string value) +#netapp_controller_ips= + +# Password for the NetApp E-Series storage array. (string +# value) +#netapp_sa_password= + +# This option is used to restrict provisioning to the +# specified storage pools. Only dynamic disk pools are +# currently supported. Specify the value of this option to be +# a comma separated list of disk pool names to be used for +# provisioning. (string value) +#netapp_storage_pools= + +# This option is used to define how the controllers in the +# E-Series storage array will work with the particular +# operating system on the hosts that are connected to it. +# (string value) +#netapp_eseries_host_type=linux_dm_mp + +# If the percentage of available space for an NFS share has +# dropped below the value specified by this option, the NFS +# image cache will be cleaned. (integer value) +#thres_avl_size_perc_start=20 + +# When the percentage of available space on an NFS share has +# reached the percentage specified by this option, the driver +# will stop clearing files from the NFS image cache that have +# not been accessed in the last M minutes, where M is the +# value of the expiry_thres_minutes configuration option. +# (integer value) +#thres_avl_size_perc_stop=60 + +# This option specifies the threshold for last access time for +# images in the NFS image cache. When a cache cleaning cycle +# begins, images in the cache that have not been accessed in +# the last M minutes, where M is the value of this parameter, +# will be deleted from the cache to create free space on the +# NFS share. (integer value) +#expiry_thres_minutes=720 + +# This option specifies the path of the NetApp copy offload +# tool binary. Ensure that the binary has execute permissions +# set which allow the effective user of the cinder-volume +# process to execute the file. (string value) +#netapp_copyoffload_tool_path= + +# The quantity to be multiplied by the requested volume size +# to ensure enough space is available on the virtual storage +# server (Vserver) to fulfill the volume creation request. +# (floating point value) +#netapp_size_multiplier=1.2 + +# This option is only utilized when the storage protocol is +# configured to use iSCSI. This option is used to restrict +# provisioning to the specified controller volumes. Specify +# the value of this option to be a comma separated list of +# NetApp controller volume names to be used for provisioning. +# (string value) +#netapp_volume_list= + +# The storage family type used on the storage system; valid +# values are ontap_7mode for using Data ONTAP operating in +# 7-Mode, ontap_cluster for using clustered Data ONTAP, or +# eseries for using E-Series. (string value) +#netapp_storage_family=ontap_cluster + +# The storage protocol to be used on the data path with the +# storage system; valid values are iscsi or nfs. (string +# value) +#netapp_storage_protocol= + +# The transport protocol used when communicating with the +# storage system or proxy server. Valid values are http or +# https. 
(string value) +#netapp_transport_type=http + + +# +# Options defined in cinder.volume.drivers.nexenta.options +# + +# IP address of Nexenta SA (string value) +#nexenta_host= + +# HTTP port to connect to Nexenta REST API server (integer +# value) +#nexenta_rest_port=2000 + +# Use http or https for REST connection (default auto) (string +# value) +#nexenta_rest_protocol=auto + +# User name to connect to Nexenta SA (string value) +#nexenta_user=admin + +# Password to connect to Nexenta SA (string value) +#nexenta_password=nexenta + +# Nexenta target portal port (integer value) +#nexenta_iscsi_target_portal_port=3260 + +# SA Pool that holds all volumes (string value) +#nexenta_volume=cinder + +# IQN prefix for iSCSI targets (string value) +#nexenta_target_prefix=iqn.1986-03.com.sun:02:cinder- + +# Prefix for iSCSI target groups on SA (string value) +#nexenta_target_group_prefix=cinder/ + +# File with the list of available nfs shares (string value) +#nexenta_shares_config=/etc/cinder/nfs_shares + +# Base directory that contains NFS share mount points (string +# value) +#nexenta_mount_point_base=$state_path/mnt + +# Enables or disables the creation of volumes as sparsed files +# that take no space. If disabled (False), volume is created +# as a regular file, which takes a long time. (boolean value) +#nexenta_sparsed_volumes=true + +# Default compression value for new ZFS folders. (string +# value) +#nexenta_volume_compression=on + +# If set True cache NexentaStor appliance volroot option +# value. (boolean value) +#nexenta_nms_cache_volroot=true + +# Enable stream compression, level 1..9. 1 - gives best speed; +# 9 - gives best compression. (integer value) +#nexenta_rrmgr_compression=0 + +# TCP Buffer size in KiloBytes. (integer value) +#nexenta_rrmgr_tcp_buf_size=4096 + +# Number of TCP connections. (integer value) +#nexenta_rrmgr_connections=2 + +# Block size for volumes (default=blank means 8KB) (string +# value) +#nexenta_blocksize= + +# Enables or disables the creation of sparse volumes (boolean +# value) +#nexenta_sparse=false + + +# +# Options defined in cinder.volume.drivers.nfs +# + +# File with the list of available nfs shares (string value) +#nfs_shares_config=/etc/cinder/nfs_shares + +# Create volumes as sparsed files which take no space.If set +# to False volume is created as regular file.In such case +# volume creation takes a lot of time. (boolean value) +#nfs_sparsed_volumes=true + +# Percent of ACTUAL usage of the underlying volume before no +# new volumes can be allocated to the volume destination. +# (floating point value) +#nfs_used_ratio=0.95 + +# This will compare the allocated to available space on the +# volume destination. If the ratio exceeds this number, the +# destination will no longer be valid. (floating point value) +#nfs_oversub_ratio=1.0 + +# Base dir containing mount points for nfs shares. (string +# value) +#nfs_mount_point_base=$state_path/mnt + +# Mount options passed to the nfs client. See section of the +# nfs man page for details. (string value) +#nfs_mount_options= + + +# +# Options defined in cinder.volume.drivers.nimble +# + +# Nimble Controller pool name (string value) +#nimble_pool_name=default + +# Nimble Subnet Label (string value) +#nimble_subnet_label=* + + +# +# Options defined in cinder.volume.drivers.prophetstor.options +# + +# DPL pool uuid in which DPL volumes are stored. (string +# value) +#dpl_pool= + +# DPL port number. 
(integer value) +#dpl_port=8357 + + +# +# Options defined in cinder.volume.drivers.pure +# + +# REST API authorization token. (string value) +#pure_api_token= + + +# +# Options defined in cinder.volume.drivers.rbd +# + +# The RADOS pool where rbd volumes are stored (string value) +#rbd_pool=rbd + +# The RADOS client name for accessing rbd volumes - only set +# when using cephx authentication (string value) +#rbd_user= + +# Path to the ceph configuration file (string value) +#rbd_ceph_conf= + +# Flatten volumes created from snapshots to remove dependency +# from volume to snapshot (boolean value) +#rbd_flatten_volume_from_snapshot=false + +# The libvirt uuid of the secret for the rbd_user volumes +# (string value) +#rbd_secret_uuid= + +# Directory where temporary image files are stored when the +# volume driver does not write them directly to the volume. +# (string value) +#volume_tmp_dir= + +# Maximum number of nested volume clones that are taken before +# a flatten occurs. Set to 0 to disable cloning. (integer +# value) +#rbd_max_clone_depth=5 + +# Volumes will be chunked into objects of this size (in +# megabytes). (integer value) +#rbd_store_chunk_size=4 + +# Timeout value (in seconds) used when connecting to ceph +# cluster. If value < 0, no timeout is set and default +# librados value is used. (integer value) +#rados_connect_timeout=-1 + + +# +# Options defined in cinder.volume.drivers.remotefs +# + +# IP address or Hostname of NAS system. (string value) +#nas_ip= + +# User name to connect to NAS system. (string value) +#nas_login=admin + +# Password to connect to NAS system. (string value) +#nas_password= + +# SSH port to use to connect to NAS system. (integer value) +#nas_ssh_port=22 + +# Filename of private key to use for SSH authentication. +# (string value) +#nas_private_key= + + +# +# Options defined in cinder.volume.drivers.san.hp.hp_3par_common +# + +# 3PAR WSAPI Server Url like https://<3par ip>:8080/api/v1 +# (string value) +#hp3par_api_url= + +# 3PAR Super user username (string value) +#hp3par_username= + +# 3PAR Super user password (string value) +#hp3par_password= + +# The CPG to use for volume creation (string value) +#hp3par_cpg=OpenStack + +# The CPG to use for Snapshots for volumes. If empty +# hp3par_cpg will be used (string value) +#hp3par_cpg_snap= + +# The time in hours to retain a snapshot. You can't delete it +# before this expires. (string value) +#hp3par_snapshot_retention= + +# The time in hours when a snapshot expires and is deleted. +# This must be larger than expiration (string value) +#hp3par_snapshot_expiration= + +# Enable HTTP debugging to 3PAR (boolean value) +#hp3par_debug=false + +# List of target iSCSI addresses to use. (list value) +#hp3par_iscsi_ips= + +# Enable CHAP authentication for iSCSI connections. 
(boolean +# value) +#hp3par_iscsi_chap_enabled=false + + +# +# Options defined in cinder.volume.drivers.san.hp.hp_lefthand_rest_proxy +# + +# HP LeftHand WSAPI Server Url like https://:8081/lhos (string value) +#hplefthand_api_url= + +# HP LeftHand Super user username (string value) +#hplefthand_username= + +# HP LeftHand Super user password (string value) +#hplefthand_password= + +# HP LeftHand cluster name (string value) +#hplefthand_clustername= + +# Configure CHAP authentication for iSCSI connections +# (Default: Disabled) (boolean value) +#hplefthand_iscsi_chap_enabled=false + +# Enable HTTP debugging to LeftHand (boolean value) +#hplefthand_debug=false + + +# +# Options defined in cinder.volume.drivers.san.hp.hp_msa_common +# + +# The VDisk to use for volume creation. (string value) +#msa_vdisk=OpenStack + + +# +# Options defined in cinder.volume.drivers.san.san +# + +# Use thin provisioning for SAN volumes? (boolean value) +#san_thin_provision=true + +# IP address of SAN controller (string value) +#san_ip= + +# Username for SAN controller (string value) +#san_login=admin + +# Password for SAN controller (string value) +#san_password= + +# Filename of private key to use for SSH authentication +# (string value) +#san_private_key= + +# Cluster name to use for creating volumes (string value) +#san_clustername= + +# SSH port to use with SAN (integer value) +#san_ssh_port=22 + +# Execute commands locally instead of over SSH; use if the +# volume service is running on the SAN device (boolean value) +#san_is_local=false + +# SSH connection timeout in seconds (integer value) +#ssh_conn_timeout=30 + +# Minimum ssh connections in the pool (integer value) +#ssh_min_pool_conn=1 + +# Maximum ssh connections in the pool (integer value) +#ssh_max_pool_conn=5 + + +# +# Options defined in cinder.volume.drivers.san.solaris +# + +# The ZFS path under which to create zvols for volumes. +# (string value) +#san_zfs_volume_base=rpool/ + + +# +# Options defined in cinder.volume.drivers.scality +# + +# Path or URL to Scality SOFS configuration file (string +# value) +#scality_sofs_config= + +# Base dir where Scality SOFS shall be mounted (string value) +#scality_sofs_mount_point=$state_path/scality + +# Path from Scality SOFS root to volume dir (string value) +#scality_sofs_volume_dir=cinder/volumes + + +# +# Options defined in cinder.volume.drivers.smbfs +# + +# File with the list of available smbfs shares. (string value) +#smbfs_shares_config=/etc/cinder/smbfs_shares + +# Default format that will be used when creating volumes if no +# volume format is specified. Can be set to: raw, qcow2, vhd +# or vhdx. (string value) +#smbfs_default_volume_format=qcow2 + +# Create volumes as sparsed files which take no space rather +# than regular files when using raw format, in which case +# volume creation takes lot of time. (boolean value) +#smbfs_sparsed_volumes=true + +# Percent of ACTUAL usage of the underlying volume before no +# new volumes can be allocated to the volume destination. +# (floating point value) +#smbfs_used_ratio=0.95 + +# This will compare the allocated to available space on the +# volume destination. If the ratio exceeds this number, the +# destination will no longer be valid. (floating point value) +#smbfs_oversub_ratio=1.0 + +# Base dir containing mount points for smbfs shares. (string +# value) +#smbfs_mount_point_base=$state_path/mnt + +# Mount options passed to the smbfs client. See mount.cifs man +# page for details. 
(string value) +#smbfs_mount_options=noperm,file_mode=0775,dir_mode=0775 + + +# +# Options defined in cinder.volume.drivers.solidfire +# + +# Set 512 byte emulation on volume creation; (boolean value) +#sf_emulate_512=true + +# Allow tenants to specify QOS on create (boolean value) +#sf_allow_tenant_qos=false + +# Create SolidFire accounts with this prefix. Any string can +# be used here, but the string "hostname" is special and will +# create a prefix using the cinder node hostsname (previous +# default behavior). The default is NO prefix. (string value) +#sf_account_prefix= + +# SolidFire API port. Useful if the device api is behind a +# proxy on a different port. (integer value) +#sf_api_port=443 + + +# +# Options defined in cinder.volume.drivers.vmware.vmdk +# + +# IP address for connecting to VMware ESX/VC server. (string +# value) +#vmware_host_ip= + +# Username for authenticating with VMware ESX/VC server. +# (string value) +#vmware_host_username= + +# Password for authenticating with VMware ESX/VC server. +# (string value) +#vmware_host_password= + +# Optional VIM service WSDL Location e.g +# http:///vimService.wsdl. Optional over-ride to +# default location for bug work-arounds. (string value) +#vmware_wsdl_location= + +# Number of times VMware ESX/VC server API must be retried +# upon connection related issues. (integer value) +#vmware_api_retry_count=10 + +# The interval (in seconds) for polling remote tasks invoked +# on VMware ESX/VC server. (floating point value) +#vmware_task_poll_interval=0.5 + +# Name for the folder in the VC datacenter that will contain +# cinder volumes. (string value) +#vmware_volume_folder=cinder-volumes + +# Timeout in seconds for VMDK volume transfer between Cinder +# and Glance. (integer value) +#vmware_image_transfer_timeout_secs=7200 + +# Max number of objects to be retrieved per batch. Query +# results will be obtained in batches from the server and not +# in one shot. Server may still limit the count to something +# less than the configured value. (integer value) +#vmware_max_objects_retrieval=100 + +# Optional string specifying the VMware VC server version. The +# driver attempts to retrieve the version from VMware VC +# server. Set this configuration only if you want to override +# the VC server version. (string value) +#vmware_host_version= + +# Directory where virtual disks are stored during volume +# backup and restore. 
(string value) +#vmware_tmp_dir=/tmp + + +# +# Options defined in cinder.volume.drivers.windows.windows +# + +# Path to store VHD backed volumes (string value) +#windows_iscsi_lun_path=C:\iSCSIVirtualDisks + + +# +# Options defined in cinder.volume.drivers.zadara +# + +# Management IP of Zadara VPSA (string value) +#zadara_vpsa_ip= + +# Zadara VPSA port number (string value) +#zadara_vpsa_port= + +# Use SSL connection (boolean value) +#zadara_vpsa_use_ssl=false + +# User name for the VPSA (string value) +#zadara_user= + +# Password for the VPSA (string value) +#zadara_password= + +# Name of VPSA storage pool for volumes (string value) +#zadara_vpsa_poolname= + +# Default thin provisioning policy for volumes (boolean value) +#zadara_vol_thin=true + +# Default encryption policy for volumes (boolean value) +#zadara_vol_encrypt=false + +# Default template for VPSA volume names (string value) +#zadara_vol_name_template=OS_%s + +# Automatically detach from servers on volume delete (boolean +# value) +#zadara_vpsa_auto_detach_on_delete=true + +# Don't halt on deletion of non-existing volumes (boolean +# value) +#zadara_vpsa_allow_nonexistent_delete=true + + +# +# Options defined in cinder.volume.drivers.zfssa.zfssaiscsi +# + +# Storage pool name. (string value) +#zfssa_pool= + +# Project name. (string value) +#zfssa_project= + +# Block size: 512, 1k, 2k, 4k, 8k, 16k, 32k, 64k, 128k. +# (string value) +#zfssa_lun_volblocksize=8k + +# Flag to enable sparse (thin-provisioned): True, False. +# (boolean value) +#zfssa_lun_sparse=false + +# Data compression-off, lzjb, gzip-2, gzip, gzip-9. (string +# value) +#zfssa_lun_compression= + +# Synchronous write bias-latency, throughput. (string value) +#zfssa_lun_logbias= + +# iSCSI initiator group. (string value) +#zfssa_initiator_group= + +# iSCSI initiator IQNs. (comma separated) (string value) +#zfssa_initiator= + +# iSCSI initiator CHAP user. (string value) +#zfssa_initiator_user= + +# iSCSI initiator CHAP password. (string value) +#zfssa_initiator_password= + +# iSCSI target group name. (string value) +#zfssa_target_group=tgt-grp + +# iSCSI target CHAP user. (string value) +#zfssa_target_user= + +# iSCSI target CHAP password. (string value) +#zfssa_target_password= + +# iSCSI target portal (Data-IP:Port, w.x.y.z:3260). (string +# value) +#zfssa_target_portal= + +# Network interfaces of iSCSI targets. (comma separated) +# (string value) +#zfssa_target_interfaces= + +# REST connection timeout. (seconds) (integer value) +#zfssa_rest_timeout= + + +# +# Options defined in cinder.volume.manager +# + +# Driver to use for volume creation (string value) +volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver + +# Timeout for creating the volume to migrate to when +# performing volume migration (seconds) (integer value) +#migration_create_volume_timeout_secs=300 + +# Offload pending volume delete during volume service startup +# (boolean value) +#volume_service_inithost_offload=false + +# FC Zoning mode configured (string value) +#zoning_mode=none + +# User defined capabilities, a JSON formatted string +# specifying key/value pairs. 
(string value) +#extra_capabilities={} + + +[BRCD_FABRIC_EXAMPLE] + +# +# Options defined in cinder.zonemanager.drivers.brocade.brcd_fabric_opts +# + +# Management IP of fabric (string value) +#fc_fabric_address= + +# Fabric user ID (string value) +#fc_fabric_user= + +# Password for user (string value) +#fc_fabric_password= + +# Connecting port (integer value) +#fc_fabric_port=22 + +# overridden zoning policy (string value) +#zoning_policy=initiator-target + +# overridden zoning activation state (boolean value) +#zone_activate=true + +# overridden zone name prefix (string value) +#zone_name_prefix= + +# Principal switch WWN of the fabric (string value) +#principal_switch_wwn= + + +[CISCO_FABRIC_EXAMPLE] + +# +# Options defined in cinder.zonemanager.drivers.cisco.cisco_fabric_opts +# + +# Management IP of fabric (string value) +#cisco_fc_fabric_address= + +# Fabric user ID (string value) +#cisco_fc_fabric_user= + +# Password for user (string value) +#cisco_fc_fabric_password= + +# Connecting port (integer value) +#cisco_fc_fabric_port=22 + +# overridden zoning policy (string value) +#cisco_zoning_policy=initiator-target + +# overridden zoning activation state (boolean value) +#cisco_zone_activate=true + +# overridden zone name prefix (string value) +#cisco_zone_name_prefix= + +# VSAN of the Fabric (string value) +#cisco_zoning_vsan= + + +[database] + +# +# Options defined in oslo.db +# + +# The file name to use with SQLite. (string value) +#sqlite_db=oslo.sqlite + +# If True, SQLite uses synchronous mode. (boolean value) +#sqlite_synchronous=true + +# The back end to use for the database. (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend=sqlalchemy + +# The SQLAlchemy connection string to use to connect to the +# database. (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +connection=postgresql://{{ CINDER_DB_USER }}:{{ CINDER_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/cinder + +# The SQLAlchemy connection string to use to connect to the +# slave database. (string value) +#slave_connection= + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode=TRADITIONAL + +# Timeout before idle SQL connections are reaped. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# Maximum number of database connection retries during +# startup. Set to -1 to specify an infinite retry count. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries=10 + +# Interval between retries of opening a SQL connection. 
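+# A hedged back-of-envelope note, using the defaults shown in this
+# section (max_retries=10 above, retry_interval=10 below): a service
+# that cannot reach the database at startup retries for roughly
+# 10 * 10 = 100 seconds before erroring out, while max_retries=-1
+# retries indefinitely.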
+# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with SQLAlchemy. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information: 0=None, +# 100=Everything. (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug=0 + +# Add Python stack traces to SQL as comment strings. (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace=false + +# If set, use this value for pool_timeout with SQLAlchemy. +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + +# Enable the experimental use of database reconnect on +# connection lost. (boolean value) +#use_db_reconnect=false + +# Seconds between database connection retries. (integer value) +#db_retry_interval=1 + +# If True, increases the interval between database connection +# retries up to db_max_retry_interval. (boolean value) +#db_inc_retry_interval=true + +# If db_inc_retry_interval is set, the maximum seconds between +# database connection retries. (integer value) +#db_max_retry_interval=10 + +# Maximum database connection retries before error is raised. +# Set to -1 to specify an infinite retry count. (integer +# value) +#db_max_retries=20 + + +# +# Options defined in oslo.db.concurrency +# + +# Enable the experimental use of thread pooling for all DB API +# calls (boolean value) +# Deprecated group/name - [DEFAULT]/dbapi_use_tpool +#use_tpool=false + + +[fc-zone-manager] + +# +# Options defined in cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver +# + +# Southbound connector for zoning operation (string value) +#brcd_sb_connector=cinder.zonemanager.drivers.brocade.brcd_fc_zone_client_cli.BrcdFCZoneClientCLI + + +# +# Options defined in cinder.zonemanager.drivers.cisco.cisco_fc_zone_driver +# + +# Southbound connector for zoning operation (string value) +#cisco_sb_connector=cinder.zonemanager.drivers.cisco.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI + + +# +# Options defined in cinder.zonemanager.fc_zone_manager +# + +# FC Zone Driver responsible for zone management (string +# value) +#zone_driver=cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver + +# Zoning policy configured by user (string value) +#zoning_policy=initiator-target + +# Comma separated list of fibre channel fabric names. This +# list of names is used to retrieve other SAN credentials for +# connecting to each SAN fabric (string value) +#fc_fabric_names= + +# FC San Lookup Service (string value) +#fc_san_lookup_service=cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service.BrcdFCSanLookupService + + +[keymgr] + +# +# Options defined in cinder.keymgr +# + +# The full class name of the key manager API class (string +# value) +#api_class=cinder.keymgr.conf_key_mgr.ConfKeyManager + + +# +# Options defined in cinder.keymgr.conf_key_mgr +# + +# Fixed key returned by key manager, specified in hex (string +# value) +#fixed_key= + + +# +# Options defined in cinder.keymgr.key_mgr +# + +# Authentication url for encryption service. (string value) +#encryption_auth_url=http://localhost:5000/v2.0 + +# Url for encryption service. 
(string value) +#encryption_api_url=http://localhost:9311/v1 + + +[keystone_authtoken] + +# +# Options defined in keystonemiddleware.auth_token +# + +# Prefix to prepend at the beginning of the path. Deprecated, +# use identity_uri. (string value) +#auth_admin_prefix= + +# Host providing the admin Identity API endpoint. Deprecated, +# use identity_uri. (string value) +#auth_host=127.0.0.1 + +# Port of the admin Identity API endpoint. Deprecated, use +# identity_uri. (integer value) +#auth_port=35357 + +# Protocol of the admin Identity API endpoint (http or https). +# Deprecated, use identity_uri. (string value) +#auth_protocol=https + +# Complete public Identity API endpoint (string value) +auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 + +# Complete admin Identity API endpoint. This should specify +# the unversioned root endpoint e.g. https://localhost:35357/ +# (string value) +identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357 + +# API version of the admin Identity API endpoint (string +# value) +#auth_version= + +# Do not handle authorization requests within the middleware, +# but delegate the authorization decision to downstream WSGI +# components (boolean value) +#delay_auth_decision=false + +# Request timeout value for communicating with Identity API +# server. (boolean value) +#http_connect_timeout= + +# How many times are we trying to reconnect when communicating +# with Identity API Server. (integer value) +#http_request_max_retries=3 + +# This option is deprecated and may be removed in a future +# release. Single shared secret with the Keystone +# configuration used for bootstrapping a Keystone +# installation, or otherwise bypassing the normal +# authentication process. This option should not be used, use +# `admin_user` and `admin_password` instead. (string value) +#admin_token= + +# Keystone account username (string value) +admin_user={{ CINDER_SERVICE_USER }} + +# Keystone account password (string value) +admin_password={{ CINDER_SERVICE_PASSWORD }} + +# Keystone service account tenant name to validate user tokens +# (string value) +admin_tenant_name=service + +# Env key for the swift cache (string value) +#cache= + +# Required if Keystone server requires client certificate +# (string value) +#certfile= + +# Required if Keystone server requires client certificate +# (string value) +#keyfile= + +# A PEM encoded Certificate Authority to use when verifying +# HTTPs connections. Defaults to system CAs. (string value) +#cafile= + +# Verify HTTPS connections. (boolean value) +#insecure=false + +# Directory used to cache files related to PKI tokens (string +# value) +#signing_dir= + +# Optionally specify a list of memcached server(s) to use for +# caching. If left undefined, tokens will instead be cached +# in-process. (list value) +# Deprecated group/name - [DEFAULT]/memcache_servers +#memcached_servers= + +# In order to prevent excessive effort spent validating +# tokens, the middleware caches previously-seen tokens for a +# configurable duration (in seconds). Set to -1 to disable +# caching completely. (integer value) +#token_cache_time=300 + +# Determines the frequency at which the list of revoked tokens +# is retrieved from the Identity service (in seconds). A high +# number of revocation events combined with a low cache +# duration may significantly reduce performance. (integer +# value) +#revocation_cache_time=10 + +# (optional) if defined, indicate whether token data should be +# authenticated or authenticated and encrypted. 
Acceptable +# values are MAC or ENCRYPT. If MAC, token data is +# authenticated (with HMAC) in the cache. If ENCRYPT, token +# data is encrypted and authenticated in the cache. If the +# value is not one of these options or empty, auth_token will +# raise an exception on initialization. (string value) +#memcache_security_strategy= + +# (optional, mandatory if memcache_security_strategy is +# defined) this string is used for key derivation. (string +# value) +#memcache_secret_key= + +# (optional) number of seconds memcached server is considered +# dead before it is tried again. (integer value) +#memcache_pool_dead_retry=300 + +# (optional) max total number of open connections to every +# memcached server. (integer value) +#memcache_pool_maxsize=10 + +# (optional) socket timeout in seconds for communicating with +# a memcache server. (integer value) +#memcache_pool_socket_timeout=3 + +# (optional) number of seconds a connection to memcached is +# held unused in the pool before it is closed. (integer value) +#memcache_pool_unused_timeout=60 + +# (optional) number of seconds that an operation will wait to +# get a memcache client connection from the pool. (integer +# value) +#memcache_pool_conn_get_timeout=10 + +# (optional) use the advanced (eventlet safe) memcache client +# pool. The advanced pool will only work under python 2.x. +# (boolean value) +#memcache_use_advanced_pool=false + +# (optional) indicate whether to set the X-Service-Catalog +# header. If False, middleware will not ask for service +# catalog on token validation and will not set the X-Service- +# Catalog header. (boolean value) +#include_service_catalog=true + +# Used to control the use and type of token binding. Can be +# set to: "disabled" to not check token binding. "permissive" +# (default) to validate binding information if the bind type +# is of a form known to the server and ignore it if not. +# "strict" like "permissive" but if the bind type is unknown +# the token will be rejected. "required" any form of token +# binding is needed to be allowed. Finally the name of a +# binding method that must be present in tokens. (string +# value) +#enforce_token_bind=permissive + +# If true, the revocation list will be checked for cached +# tokens. This requires that PKI tokens are configured on the +# Keystone server. (boolean value) +#check_revocations_for_cached=false + +# Hash algorithms to use for hashing PKI tokens. This may be a +# single algorithm or multiple. The algorithms are those +# supported by Python standard hashlib.new(). The hashes will +# be tried in the order given, so put the preferred one first +# for performance. The result of the first hash will be stored +# in the cache. This will typically be set to multiple values +# only while migrating from a less secure algorithm to a more +# secure one. Once all the old tokens are expired this option +# should be set to a single value for better performance. +# (list value) +#hash_algorithms=md5 + + +[matchmaker_redis] + +# +# Options defined in oslo.messaging +# + +# Host to locate redis. (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + + +[matchmaker_ring] + +# +# Options defined in oslo.messaging +# + +# Matchmaker ring file (JSON). 
(string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + +[oslo_messaging_amqp] + +# +# Options defined in oslo.messaging +# +# NOTE: Options in this group are supported when using oslo.messaging >=1.5.0. + +# address prefix used when sending to a specific server +# (string value) +#server_request_prefix=exclusive + +# address prefix used when broadcasting to all servers (string +# value) +#broadcast_prefix=broadcast + +# address prefix when sending to any server in group (string +# value) +#group_request_prefix=unicast + +# Name for the AMQP container (string value) +#container_name= + +# Timeout for inactive connections (in seconds) (integer +# value) +#idle_timeout=0 + +# Debug: dump AMQP frames to stdout (boolean value) +#trace=false + +# CA certificate PEM file for verifing server certificate +# (string value) +#ssl_ca_file= + +# Identifying certificate PEM file to present to clients +# (string value) +#ssl_cert_file= + +# Private key PEM file used to sign cert_file certificate +# (string value) +#ssl_key_file= + +# Password for decrypting ssl_key_file (if encrypted) (string +# value) +#ssl_key_password= + +# Accept clients using either SSL or plain TCP (boolean value) +#allow_insecure_clients=false + + +[profiler] + +# +# Options defined in cinder.service +# + +# If False fully disable profiling feature. (boolean value) +#profiler_enabled=false + +# If False doesn't trace SQL requests. (boolean value) +#trace_sqlalchemy=false + + +[ssl] + +# +# Options defined in cinder.openstack.common.sslutils +# + +# CA certificate file to use to verify connecting clients +# (string value) +#ca_file= + +# Certificate file to use when starting the server securely +# (string value) +#cert_file= + +# Private key file to use when starting the server securely +# (string value) +#key_file= + + diff --git a/install-files/openstack/usr/share/openstack/cinder/policy.json b/install-files/openstack/usr/share/openstack/cinder/policy.json new file mode 100644 index 00000000..8f3a7b2f --- /dev/null +++ b/install-files/openstack/usr/share/openstack/cinder/policy.json @@ -0,0 +1,80 @@ +{ + "context_is_admin": "role:admin", + "admin_or_owner": "is_admin:True or project_id:%(project_id)s", + "default": "rule:admin_or_owner", + + "admin_api": "is_admin:True", + + "volume:create": "", + "volume:get_all": "", + "volume:get_volume_metadata": "", + "volume:get_volume_admin_metadata": "rule:admin_api", + "volume:delete_volume_admin_metadata": "rule:admin_api", + "volume:update_volume_admin_metadata": "rule:admin_api", + "volume:get_snapshot": "", + "volume:get_all_snapshots": "", + "volume:extend": "", + "volume:update_readonly_flag": "", + "volume:retype": "", + + "volume_extension:types_manage": "rule:admin_api", + "volume_extension:types_extra_specs": "rule:admin_api", + "volume_extension:volume_type_encryption": "rule:admin_api", + "volume_extension:volume_encryption_metadata": "rule:admin_or_owner", + "volume_extension:extended_snapshot_attributes": "", + "volume_extension:volume_image_metadata": "", + + "volume_extension:quotas:show": "", + "volume_extension:quotas:update": "rule:admin_api", + "volume_extension:quota_classes": "", + + "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", + "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", + "volume_extension:backup_admin_actions:reset_status": "rule:admin_api", + "volume_extension:volume_admin_actions:force_delete": "rule:admin_api", + 
"volume_extension:volume_admin_actions:force_detach": "rule:admin_api", + "volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api", + "volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api", + "volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api", + + "volume_extension:volume_host_attribute": "rule:admin_api", + "volume_extension:volume_tenant_attribute": "rule:admin_or_owner", + "volume_extension:volume_mig_status_attribute": "rule:admin_api", + "volume_extension:hosts": "rule:admin_api", + "volume_extension:services": "rule:admin_api", + + "volume_extension:volume_manage": "rule:admin_api", + "volume_extension:volume_unmanage": "rule:admin_api", + + "volume:services": "rule:admin_api", + + "volume:create_transfer": "", + "volume:accept_transfer": "", + "volume:delete_transfer": "", + "volume:get_all_transfers": "", + + "volume_extension:replication:promote": "rule:admin_api", + "volume_extension:replication:reenable": "rule:admin_api", + + "backup:create" : "", + "backup:delete": "", + "backup:get": "", + "backup:get_all": "", + "backup:restore": "", + "backup:backup-import": "rule:admin_api", + "backup:backup-export": "rule:admin_api", + + "snapshot_extension:snapshot_actions:update_snapshot_status": "", + + "consistencygroup:create" : "group:nobody", + "consistencygroup:delete": "group:nobody", + "consistencygroup:get": "group:nobody", + "consistencygroup:get_all": "group:nobody", + + "consistencygroup:create_cgsnapshot" : "", + "consistencygroup:delete_cgsnapshot": "", + "consistencygroup:get_cgsnapshot": "", + "consistencygroup:get_all_cgsnapshots": "", + + "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api" +} diff --git a/install-files/openstack/usr/share/openstack/extras/00-disable-device.network b/install-files/openstack/usr/share/openstack/extras/00-disable-device.network new file mode 100644 index 00000000..8e2532d0 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/extras/00-disable-device.network @@ -0,0 +1,2 @@ +[Match] +Name={{ item }} diff --git a/install-files/openstack/usr/share/openstack/extras/60-device-dhcp.network b/install-files/openstack/usr/share/openstack/extras/60-device-dhcp.network new file mode 100644 index 00000000..6fdbfd8d --- /dev/null +++ b/install-files/openstack/usr/share/openstack/extras/60-device-dhcp.network @@ -0,0 +1,5 @@ +[Match] +Name={{ item }} + +[Network] +DHCP=yes diff --git a/install-files/openstack/usr/share/openstack/glance.yml b/install-files/openstack/usr/share/openstack/glance.yml new file mode 100644 index 00000000..aa7e4c78 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/glance.yml @@ -0,0 +1,93 @@ +--- +- hosts: localhost + vars_files: + - "/etc/openstack/glance.conf" + tasks: + - name: Create the glance user. 
+ user: + name: glance + comment: Openstack Glance Daemons + shell: /sbin/nologin + home: /var/lib/glance + + - name: Create the /var folders for glance + file: + path: "{{ item }}" + state: directory + owner: glance + group: glance + with_items: + - /var/run/glance + - /var/lock/glance + - /var/log/glance + - /var/lib/glance + - /var/lib/glance/images + - /var/lib/glance/image-cache + + - name: Create /etc/glance directory + file: + path: /etc/glance + state: directory + + - name: Add the configuration needed for glance in /etc/glance using templates + template: + src: /usr/share/openstack/glance/{{ item }} + dest: /etc/glance/{{ item }} + with_lines: + - cd /usr/share/openstack/glance && find -type f + + - name: Create glance service user in service tenant + keystone_user: + user: "{{ GLANCE_SERVICE_USER }}" + password: "{{ GLANCE_SERVICE_PASSWORD }}" + tenant: service + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Assign admin role to glances service user in the service tenant + keystone_user: + role: admin + user: "{{ GLANCE_SERVICE_USER }}" + tenant: service + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Add glance endpoint + keystone_service: + name: glance + type: image + description: Openstack Image Service + publicurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9292 + internalurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9292 + adminurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9292 + region: regionOne + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Create postgresql user for glance + postgresql_user: + name: "{{ GLANCE_DB_USER }}" + login_host: "{{ CONTROLLER_HOST_ADDRESS }}" + password: "{{ GLANCE_DB_PASSWORD }}" + sudo: yes + sudo_user: glance + + - name: Create database for glance services + postgresql_db: + name: glance + owner: "{{ GLANCE_DB_USER }}" + login_host: "{{ CONTROLLER_HOST_ADDRESS }}" + sudo: yes + sudo_user: glance + + - name: Initiate glance database + glance_manage: + action: dbsync + sudo: yes + sudo_user: glance + + - name: Enable and start openstack-glance services + service: + name: "{{ item }}" + enabled: yes + state: started + with_items: + - openstack-glance-api.service + - openstack-glance-registry.service diff --git a/install-files/openstack/usr/share/openstack/glance/glance-api-paste.ini b/install-files/openstack/usr/share/openstack/glance/glance-api-paste.ini new file mode 100644 index 00000000..86a4cdb1 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/glance/glance-api-paste.ini @@ -0,0 +1,77 @@ +# Use this pipeline for no auth or image caching - DEFAULT +[pipeline:glance-api] +pipeline = versionnegotiation osprofiler unauthenticated-context rootapp + +# Use this pipeline for image caching and no auth +[pipeline:glance-api-caching] +pipeline = versionnegotiation osprofiler unauthenticated-context cache rootapp + +# Use this pipeline for caching w/ management interface but no auth +[pipeline:glance-api-cachemanagement] +pipeline = versionnegotiation osprofiler unauthenticated-context cache cachemanage rootapp + +# Use this pipeline for keystone auth +[pipeline:glance-api-keystone] +pipeline = versionnegotiation osprofiler authtoken context rootapp + +# Use this pipeline for keystone auth with image caching +[pipeline:glance-api-keystone+caching] +pipeline = versionnegotiation osprofiler authtoken context cache rootapp + +# Use this pipeline for keystone auth with caching and cache management +[pipeline:glance-api-keystone+cachemanagement] +pipeline = versionnegotiation osprofiler authtoken context cache 
cachemanage rootapp + +# Use this pipeline for authZ only. This means that the registry will treat a +# user as authenticated without making requests to keystone to reauthenticate +# the user. +[pipeline:glance-api-trusted-auth] +pipeline = versionnegotiation osprofiler context rootapp + +# Use this pipeline for authZ only. This means that the registry will treat a +# user as authenticated without making requests to keystone to reauthenticate +# the user and uses cache management +[pipeline:glance-api-trusted-auth+cachemanagement] +pipeline = versionnegotiation osprofiler context cache cachemanage rootapp + +[composite:rootapp] +paste.composite_factory = glance.api:root_app_factory +/: apiversions +/v1: apiv1app +/v2: apiv2app + +[app:apiversions] +paste.app_factory = glance.api.versions:create_resource + +[app:apiv1app] +paste.app_factory = glance.api.v1.router:API.factory + +[app:apiv2app] +paste.app_factory = glance.api.v2.router:API.factory + +[filter:versionnegotiation] +paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory + +[filter:cache] +paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory + +[filter:cachemanage] +paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory + +[filter:context] +paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory + +[filter:unauthenticated-context] +paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory +delay_auth_decision = true + +[filter:gzip] +paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory + +[filter:osprofiler] +paste.filter_factory = osprofiler.web:WsgiMiddleware.factory +hmac_keys = SECRET_KEY +enabled = yes diff --git a/install-files/openstack/usr/share/openstack/glance/glance-api.conf b/install-files/openstack/usr/share/openstack/glance/glance-api.conf new file mode 100644 index 00000000..39257a6d --- /dev/null +++ b/install-files/openstack/usr/share/openstack/glance/glance-api.conf @@ -0,0 +1,699 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +#verbose = False + +# Show debugging output in logs (sets DEBUG log level output) +#debug = False + +# Which backend scheme should Glance use by default is not specified +# in a request to add a new image to Glance? Known schemes are determined +# by the known_stores option below. +# Default: 'file' +default_store = file + +# Maximum image size (in bytes) that may be uploaded through the +# Glance API server. Defaults to 1 TB. +# WARNING: this value should only be increased after careful consideration +# and must be set to a value under 8 EB (9223372036854775808). +#image_size_cap = 1099511627776 + +# Address to bind the API server +bind_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} + +# Port the bind the API server to +bind_port = 9292 + +# Log to this file. Make sure you do not set the same log file for both the API +# and registry servers! +# +# If `log_file` is omitted and `use_syslog` is false, then log messages are +# sent to stdout as a fallback. +# log_file = /var/log/glance/api.log + +# Backlog requests when creating socket +backlog = 4096 + +# TCP_KEEPIDLE value in seconds when creating socket. +# Not supported on OS X. +#tcp_keepidle = 600 + +# API to use for accessing data. 
Default value points to sqlalchemy +# package, it is also possible to use: glance.db.registry.api +# data_api = glance.db.sqlalchemy.api + +# The number of child process workers that will be +# created to service API requests. The default will be +# equal to the number of CPUs available. (integer value) +#workers = 4 + +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large tokens +# (typically those generated by the Keystone v3 API with big service +# catalogs) +# max_header_line = 16384 + +# Role used to identify an authenticated user as administrator +#admin_role = admin + +# Allow unauthenticated users to access the API with read-only +# privileges. This only applies when using ContextMiddleware. +#allow_anonymous_access = False + +# Allow access to version 1 of glance api +#enable_v1_api = True + +# Allow access to version 2 of glance api +#enable_v2_api = True + +# Return the URL that references where the data is stored on +# the backend storage system. For example, if using the +# file system store a URL of 'file:///path/to/image' will +# be returned to the user in the 'direct_url' meta-data field. +# The default value is false. +#show_image_direct_url = False + +# Send headers containing user and tenant information when making requests to +# the v1 glance registry. This allows the registry to function as if a user is +# authenticated without the need to authenticate a user itself using the +# auth_token middleware. +# The default value is false. +#send_identity_headers = False + +# Supported values for the 'container_format' image attribute +#container_formats=ami,ari,aki,bare,ovf,ova + +# Supported values for the 'disk_format' image attribute +#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso + +# Directory to use for lock files. Default to a temp directory +# (string value). This setting needs to be the same for both +# glance-scrubber and glance-api. +#lock_path= + +# Property Protections config file +# This file contains the rules for property protections and the roles/policies +# associated with it. +# If this config value is not specified, by default, property protections +# won't be enforced. +# If a value is specified and the file is not found, then the glance-api +# service will not start. +#property_protection_file = + +# Specify whether 'roles' or 'policies' are used in the +# property_protection_file. +# The default value for property_protection_rule_format is 'roles'. +#property_protection_rule_format = roles + +# This value sets what strategy will be used to determine the image location +# order. Currently two strategies are packaged with Glance 'location_order' +# and 'store_type'. +#location_strategy = location_order + +# ================= Syslog Options ============================ + +# Send logs to syslog (/dev/log) instead of to file specified +# by `log_file` +use_syslog = True + +# Facility to use. If unset defaults to LOG_USER. 
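+# Because use_syslog = True above sends records to /dev/log, the
+# facility set here controls how the host's syslog daemon routes them.
+# A hedged rsyslog sketch (assuming rsyslog; the target path is
+# illustrative only):
+#   local0.*   /var/log/glance/api.log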
+#syslog_log_facility = LOG_LOCAL0 + +# ================= SSL Options =============================== + +# Certificate file to use when starting API server securely +#cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +#key_file = /path/to/keyfile + +# CA certificate file to use to verify connecting clients +#ca_file = /path/to/cafile + +# ================= Security Options ========================== + +# AES key for encrypting store 'location' metadata, including +# -- if used -- Swift or S3 credentials +# Should be set to a random string of length 16, 24 or 32 bytes +#metadata_encryption_key = <16, 24 or 32 char registry metadata key> + +# ============ Registry Options =============================== + +# Address to find the registry server +registry_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} + +# Port the registry server is listening on +registry_port = 9191 + +# What protocol to use when connecting to the registry server? +# Set to https for secure HTTP communication +registry_client_protocol = http + +# The path to the key file to use in SSL connections to the +# registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file +#registry_client_key_file = /path/to/key/file + +# The path to the cert file to use in SSL connections to the +# registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file +#registry_client_cert_file = /path/to/cert/file + +# The path to the certifying authority cert file to use in SSL connections +# to the registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file +#registry_client_ca_file = /path/to/ca/file + +# When using SSL in connections to the registry server, do not require +# validation via a certifying authority. This is the registry's equivalent of +# specifying --insecure on the command line using glanceclient for the API +# Default: False +#registry_client_insecure = False + +# The period of time, in seconds, that the API server will wait for a registry +# request to complete. A value of '0' implies no timeout. +# Default: 600 +#registry_client_timeout = 600 + +# Whether to automatically create the database tables. +# Default: False +#db_auto_create = False + +# Enable DEBUG log messages from sqlalchemy which prints every database +# query and response. +# Default: False +#sqlalchemy_debug = True + +# Pass the user's token through for API requests to the registry. +# Default: True +#use_user_token = True + +# If 'use_user_token' is not in effect then admin credentials +# can be specified. Requests to the registry on behalf of +# the API will use these credentials. +# Admin user name +#admin_user = None +# Admin password +#admin_password = None +# Admin tenant name +#admin_tenant_name = None +# Keystone endpoint +#auth_url = None +# Keystone region +#auth_region = None +# Auth strategy +#auth_strategy = keystone + +# ============ Notification System Options ===================== + +# Driver or drivers to handle sending notifications. Set to +# 'messaging' to send notifications to a message queue. +notification_driver = messagingv2 + +# Default publisher_id for outgoing notifications. 
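+# The publisher_id is stamped on every notification envelope. A hedged
+# sketch of what a consumer on the notifications topic might receive
+# (all field values are illustrative):
+#   {"event_type": "image.upload", "publisher_id": "image.localhost",
+#    "priority": "INFO", "payload": {"id": "...", "status": "active"}}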
+# default_publisher_id = image.localhost + +# Messaging driver used for 'messaging' notifications driver +rpc_backend=rabbit + +# Configuration options if sending notifications via rabbitmq +rabbit_host = {{ RABBITMQ_HOST }} +rabbit_port = {{ RABBITMQ_PORT }} +rabbit_use_ssl = false +rabbit_userid = {{ RABBITMQ_USER }} +rabbit_password = {{ RABBITMQ_PASSWORD }} +rabbit_virtual_host = / +rabbit_notification_exchange = glance +rabbit_notification_topic = notifications +rabbit_durable_queues = False + +# Configuration options if sending notifications via Qpid (these are +# the defaults) +#qpid_notification_exchange = glance +#qpid_notification_topic = notifications +#qpid_hostname = localhost +#qpid_port = 5672 +#qpid_username = +#qpid_password = +#qpid_sasl_mechanisms = +#qpid_reconnect_timeout = 0 +#qpid_reconnect_limit = 0 +#qpid_reconnect_interval_min = 0 +#qpid_reconnect_interval_max = 0 +#qpid_reconnect_interval = 0 +#qpid_heartbeat = 5 +# Set to 'ssl' to enable SSL +#qpid_protocol = tcp +#qpid_tcp_nodelay = True + +# ============ Delayed Delete Options ============================= + +# Turn on/off delayed delete +delayed_delete = False + +# Delayed delete time in seconds +scrub_time = 43200 + +# Directory that the scrubber will use to remind itself of what to delete +# Make sure this is also set in glance-scrubber.conf +scrubber_datadir = /var/lib/glance/scrubber + +# =============== Quota Options ================================== + +# The maximum number of image members allowed per image +#image_member_quota = 128 + +# The maximum number of image properties allowed per image +#image_property_quota = 128 + +# The maximum number of tags allowed per image +#image_tag_quota = 128 + +# The maximum number of locations allowed per image +#image_location_quota = 10 + +# Set a system wide quota for every user. This value is the total number +# of bytes that a user can use across all storage systems. A value of +# 0 means unlimited. +#user_storage_quota = 0 + +# =============== Image Cache Options ============================= + +# Base directory that the Image Cache uses +image_cache_dir = /var/lib/glance/image-cache/ + +# =============== Database Options ================================= + +[database] +# The file name to use with SQLite (string value) +#sqlite_db = oslo.sqlite + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous = True + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend = sqlalchemy + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = +connection=postgresql://{{ GLANCE_DB_USER }}:{{ GLANCE_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/glance + + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. 
Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode = TRADITIONAL + +# Timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout = 3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = + +# Maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = + +# Verbosity of SQL debugging information. 0=None, +# 100=Everything (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Enable the experimental use of database reconnect on +# connection lost (boolean value) +#use_db_reconnect = False + +# seconds between db connection retries (integer value) +#db_retry_interval = 1 + +# Whether to increase interval between db connection retries, +# up to db_max_retry_interval (boolean value) +#db_inc_retry_interval = True + +# max seconds between db connection retries, if +# db_inc_retry_interval is enabled (integer value) +#db_max_retry_interval = 10 + +# maximum db connection retries before error is raised. +# (setting -1 implies an infinite retry count) (integer value) +#db_max_retries = 20 + +[keystone_authtoken] +auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 +identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357 +admin_tenant_name = service +admin_user = {{ GLANCE_SERVICE_USER }} +admin_password = {{ GLANCE_SERVICE_PASSWORD }} +revocation_cache_time = 10 + +[paste_deploy] +# Name of the paste configuration file that defines the available pipelines +#config_file = glance-api-paste.ini + +# Partial name of a pipeline in your paste configuration file with the +# service name removed. For example, if your paste section name is +# [pipeline:glance-api-keystone], you would configure the flavor below +# as 'keystone'. +flavor=keystone + +[store_type_location_strategy] +# The scheme list to use to get store preference order. The scheme must be +# registered by one of the stores defined by the 'known_stores' config option. +# This option will be applied when you using 'store_type' option as image +# location strategy defined by the 'location_strategy' config option. 
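+# A hedged example (the store names are illustrative and must be
+# registered via the 'known_stores' option for this to take effect):
+#   store_type_preference = filesystem, http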
+#store_type_preference = + +[profiler] +# If False fully disable profiling feature. +#enabled = False + +# If False doesn't trace SQL requests. +#trace_sqlalchemy = False + +[task] +# ================= Glance Tasks Options ============================ + +# Specifies how long (in hours) a task is supposed to live in the tasks DB +# after succeeding or failing before getting soft-deleted. +# The default value for task_time_to_live is 48 hours. +# task_time_to_live = 48 + +# Specifies which task executor to be used to run the task scripts. +# The default value for task_executor is eventlet. +# task_executor = eventlet + +# Specifies the maximum number of eventlet threads which can be spun up by +# the eventlet based task executor to perform execution of Glance tasks. +# eventlet_executor_pool_size = 1000 + +[glance_store] +# List of which store classes and store class locations are +# currently known to glance at startup. +# Existing but disabled stores: +# glance.store.rbd.Store, +# glance.store.s3.Store, +# glance.store.swift.Store, +# glance.store.sheepdog.Store, +# glance.store.cinder.Store, +# glance.store.gridfs.Store, +# glance.store.vmware_datastore.Store, +#stores = glance.store.filesystem.Store, +# glance.store.http.Store + +# ============ Filesystem Store Options ======================== + +# Directory that the Filesystem backend store +# writes image data to +filesystem_store_datadir = /var/lib/glance/images/ + +# A list of directories where image data can be stored. +# This option may be specified multiple times for specifying multiple store +# directories. Either one of filesystem_store_datadirs or +# filesystem_store_datadir option is required. A priority number may be given +# after each directory entry, separated by a ":". +# When adding an image, the highest priority directory will be selected, unless +# there is not enough space available in cases where the image size is already +# known. If no priority is given, it is assumed to be zero and the directory +# will be considered for selection last. If multiple directories have the same +# priority, then the one with the most free space available is selected. +# If same store is specified multiple times then BadStoreConfiguration +# exception will be raised. +#filesystem_store_datadirs = /var/lib/glance/images/:1 + +# A path to a JSON file that contains metadata describing the storage +# system. When show_multiple_locations is True the information in this +# file will be returned with any location that is contained in this +# store. 
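+#
+# A minimal sketch of such a file (the keys shown are assumptions for
+# illustration; consult the glance_store documentation for the exact
+# schema expected by your release):
+#   {"id": "fs-store-1", "mountpoint": "/var/lib/glance/images/"}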
+#filesystem_store_metadata_file = None + +# ============ Swift Store Options ============================= + +# Version of the authentication service to use +# Valid versions are '2' for keystone and '1' for swauth and rackspace +swift_store_auth_version = 2 + +# Address where the Swift authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'https://' +# For swauth, use something like '127.0.0.1:8080/v1.0/' +swift_store_auth_address = 127.0.0.1:5000/v2.0/ + +# User to authenticate against the Swift authentication service +# If you use Swift authentication service, set it to 'account':'user' +# where 'account' is a Swift storage account and 'user' +# is a user in that account +swift_store_user = jdoe:jdoe + +# Auth key for the user authenticating against the +# Swift authentication service +swift_store_key = a86850deb2742ec3cb41518e26aa2d89 + +# Container within the account that the account should use +# for storing images in Swift +swift_store_container = glance + +# Do we create the container if it does not exist? +swift_store_create_container_on_put = False + +# What size, in MB, should Glance start chunking image files +# and do a large object manifest in Swift? By default, this is +# the maximum object size in Swift, which is 5GB +swift_store_large_object_size = 5120 + +# swift_store_config_file = glance-swift.conf +# This file contains references for each of the configured +# Swift accounts/backing stores. If used, this option can prevent +# credentials being stored in the database. Using Swift references +# is disabled if this config is left blank. + +# The reference to the default Swift parameters to use for adding new images. +# default_swift_reference = 'ref1' + +# When doing a large object manifest, what size, in MB, should +# Glance write chunks to Swift? This amount of data is written +# to a temporary disk buffer during the process of chunking +# the image file, and the default is 200MB +swift_store_large_object_chunk_size = 200 + +# Whether to use ServiceNET to communicate with the Swift storage servers. +# (If you aren't RACKSPACE, leave this False!) +# +# To use ServiceNET for authentication, prefix hostname of +# `swift_store_auth_address` with 'snet-'. +# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/ +swift_enable_snet = False + +# If set to True enables multi-tenant storage mode which causes Glance images +# to be stored in tenant specific Swift accounts. +#swift_store_multi_tenant = False + +# A list of swift ACL strings that will be applied as both read and +# write ACLs to the containers created by Glance in multi-tenant +# mode. This grants the specified tenants/users read and write access +# to all newly created image objects. The standard swift ACL string +# formats are allowed, including: +# : +# : +# *: +# Multiple ACLs can be combined using a comma separated list, for +# example: swift_store_admin_tenants = service:glance,*:admin +#swift_store_admin_tenants = + +# The region of the swift endpoint to be used for single tenant. This setting +# is only necessary if the tenant has multiple swift endpoints. +#swift_store_region = + +# If set to False, disables SSL layer compression of https swift requests. +# Setting to 'False' may improve performance for images which are already +# in a compressed format, eg qcow2. If set to True, enables SSL layer +# compression (provided it is supported by the target swift proxy). 
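+#
+# Usage note: image formats that are already compressed (e.g. qcow2, as
+# mentioned above) gain little from SSL-layer compression, which is why
+# a deployment serving mostly such images might set this to False.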
+#swift_store_ssl_compression = True + +# The number of times a Swift download will be retried before the +# request fails +#swift_store_retry_get_count = 0 + +# Bypass SSL verification for Swift +#swift_store_auth_insecure = False + +# ============ S3 Store Options ============================= + +# Address where the S3 authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'http://' +s3_store_host = 127.0.0.1:8080/v1.0/ + +# User to authenticate against the S3 authentication service +s3_store_access_key = <20-char AWS access key> + +# Auth key for the user authenticating against the +# S3 authentication service +s3_store_secret_key = <40-char AWS secret key> + +# Container within the account that the account should use +# for storing images in S3. Note that S3 has a flat namespace, +# so you need a unique bucket name for your glance images. An +# easy way to do this is append your AWS access key to "glance". +# S3 buckets in AWS *must* be lowercased, so remember to lowercase +# your AWS access key if you use it in your bucket name below! +s3_store_bucket = glance + +# Do we create the bucket if it does not exist? +s3_store_create_bucket_on_put = False + +# When sending images to S3, the data will first be written to a +# temporary buffer on disk. By default the platform's temporary directory +# will be used. If required, an alternative directory can be specified here. +#s3_store_object_buffer_dir = /path/to/dir + +# When forming a bucket url, boto will either set the bucket name as the +# subdomain or as the first token of the path. Amazon's S3 service will +# accept it as the subdomain, but Swift's S3 middleware requires it be +# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'. +#s3_store_bucket_url_format = subdomain + +# Size, in MB, should S3 start chunking image files +# and do a multipart upload in S3. The default is 100MB. +#s3_store_large_object_size = 100 + +# Multipart upload part size, in MB, should S3 use when uploading +# parts. The size must be greater than or equal to +# 5MB. The default is 10MB. +#s3_store_large_object_chunk_size = 10 + +# The number of thread pools to perform a multipart upload +# in S3. The default is 10. +#s3_store_thread_pools = 10 + +# ============ RBD Store Options ============================= + +# Ceph configuration file path +# If using cephx authentication, this file should +# include a reference to the right keyring +# in a client. section +#rbd_store_ceph_conf = /etc/ceph/ceph.conf + +# RADOS user to authenticate as (only applicable if using cephx) +# If , a default will be chosen based on the client. section +# in rbd_store_ceph_conf +#rbd_store_user = + +# RADOS pool in which images are stored +#rbd_store_pool = images + +# RADOS images will be chunked into objects of this size (in megabytes). +# For best performance, this should be a power of two +#rbd_store_chunk_size = 8 + +# ============ Sheepdog Store Options ============================= + +sheepdog_store_address = localhost + +sheepdog_store_port = 7000 + +# Images will be chunked into objects of this size (in megabytes). 
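+#
+# Worked example (illustrative): with the value of 64 used below, a
+# 1024 MB image is stored as 16 chunk objects of 64 MB each.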
+# For best performance, this should be a power of two
+sheepdog_store_chunk_size = 64
+
+# ============ Cinder Store Options ===============================
+
+# Info to match when looking for cinder in the service catalog
+# Format is ':' separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v1/%(project_id)s (string value)
+#cinder_endpoint_template =
+
+# Region name of this node (string value)
+#os_region_name =
+
+# Location of ca certificates file to use for cinder client requests
+# (string value)
+#cinder_ca_certificates_file =
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = False
+
+# ============ VMware Datastore Store Options =====================
+
+# ESX/ESXi or vCenter Server target system.
+# The server value can be an IP address or a DNS name
+# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com
+#vmware_server_host =
+
+# Server username (string value)
+#vmware_server_username =
+
+# Server password (string value)
+#vmware_server_password =
+
+# Inventory path to a datacenter (string value)
+# Value optional when vmware_server_ip is an ESX/ESXi host: if specified
+# should be `ha-datacenter`.
+#vmware_datacenter_path =
+
+# Datastore associated with the datacenter (string value)
+#vmware_datastore_name =
+
+# The number of times we retry on failures
+# e.g., socket error, etc (integer value)
+#vmware_api_retry_count = 10
+
+# The interval used for polling remote tasks
+# invoked on VMware ESX/VC server in seconds (integer value)
+#vmware_task_poll_interval = 5
+
+# Absolute path of the folder containing the images in the datastore
+# (string value)
+#vmware_store_image_dir = /openstack_glance
+
+# Allow to perform insecure SSL requests to the target system (boolean value)
+#vmware_api_insecure = False
diff --git a/install-files/openstack/usr/share/openstack/glance/glance-cache.conf b/install-files/openstack/usr/share/openstack/glance/glance-cache.conf
new file mode 100644
index 00000000..3f2d4603
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/glance/glance-cache.conf
@@ -0,0 +1,200 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
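+#
+# Note: `use_syslog = True` is set a few lines below, so messages go to
+# syslog and the commented `log_file` path below is effectively unused.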
+# log_file = /var/log/glance/image-cache.log + +# Send logs to syslog (/dev/log) instead of to file specified by `log_file` +use_syslog = True + +# Directory that the Image Cache writes data to +image_cache_dir = /var/lib/glance/image-cache/ + +# Number of seconds after which we should consider an incomplete image to be +# stalled and eligible for reaping +image_cache_stall_time = 86400 + +# Max cache size in bytes +image_cache_max_size = 10737418240 + +# Address to find the registry server +registry_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} + +# Port the registry server is listening on +registry_port = 9191 + +# Auth settings if using Keystone +# auth_url = http://127.0.0.1:5000/v2.0/ +# admin_tenant_name = %SERVICE_TENANT_NAME% +# admin_user = %SERVICE_USER% +# admin_password = %SERVICE_PASSWORD% + +# List of which store classes and store class locations are +# currently known to glance at startup. +# known_stores = glance.store.filesystem.Store, +# glance.store.http.Store, +# glance.store.rbd.Store, +# glance.store.s3.Store, +# glance.store.swift.Store, +# glance.store.sheepdog.Store, +# glance.store.cinder.Store, +# glance.store.vmware_datastore.Store, + +# ============ Filesystem Store Options ======================== + +# Directory that the Filesystem backend store +# writes image data to +filesystem_store_datadir = /var/lib/glance/images/ + +# ============ Swift Store Options ============================= + +# Version of the authentication service to use +# Valid versions are '2' for keystone and '1' for swauth and rackspace +swift_store_auth_version = 2 + +# Address where the Swift authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'https://' +# For swauth, use something like '127.0.0.1:8080/v1.0/' +swift_store_auth_address = 127.0.0.1:5000/v2.0/ + +# User to authenticate against the Swift authentication service +# If you use Swift authentication service, set it to 'account':'user' +# where 'account' is a Swift storage account and 'user' +# is a user in that account +swift_store_user = jdoe:jdoe + +# Auth key for the user authenticating against the +# Swift authentication service +swift_store_key = a86850deb2742ec3cb41518e26aa2d89 + +# Container within the account that the account should use +# for storing images in Swift +swift_store_container = glance + +# Do we create the container if it does not exist? +swift_store_create_container_on_put = False + +# What size, in MB, should Glance start chunking image files +# and do a large object manifest in Swift? By default, this is +# the maximum object size in Swift, which is 5GB +swift_store_large_object_size = 5120 + +# When doing a large object manifest, what size, in MB, should +# Glance write chunks to Swift? This amount of data is written +# to a temporary disk buffer during the process of chunking +# the image file, and the default is 200MB +swift_store_large_object_chunk_size = 200 + +# Whether to use ServiceNET to communicate with the Swift storage servers. +# (If you aren't RACKSPACE, leave this False!) +# +# To use ServiceNET for authentication, prefix hostname of +# `swift_store_auth_address` with 'snet-'. +# Ex. 
https://example.com/v1.0/ -> https://snet-example.com/v1.0/
+swift_enable_snet = False
+
+# ============ S3 Store Options =============================
+
+# Address where the S3 authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'http://'
+s3_store_host = 127.0.0.1:8080/v1.0/
+
+# User to authenticate against the S3 authentication service
+s3_store_access_key = <20-char AWS access key>
+
+# Auth key for the user authenticating against the
+# S3 authentication service
+s3_store_secret_key = <40-char AWS secret key>
+
+# Bucket within the account that Glance should use
+# for storing images in S3. Note that S3 has a flat namespace,
+# so you need a unique bucket name for your glance images. An
+# easy way to do this is append your AWS access key to "glance".
+# S3 buckets in AWS *must* be lowercased, so remember to lowercase
+# your AWS access key if you use it in your bucket name below!
+s3_store_bucket = glance
+
+# Do we create the bucket if it does not exist?
+s3_store_create_bucket_on_put = False
+
+# When sending images to S3, the data will first be written to a
+# temporary buffer on disk. By default the platform's temporary directory
+# will be used. If required, an alternative directory can be specified here.
+# s3_store_object_buffer_dir = /path/to/dir
+
+# ============ Cinder Store Options ===========================
+
+# Info to match when looking for cinder in the service catalog
+# Format is ':' separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v1/%(project_id)s (string value)
+#cinder_endpoint_template =
+
+# Region name of this node (string value)
+#os_region_name =
+
+# Location of ca certificates file to use for cinder client requests
+# (string value)
+#cinder_ca_certificates_file =
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = False
+
+# ============ VMware Datastore Store Options =====================
+
+# ESX/ESXi or vCenter Server target system.
+# The server value can be an IP address or a DNS name
+# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com
+#vmware_server_host =
+
+# Server username (string value)
+#vmware_server_username =
+
+# Server password (string value)
+#vmware_server_password =
+
+# Inventory path to a datacenter (string value)
+# Value optional when vmware_server_ip is an ESX/ESXi host: if specified
+# should be `ha-datacenter`.
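+#
+# Illustrative values (hypothetical): `ha-datacenter` for a standalone
+# ESX/ESXi host, or an inventory path such as `dc-openstack` when
+# vmware_server_host points at a vCenter server.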
+#vmware_datacenter_path = + +# Datastore associated with the datacenter (string value) +#vmware_datastore_name = + +# The number of times we retry on failures +# e.g., socket error, etc (integer value) +#vmware_api_retry_count = 10 + +# The interval used for polling remote tasks +# invoked on VMware ESX/VC server in seconds (integer value) +#vmware_task_poll_interval = 5 + +# Absolute path of the folder containing the images in the datastore +# (string value) +#vmware_store_image_dir = /openstack_glance + +# Allow to perform insecure SSL requests to the target system (boolean value) +#vmware_api_insecure = False + +# ================= Security Options ========================== + +# AES key for encrypting store 'location' metadata, including +# -- if used -- Swift or S3 credentials +# Should be set to a random string of length 16, 24 or 32 bytes +# metadata_encryption_key = <16, 24 or 32 char registry metadata key> diff --git a/install-files/openstack/usr/share/openstack/glance/glance-registry-paste.ini b/install-files/openstack/usr/share/openstack/glance/glance-registry-paste.ini new file mode 100644 index 00000000..df403f6e --- /dev/null +++ b/install-files/openstack/usr/share/openstack/glance/glance-registry-paste.ini @@ -0,0 +1,30 @@ +# Use this pipeline for no auth - DEFAULT +[pipeline:glance-registry] +pipeline = osprofiler unauthenticated-context registryapp + +# Use this pipeline for keystone auth +[pipeline:glance-registry-keystone] +pipeline = osprofiler authtoken context registryapp + +# Use this pipeline for authZ only. This means that the registry will treat a +# user as authenticated without making requests to keystone to reauthenticate +# the user. +[pipeline:glance-registry-trusted-auth] +pipeline = osprofiler context registryapp + +[app:registryapp] +paste.app_factory = glance.registry.api:API.factory + +[filter:context] +paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory + +[filter:unauthenticated-context] +paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory + +[filter:osprofiler] +paste.filter_factory = osprofiler.web:WsgiMiddleware.factory +hmac_keys = SECRET_KEY +enabled = yes diff --git a/install-files/openstack/usr/share/openstack/glance/glance-registry.conf b/install-files/openstack/usr/share/openstack/glance/glance-registry.conf new file mode 100644 index 00000000..302f4138 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/glance/glance-registry.conf @@ -0,0 +1,245 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +#verbose = False + +# Show debugging output in logs (sets DEBUG log level output) +#debug = False + +# Address to bind the registry server +bind_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} + +# Port the bind the registry server to +bind_port = 9191 + +# Log to this file. Make sure you do not set the same log file for both the API +# and registry servers! +# +# If `log_file` is omitted and `use_syslog` is false, then log messages are +# sent to stdout as a fallback. +# log_file = /var/log/glance/registry.log + +# Backlog requests when creating socket +backlog = 4096 + +# TCP_KEEPIDLE value in seconds when creating socket. +# Not supported on OS X. +#tcp_keepidle = 600 + +# API to use for accessing data. Default value points to sqlalchemy +# package. 
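+#
+# (glance-scrubber.conf, added later in this patch, notes
+# glance.db.registry.api as the other in-tree data API.)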
+#data_api = glance.db.sqlalchemy.api + +# The number of child process workers that will be +# created to service Registry requests. The default will be +# equal to the number of CPUs available. (integer value) +#workers = None + +# Enable Registry API versions individually or simultaneously +#enable_v1_registry = True +#enable_v2_registry = True + +# Limit the api to return `param_limit_max` items in a call to a container. If +# a larger `limit` query param is provided, it will be reduced to this value. +api_limit_max = 1000 + +# If a `limit` query param is not provided in an api request, it will +# default to `limit_param_default` +limit_param_default = 25 + +# Role used to identify an authenticated user as administrator +#admin_role = admin + +# Whether to automatically create the database tables. +# Default: False +#db_auto_create = False + +# Enable DEBUG log messages from sqlalchemy which prints every database +# query and response. +# Default: False +#sqlalchemy_debug = True + +# ================= Syslog Options ============================ + +# Send logs to syslog (/dev/log) instead of to file specified +# by `log_file` +use_syslog = True + +# Facility to use. If unset defaults to LOG_USER. +#syslog_log_facility = LOG_LOCAL1 + +# ================= SSL Options =============================== + +# Certificate file to use when starting registry server securely +#cert_file = /path/to/certfile + +# Private key file to use when starting registry server securely +#key_file = /path/to/keyfile + +# CA certificate file to use to verify connecting clients +#ca_file = /path/to/cafile + +# ============ Notification System Options ===================== + +# Driver or drivers to handle sending notifications. Set to +# 'messaging' to send notifications to a message queue. +notification_driver = messagingv2 + +# Default publisher_id for outgoing notifications. 
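+#
+# Illustrative (hypothetical) example: a per-host value such as
+# image.registry01 would make the emitting registry identifiable to
+# notification consumers.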
+# default_publisher_id = image.localhost + +# Messaging driver used for 'messaging' notifications driver +rpc_backend=rabbit + +# Configuration options if sending notifications via rabbitmq +rabbit_host = {{ RABBITMQ_HOST }} +rabbit_port = {{ RABBITMQ_PORT }} +rabbit_use_ssl = false +rabbit_userid = {{ RABBITMQ_USER }} +rabbit_password = {{ RABBITMQ_PASSWORD }} +rabbit_virtual_host = / +rabbit_notification_exchange = glance +rabbit_notification_topic = notifications +rabbit_durable_queues = False + +# Configuration options if sending notifications via Qpid (these are +# the defaults) +qpid_notification_exchange = glance +qpid_notification_topic = notifications +qpid_hostname = localhost +qpid_port = 5672 +qpid_username = +qpid_password = +qpid_sasl_mechanisms = +qpid_reconnect_timeout = 0 +qpid_reconnect_limit = 0 +qpid_reconnect_interval_min = 0 +qpid_reconnect_interval_max = 0 +qpid_reconnect_interval = 0 +qpid_heartbeat = 5 +# Set to 'ssl' to enable SSL +qpid_protocol = tcp +qpid_tcp_nodelay = True + + +# ================= Database Options ========================== + +[database] +# The file name to use with SQLite (string value) +#sqlite_db = glance.sqlite + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous = True + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend = sqlalchemy + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = +connection=postgresql://{{ GLANCE_DB_USER }}:{{ GLANCE_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/glance + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode = TRADITIONAL + +# Timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout = 3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = + +# Maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = + +# Verbosity of SQL debugging information. 
0=None, +# 100=Everything (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Enable the experimental use of database reconnect on +# connection lost (boolean value) +#use_db_reconnect = False + +# seconds between db connection retries (integer value) +#db_retry_interval = 1 + +# Whether to increase interval between db connection retries, +# up to db_max_retry_interval (boolean value) +#db_inc_retry_interval = True + +# max seconds between db connection retries, if +# db_inc_retry_interval is enabled (integer value) +#db_max_retry_interval = 10 + +# maximum db connection retries before error is raised. +# (setting -1 implies an infinite retry count) (integer value) +#db_max_retries = 20 + +[keystone_authtoken] +auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 +identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357 +admin_tenant_name = service +admin_user = {{ GLANCE_SERVICE_USER }} +admin_password = {{ GLANCE_SERVICE_PASSWORD }} + +[paste_deploy] +# Name of the paste configuration file that defines the available pipelines +#config_file = glance-registry-paste.ini + +# Partial name of a pipeline in your paste configuration file with the +# service name removed. For example, if your paste section name is +# [pipeline:glance-registry-keystone], you would configure the flavor below +# as 'keystone'. +flavor=keystone + +[profiler] +# If False fully disable profiling feature. +#enabled = False + +# If False doesn't trace SQL requests. +#trace_sqlalchemy = False diff --git a/install-files/openstack/usr/share/openstack/glance/glance-scrubber.conf b/install-files/openstack/usr/share/openstack/glance/glance-scrubber.conf new file mode 100644 index 00000000..cdbfda71 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/glance/glance-scrubber.conf @@ -0,0 +1,108 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +#verbose = False + +# Show debugging output in logs (sets DEBUG log level output) +#debug = False + +# Log to this file. Make sure you do not set the same log file for both the API +# and registry servers! +# +# If `log_file` is omitted and `use_syslog` is false, then log messages are +# sent to stdout as a fallback. 
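+#
+# As in the other Glance configs in this patch, `use_syslog = True` is
+# set below, so the scrubber logs to syslog rather than to this file.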
+# log_file = /var/log/glance/scrubber.log + +# Send logs to syslog (/dev/log) instead of to file specified by `log_file` +use_syslog = True + +# Should we run our own loop or rely on cron/scheduler to run us +daemon = False + +# Loop time between checking for new items to schedule for delete +wakeup_time = 300 + +# Directory that the scrubber will use to remind itself of what to delete +# Make sure this is also set in glance-api.conf +scrubber_datadir = /var/lib/glance/scrubber + +# Only one server in your deployment should be designated the cleanup host +cleanup_scrubber = False + +# pending_delete items older than this time are candidates for cleanup +cleanup_scrubber_time = 86400 + +# Address to find the registry server for cleanups +registry_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} + +# Port the registry server is listening on +registry_port = 9191 + +# Auth settings if using Keystone +# auth_url = http://127.0.0.1:5000/v2.0/ +# admin_tenant_name = %SERVICE_TENANT_NAME% +# admin_user = %SERVICE_USER% +# admin_password = %SERVICE_PASSWORD% + +# Directory to use for lock files. Default to a temp directory +# (string value). This setting needs to be the same for both +# glance-scrubber and glance-api. +#lock_path= + +# API to use for accessing data. Default value points to sqlalchemy +# package, it is also possible to use: glance.db.registry.api +#data_api = glance.db.sqlalchemy.api + +# ================= Security Options ========================== + +# AES key for encrypting store 'location' metadata, including +# -- if used -- Swift or S3 credentials +# Should be set to a random string of length 16, 24 or 32 bytes +#metadata_encryption_key = <16, 24 or 32 char registry metadata key> + +# ================= Database Options ===============+========== + +[database] + +# The SQLAlchemy connection string used to connect to the +# database (string value) +#connection=sqlite:////glance/openstack/common/db/$sqlite_db + +# The SQLAlchemy connection string used to connect to the +# slave database (string value) +#slave_connection= + +# timeout before idle sql connections are reaped (integer +# value) +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +#max_pool_size= + +# maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +#max_retries=10 + +# interval between retries of opening a sql connection +# (integer value) +#retry_interval=10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +#max_overflow= + +# Verbosity of SQL debugging information. 
0=None, +# 100=Everything (integer value) +#connection_debug=0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +#connection_trace=false + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +#pool_timeout= diff --git a/install-files/openstack/usr/share/openstack/glance/logging.conf b/install-files/openstack/usr/share/openstack/glance/logging.conf new file mode 100644 index 00000000..7e7f31f0 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/glance/logging.conf @@ -0,0 +1,54 @@ +[loggers] +keys=root,api,registry,combined + +[formatters] +keys=normal,normal_with_name,debug + +[handlers] +keys=production,file,devel + +[logger_root] +level=NOTSET +handlers=devel + +[logger_api] +level=DEBUG +handlers=devel +qualname=glance-api + +[logger_registry] +level=DEBUG +handlers=devel +qualname=glance-registry + +[logger_combined] +level=DEBUG +handlers=devel +qualname=glance-combined + +[handler_production] +class=handlers.SysLogHandler +level=ERROR +formatter=normal_with_name +args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) + +[handler_file] +class=FileHandler +level=DEBUG +formatter=normal_with_name +args=('glance.log', 'w') + +[handler_devel] +class=StreamHandler +level=NOTSET +formatter=debug +args=(sys.stdout,) + +[formatter_normal] +format=%(asctime)s %(levelname)s %(message)s + +[formatter_normal_with_name] +format=(%(name)s): %(asctime)s %(levelname)s %(message)s + +[formatter_debug] +format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s diff --git a/install-files/openstack/usr/share/openstack/glance/policy.json b/install-files/openstack/usr/share/openstack/glance/policy.json new file mode 100644 index 00000000..325f00b2 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/glance/policy.json @@ -0,0 +1,52 @@ +{ + "context_is_admin": "role:admin", + "default": "", + + "add_image": "", + "delete_image": "", + "get_image": "", + "get_images": "", + "modify_image": "", + "publicize_image": "role:admin", + "copy_from": "", + + "download_image": "", + "upload_image": "", + + "delete_image_location": "", + "get_image_location": "", + "set_image_location": "", + + "add_member": "", + "delete_member": "", + "get_member": "", + "get_members": "", + "modify_member": "", + + "manage_image_cache": "role:admin", + + "get_task": "", + "get_tasks": "", + "add_task": "", + "modify_task": "", + + "get_metadef_namespace": "", + "get_metadef_namespaces":"", + "modify_metadef_namespace":"", + "add_metadef_namespace":"", + + "get_metadef_object":"", + "get_metadef_objects":"", + "modify_metadef_object":"", + "add_metadef_object":"", + + "list_metadef_resource_types":"", + "get_metadef_resource_type":"", + "add_metadef_resource_type_association":"", + + "get_metadef_property":"", + "get_metadef_properties":"", + "modify_metadef_property":"", + "add_metadef_property":"" + +} diff --git a/install-files/openstack/usr/share/openstack/glance/schema-image.json b/install-files/openstack/usr/share/openstack/glance/schema-image.json new file mode 100644 index 00000000..5aafd6b3 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/glance/schema-image.json @@ -0,0 +1,28 @@ +{ + "kernel_id": { + "type": "string", + "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", + "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image." 
+ }, + "ramdisk_id": { + "type": "string", + "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", + "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image." + }, + "instance_uuid": { + "type": "string", + "description": "ID of instance used to create this image." + }, + "architecture": { + "description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html", + "type": "string" + }, + "os_distro": { + "description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html", + "type": "string" + }, + "os_version": { + "description": "Operating system version as specified by the distributor", + "type": "string" + } +} diff --git a/install-files/openstack/usr/share/openstack/horizon.yml b/install-files/openstack/usr/share/openstack/horizon.yml new file mode 100644 index 00000000..14cea5c5 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/horizon.yml @@ -0,0 +1,47 @@ +--- +- hosts: localhost + tasks: + +# Setup apache, this may end up in apache.yml + - name: Create the apache user. + user: + name: apache + comment: Apache Server + shell: /sbin/nologin + home: /var/www + + - file: + path: /usr/sbin/suexec + group: apache + mode: 4750 + +# Setup horizon + - name: Create the horizon user. + user: + name: horizon + comment: Openstack Horizon User + shell: /sbin/nologin + home: /var/lib/horizon + + - name: Create the /var folders for horizon + file: + path: "{{ item }}" + state: directory + owner: horizon + group: horizon + with_items: + - /var/lib/horizon + + - name: Link horizon apache configuration + file: + src: /etc/horizon/apache-horizon.conf + dest: /etc/httpd/conf.d/apache-horizon.conf + state: link + + - name: Enable and start apache services needed by horizon + service: + name: "{{ item }}" + enabled: yes + state: started + with_items: + - apache-httpd.service diff --git a/install-files/openstack/usr/share/openstack/hosts b/install-files/openstack/usr/share/openstack/hosts new file mode 100644 index 00000000..5b97818d --- /dev/null +++ b/install-files/openstack/usr/share/openstack/hosts @@ -0,0 +1 @@ +localhost ansible_connection=local diff --git a/install-files/openstack/usr/share/openstack/ironic.yml b/install-files/openstack/usr/share/openstack/ironic.yml new file mode 100644 index 00000000..db0a8aa8 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/ironic.yml @@ -0,0 +1,104 @@ +--- +- hosts: localhost + vars_files: + - "/etc/openstack/ironic.conf" + tasks: + - name: Create the ironic user + user: + name: ironic + comment: Openstack Ironic Daemons + shell: /sbin/nologin + home: /var/lib/ironic + + - name: Create the /var folders for Ironic + file: + path: "{{ item }}" + state: directory + owner: ironic + group: ironic + with_items: + - /var/run/ironic + - /var/lock/ironic + - /var/log/ironic + - /var/lib/ironic + + - file: path=/etc/ironic state=directory + - name: Add the configuration needed for ironic in /etc/ironic using templates + template: + src: /usr/share/openstack/ironic/{{ item }} + dest: /etc/ironic/{{ item }} + with_lines: + - cd /usr/share/openstack/ironic && find -type f + + - name: Create Ironic service user in service tenant + keystone_user: + user: "{{ IRONIC_SERVICE_USER }}" + password: "{{ IRONIC_SERVICE_PASSWORD }}" + tenant: service + token: "{{ 
KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Assign admin role to Ironic service user in the service tenant + keystone_user: + role: admin + user: "{{ IRONIC_SERVICE_USER }}" + tenant: service + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Add Ironic endpoint + keystone_service: + name: ironic + type: baremetal + description: Openstack Ironic Service + publicurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:6385' + internalurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:6385' + adminurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:6385' + region: 'regionOne' + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Create postgresql user for Ironic + postgresql_user: + name: "{{ IRONIC_DB_USER }}" + login_host: "{{ CONTROLLER_HOST_ADDRESS }}" + password: "{{ IRONIC_DB_PASSWORD }}" + sudo: yes + sudo_user: ironic + + - name: Create database for Ironic services + postgresql_db: + name: ironic + owner: "{{ IRONIC_DB_USER }}" + login_host: "{{ CONTROLLER_HOST_ADDRESS }}" + sudo: yes + sudo_user: ironic + + - name: Initiate Ironic database + # Use 'upgrade' instead of 'create_schema' to make the operation + # idempotent + shell: | + ironic-dbsync \ + --config-file /etc/ironic/ironic.conf upgrade + sudo: yes + sudo_user: ironic + + - name: Enable and start openstack-ironic services + service: + name: "{{ item }}" + enabled: yes + state: started + with_items: + - openstack-ironic-conductor.service + - openstack-ironic-api.service + + - name: Set owner and group for the tftp root directory + file: + path: "/srv/tftp_root/" + state: directory + owner: ironic + group: ironic + recurse: yes + + - name: Enable and start tftp-hpa + service: + name: tftp-hpa.socket + enabled: yes + state: started diff --git a/install-files/openstack/usr/share/openstack/ironic/ironic.conf b/install-files/openstack/usr/share/openstack/ironic/ironic.conf new file mode 100644 index 00000000..75c62b8e --- /dev/null +++ b/install-files/openstack/usr/share/openstack/ironic/ironic.conf @@ -0,0 +1,1247 @@ +[DEFAULT] + +# +# Options defined in oslo.messaging +# + +# Use durable queues in amqp. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size=30 + +# Qpid broker hostname. (string value) +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The number of prefetched messages held by receiver. (integer +# value) +#qpid_receiver_capacity=1 + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). 
valid values +# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +rabbit_host={{ RABBITMQ_HOST }} + +# The RabbitMQ broker port where a single node is used. +# (integer value) +rabbit_port={{ RABBITMQ_PORT }} + +# RabbitMQ HA cluster host:port pairs. (list value) +#rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +#rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +rabbit_userid={{ RABBITMQ_USER }} + +# The RabbitMQ password. (string value) +rabbit_password={{ RABBITMQ_PASSWORD }} + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=ironic + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Size of RPC greenthread pool. (integer value) +#rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +#notification_driver= + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. 
(string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other +# drivers include qpid and zmq. (string value) +#rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the +# transport_url option. (string value) +#control_exchange=openstack + + +# +# Options defined in ironic.netconf +# + +# IP address of this host. (string value) +my_ip={{ MANAGEMENT_INTERFACE_IP_ADDRESS }} + +# Use IPv6. (boolean value) +#use_ipv6=false + + +# +# Options defined in ironic.api.app +# + +# Method to use for authentication: noauth or keystone. +# (string value) +#auth_strategy=keystone + + +# +# Options defined in ironic.common.driver_factory +# + +# Specify the list of drivers to load during service +# initialization. Missing drivers, or drivers which fail to +# initialize, will prevent the conductor service from +# starting. The option default is a recommended set of +# production-oriented drivers. A complete list of drivers +# present on your system may be found by enumerating the +# "ironic.drivers" entrypoint. An example may be found in the +# developer documentation online. (list value) +enabled_drivers=pxe_ipmitool,pxe_ssh + + +# +# Options defined in ironic.common.exception +# + +# Make exception message format errors fatal. (boolean value) +#fatal_exception_format_errors=false + + +# +# Options defined in ironic.common.hash_ring +# + +# Exponent to determine number of hash partitions to use when +# distributing load across conductors. Larger values will +# result in more even distribution of load and less load when +# rebalancing the ring, but more memory usage. Number of +# partitions per conductor is (2^hash_partition_exponent). +# This determines the granularity of rebalancing: given 10 +# hosts, and an exponent of the 2, there are 40 partitions in +# the ring.A few thousand partitions should make rebalancing +# smooth in most cases. The default is suitable for up to a +# few hundred conductors. Too many partitions has a CPU +# impact. (integer value) +#hash_partition_exponent=5 + +# [Experimental Feature] Number of hosts to map onto each hash +# partition. Setting this to more than one will cause +# additional conductor services to prepare deployment +# environments and potentially allow the Ironic cluster to +# recover more quickly if a conductor instance is terminated. +# (integer value) +#hash_distribution_replicas=1 + + +# +# Options defined in ironic.common.images +# + +# Force backing images to raw format. (boolean value) +#force_raw_images=true + +# Path to isolinux binary file. (string value) +#isolinux_bin=/usr/lib/syslinux/isolinux.bin + +# Template file for isolinux configuration file. (string +# value) +#isolinux_config_template=$pybasedir/common/isolinux_config.template + + +# +# Options defined in ironic.common.paths +# + +# Directory where the ironic python module is installed. +# (string value) +#pybasedir=/usr/lib/python/site-packages/ironic + +# Directory where ironic binaries are installed. (string +# value) +#bindir=$pybasedir/bin + +# Top-level directory for maintaining ironic's state. (string +# value) +#state_path=$pybasedir + + +# +# Options defined in ironic.common.policy +# + +# JSON file representing policy. (string value) +#policy_file=policy.json + +# Rule checked when requested rule is not found. (string +# value) +#policy_default_rule=default + + +# +# Options defined in ironic.common.service +# + +# Seconds between running periodic tasks. 
(integer value) +#periodic_interval=60 + +# Name of this node. This can be an opaque identifier. It is +# not necessarily a hostname, FQDN, or IP address. However, +# the node name must be valid within an AMQP key, and if using +# ZeroMQ, a valid hostname, FQDN, or IP address. (string +# value) +#host=ironic + + +# +# Options defined in ironic.common.utils +# + +# Path to the rootwrap configuration file to use for running +# commands as root. (string value) +#rootwrap_config=/etc/ironic/rootwrap.conf + +# Explicitly specify the temporary working directory. (string +# value) +#tempdir= + + +# +# Options defined in ironic.drivers.modules.image_cache +# + +# Run image downloads and raw format conversions in parallel. +# (boolean value) +#parallel_image_downloads=false + + +# +# Options defined in ironic.openstack.common.eventlet_backdoor +# + +# Enable eventlet backdoor. Acceptable values are 0, , +# and :, where 0 results in listening on a random +# tcp port number; results in listening on the +# specified port number (and not enabling backdoor if that +# port is in use); and : results in listening on +# the smallest unused port number within the specified range +# of port numbers. The chosen port is displayed in the +# service's log file. (string value) +#backdoor_port= + + +# +# Options defined in ironic.openstack.common.lockutils +# + +# Enables or disables inter-process locks. (boolean value) +#disable_process_locking=false + +# Directory to use for lock files. (string value) +#lock_path= + + +# +# Options defined in ironic.openstack.common.log +# + +# Print debugging output (set logging level to DEBUG instead +# of default WARNING level). (boolean value) +#debug=false + +# Print more verbose output (set logging level to INFO instead +# of default WARNING level). (boolean value) +#verbose=false + +# Log output to standard error. (boolean value) +#use_stderr=true + +# Format string to use for log messages with context. (string +# value) +#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages without context. +# (string value) +#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG. (string +# value) +#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. +# (string value) +#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# List of logger=LEVEL pairs. (list value) +#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN + +# Enables or disables publication of error events. (boolean +# value) +#publish_errors=false + +# Enables or disables fatal status of deprecations. (boolean +# value) +#fatal_deprecations=false + +# The format for an instance that is passed with the log +# message. (string value) +#instance_format="[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log +# message. (string value) +#instance_uuid_format="[instance: %(uuid)s] " + +# The name of a logging configuration file. This file is +# appended to any existing logging configuration files. 
For +# details about logging configuration files, see the Python +# logging module documentation. (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append= + +# DEPRECATED. A logging.Formatter log message format string +# which may use any of the available logging.LogRecord +# attributes. This option is deprecated. Please use +# logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format= + +# Format string for %%(asctime)s in log records. Default: +# %(default)s . (string value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If no default is +# set, logging will go to stdout. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file= + +# (Optional) The base directory used for relative --log-file +# paths. (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir= + +# Use syslog for logging. Existing syslog format is DEPRECATED +# during I, and will change in J to honor RFC5424. (boolean +# value) +use_syslog=True + +# (Optional) Enables or disables syslog rfc5424 format for +# logging. If enabled, prefixes the MSG part of the syslog +# message with APP-NAME (RFC5424). The format without the APP- +# NAME is deprecated in I, and will be removed in J. (boolean +# value) +#use_syslog_rfc_format=false + +# Syslog facility to receive log lines. (string value) +#syslog_log_facility=LOG_USER + + +# +# Options defined in ironic.openstack.common.periodic_task +# + +# Some periodic tasks can be run in a separate process. Should +# we run them here? (boolean value) +#run_external_periodic_tasks=true + + +[agent] + +# +# Options defined in ironic.drivers.modules.agent +# + +# Additional append parameters for baremetal PXE boot. (string +# value) +#agent_pxe_append_params=nofb nomodeset vga=normal + +# Template file for PXE configuration. (string value) +#agent_pxe_config_template=$pybasedir/drivers/modules/agent_config.template + +# Neutron bootfile DHCP parameter. (string value) +#agent_pxe_bootfile_name=pxelinux.0 + +# Maximum interval (in seconds) for agent heartbeats. (integer +# value) +#heartbeat_timeout=300 + + +# +# Options defined in ironic.drivers.modules.agent_client +# + +# API version to use for communicating with the ramdisk agent. +# (string value) +#agent_api_version=v1 + + +[api] + +# +# Options defined in ironic.api +# + +# The listen IP for the Ironic API server. (string value) +#host_ip=0.0.0.0 + +# The port for the Ironic API server. (integer value) +#port=6385 + +# The maximum number of items returned in a single response +# from a collection resource. (integer value) +#max_limit=1000 + + +[conductor] + +# +# Options defined in ironic.conductor.manager +# + +# URL of Ironic API service. If not set ironic can get the +# current value from the keystone service catalog. (string +# value) +api_url=http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:6385 + +# Seconds between conductor heart beats. (integer value) +#heartbeat_interval=10 + +# Maximum time (in seconds) since the last check-in of a +# conductor. (integer value) +#heartbeat_timeout=60 + +# Interval between syncing the node power state to the +# database, in seconds. (integer value) +#sync_power_state_interval=60 + +# Interval between checks of provision timeouts, in seconds. +# (integer value) +#check_provision_state_interval=60 + +# Timeout (seconds) for waiting callback from deploy ramdisk. +# 0 - unlimited. 
(integer value) +#deploy_callback_timeout=1800 + +# During sync_power_state, should the hardware power state be +# set to the state recorded in the database (True) or should +# the database be updated based on the hardware state (False). +# (boolean value) +#force_power_state_during_sync=true + +# During sync_power_state failures, limit the number of times +# Ironic should try syncing the hardware node power state with +# the node power state in DB (integer value) +#power_state_sync_max_retries=3 + +# Maximum number of worker threads that can be started +# simultaneously by a periodic task. Should be less than RPC +# thread pool size. (integer value) +#periodic_max_workers=8 + +# The size of the workers greenthread pool. (integer value) +#workers_pool_size=100 + +# Number of attempts to grab a node lock. (integer value) +#node_locked_retry_attempts=3 + +# Seconds to sleep between node lock attempts. (integer value) +#node_locked_retry_interval=1 + +# Enable sending sensor data message via the notification bus +# (boolean value) +#send_sensor_data=false + +# Seconds between conductor sending sensor data message to +# ceilometer via the notification bus. (integer value) +#send_sensor_data_interval=600 + +# List of comma separated metric types which need to be sent +# to Ceilometer. The default value, "ALL", is a special value +# meaning send all the sensor data. (list value) +#send_sensor_data_types=ALL + +# When conductors join or leave the cluster, existing +# conductors may need to update any persistent local state as +# nodes are moved around the cluster. This option controls how +# often, in seconds, each conductor will check for nodes that +# it should "take over". Set it to a negative value to disable +# the check entirely. (integer value) +#sync_local_state_interval=180 + + +[console] + +# +# Options defined in ironic.drivers.modules.console_utils +# + +# Path to serial console terminal program (string value) +#terminal=shellinaboxd + +# Directory containing the terminal SSL cert(PEM) for serial +# console access (string value) +#terminal_cert_dir= + +# Directory for holding terminal pid files. If not specified, +# the temporary directory will be used. (string value) +#terminal_pid_dir= + +# Time interval (in seconds) for checking the status of +# console subprocess. (integer value) +#subprocess_checking_interval=1 + +# Time (in seconds) to wait for the console subprocess to +# start. (integer value) +#subprocess_timeout=10 + + +[database] + +# +# Options defined in oslo.db +# + +# The file name to use with SQLite. (string value) +#sqlite_db=oslo.sqlite + +# If True, SQLite uses synchronous mode. (boolean value) +#sqlite_synchronous=true + +# The back end to use for the database. (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend=sqlalchemy + +# The SQLAlchemy connection string to use to connect to the +# database. (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +connection=postgresql://{{ IRONIC_DB_USER}}:{{ IRONIC_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/ironic + +# The SQLAlchemy connection string to use to connect to the +# slave database. (string value) +#slave_connection= + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. 
Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode=TRADITIONAL + +# Timeout before idle SQL connections are reaped. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# Maximum db connection retries during startup. Set to -1 to +# specify an infinite retry count. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries=10 + +# Interval between retries of opening a SQL connection. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with SQLAlchemy. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information: 0=None, +# 100=Everything. (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug=0 + +# Add Python stack traces to SQL as comment strings. (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace=false + +# If set, use this value for pool_timeout with SQLAlchemy. +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + +# Enable the experimental use of database reconnect on +# connection lost. (boolean value) +#use_db_reconnect=false + +# Seconds between database connection retries. (integer value) +#db_retry_interval=1 + +# If True, increases the interval between database connection +# retries up to db_max_retry_interval. (boolean value) +#db_inc_retry_interval=true + +# If db_inc_retry_interval is set, the maximum seconds between +# database connection retries. (integer value) +#db_max_retry_interval=10 + +# Maximum database connection retries before error is raised. +# Set to -1 to specify an infinite retry count. (integer +# value) +#db_max_retries=20 + + +# +# Options defined in ironic.db.sqlalchemy.models +# + +# MySQL engine to use. (string value) +#mysql_engine=InnoDB + + +[dhcp] + +# +# Options defined in ironic.common.dhcp_factory +# + +# DHCP provider to use. "neutron" uses Neutron, and "none" +# uses a no-op provider. (string value) +#dhcp_provider=neutron + + +[disk_partitioner] + +# +# Options defined in ironic.common.disk_partitioner +# + +# After Ironic has completed creating the partition table, it +# continues to check for activity on the attached iSCSI device +# status at this interval prior to copying the image to the +# node, in seconds (integer value) +#check_device_interval=1 + +# The maximum number of times to check that the device is not +# accessed by another process. If the device is still busy +# after that, the disk partitioning will be treated as having +# failed. 
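
Spelled out: after partitioning, the deploy polls the device every
check_device_interval seconds and gives up after check_device_max_retries
polls if something still holds it open. A sketch of that loop, assuming a
fuser(1)-style busy test rather than Ironic's actual check:

    import subprocess
    import time

    def wait_until_device_free(dev, interval=1, max_retries=20):
        # fuser exits 0 when at least one process is using the device.
        for _ in range(max_retries):
            if subprocess.call(['fuser', dev]) != 0:
                return
            time.sleep(interval)
        raise RuntimeError('%s is still busy; partitioning failed' % dev)
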
(integer value) +#check_device_max_retries=20 + + +[glance] + +# +# Options defined in ironic.common.glance_service.v2.image_service +# + +# A list of URL schemes that can be downloaded directly via +# the direct_url. Currently supported schemes: [file]. (list +# value) +#allowed_direct_url_schemes= + +# The secret token given to Swift to allow temporary URL +# downloads. Required for temporary URLs. (string value) +#swift_temp_url_key= + +# The length of time in seconds that the temporary URL will be +# valid for. Defaults to 20 minutes. If some deploys get a 401 +# response code when trying to download from the temporary +# URL, try raising this duration. (integer value) +#swift_temp_url_duration=1200 + +# The "endpoint" (scheme, hostname, optional port) for the +# Swift URL of the form +# "endpoint_url/api_version/account/container/object_id". Do +# not include trailing "/". For example, use +# "https://swift.example.com". Required for temporary URLs. +# (string value) +#swift_endpoint_url= + +# The Swift API version to create a temporary URL for. +# Defaults to "v1". Swift temporary URL format: +# "endpoint_url/api_version/account/container/object_id" +# (string value) +#swift_api_version=v1 + +# The account that Glance uses to communicate with Swift. The +# format is "AUTH_uuid". "uuid" is the UUID for the account +# configured in the glance-api.conf. Required for temporary +# URLs. For example: +# "AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30". Swift temporary +# URL format: +# "endpoint_url/api_version/account/container/object_id" +# (string value) +#swift_account= + +# The Swift container Glance is configured to store its images +# in. Defaults to "glance", which is the default in glance- +# api.conf. Swift temporary URL format: +# "endpoint_url/api_version/account/container/object_id" +# (string value) +#swift_container=glance + + +# +# Options defined in ironic.common.image_service +# + +# Default glance hostname or IP address. (string value) +glance_host={{ CONTROLLER_HOST_ADDRESS }} + +# Default glance port. (integer value) +#glance_port=9292 + +# Default protocol to use when connecting to glance. Set to +# https for SSL. (string value) +#glance_protocol=http + +# A list of the glance api servers available to ironic. Prefix +# with https:// for SSL-based glance API servers. Format is +# [hostname|IP]:port. (string value) +#glance_api_servers= + +# Allow to perform insecure SSL (https) requests to glance. +# (boolean value) +#glance_api_insecure=false + +# Number of retries when downloading an image from glance. +# (integer value) +#glance_num_retries=0 + +# Default protocol to use when connecting to glance. Set to +# https for SSL. (string value) +#auth_strategy=keystone + + +[ilo] + +# +# Options defined in ironic.drivers.modules.ilo.common +# + +# Timeout (in seconds) for iLO operations (integer value) +#client_timeout=60 + +# Port to be used for iLO operations (integer value) +#client_port=443 + +# The Swift iLO container to store data. (string value) +#swift_ilo_container=ironic_ilo_container + +# Amount of time in seconds for Swift objects to auto-expire. 
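
The swift_temp_url_* options above all feed one mechanism: Swift temporary
URLs are an HMAC-SHA1 signature over the request method, the expiry time,
and the object path. A sketch using the URL format documented above, with
a made-up key and object id:

    import hashlib
    import hmac
    import time

    key = 'swift-temp-url-key'              # swift_temp_url_key
    endpoint = 'https://swift.example.com'  # swift_endpoint_url
    path = '/v1/AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30/glance/IMAGE_ID'

    expires = int(time.time()) + 1200       # swift_temp_url_duration
    hmac_body = '%s\n%d\n%s' % ('GET', expires, path)
    sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()

    temp_url = '%s%s?temp_url_sig=%s&temp_url_expires=%d' % (
        endpoint, path, sig, expires)
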
+# (integer value) +#swift_object_expiry_timeout=900 + + +# +# Options defined in ironic.drivers.modules.ilo.power +# + +# Number of times a power operation needs to be retried +# (integer value) +#power_retry=6 + +# Amount of time in seconds to wait in between power +# operations (integer value) +#power_wait=2 + + +[ipmi] + +# +# Options defined in ironic.drivers.modules.ipminative +# + +# Maximum time in seconds to retry IPMI operations. (integer +# value) +#retry_timeout=60 + +# Minimum time, in seconds, between IPMI operations sent to a +# server. There is a risk with some hardware that setting this +# too low may cause the BMC to crash. Recommended setting is 5 +# seconds. (integer value) +#min_command_interval=5 + + +[keystone_authtoken] + +# +# Options defined in keystonemiddleware.auth_token +# + +# Prefix to prepend at the beginning of the path. Deprecated, +# use identity_uri. (string value) +#auth_admin_prefix= + +# Host providing the admin Identity API endpoint. Deprecated, +# use identity_uri. (string value) +#auth_host=127.0.0.1 + +# Port of the admin Identity API endpoint. Deprecated, use +# identity_uri. (integer value) +#auth_port=35357 + +# Protocol of the admin Identity API endpoint (http or https). +# Deprecated, use identity_uri. (string value) +#auth_protocol=https + +# Complete public Identity API endpoint (string value) +auth_uri=http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 + +# Complete admin Identity API endpoint. This should specify +# the unversioned root endpoint e.g. https://localhost:35357/ +# (string value) +identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357 + +# API version of the admin Identity API endpoint (string +# value) +#auth_version= + +# Do not handle authorization requests within the middleware, +# but delegate the authorization decision to downstream WSGI +# components (boolean value) +#delay_auth_decision=false + +# Request timeout value for communicating with Identity API +# server. (boolean value) +#http_connect_timeout= + +# How many times are we trying to reconnect when communicating +# with Identity API Server. (integer value) +#http_request_max_retries=3 + +# This option is deprecated and may be removed in a future +# release. Single shared secret with the Keystone +# configuration used for bootstrapping a Keystone +# installation, or otherwise bypassing the normal +# authentication process. This option should not be used, use +# `admin_user` and `admin_password` instead. (string value) +#admin_token= + +# Keystone account username (string value) +admin_user={{ IRONIC_SERVICE_USER }} + +# Keystone account password (string value) +admin_password={{ IRONIC_SERVICE_PASSWORD }} + +# Keystone service account tenant name to validate user tokens +# (string value) +admin_tenant_name=service + +# Env key for the swift cache (string value) +#cache= + +# Required if Keystone server requires client certificate +# (string value) +#certfile= + +# Required if Keystone server requires client certificate +# (string value) +#keyfile= + +# A PEM encoded Certificate Authority to use when verifying +# HTTPs connections. Defaults to system CAs. (string value) +#cafile= + +# Verify HTTPS connections. (boolean value) +#insecure=false + +# Directory used to cache files related to PKI tokens (string +# value) +#signing_dir= + +# Optionally specify a list of memcached server(s) to use for +# caching. If left undefined, tokens will instead be cached +# in-process. 
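
The admin_user, admin_password and admin_tenant_name settings above are an
ordinary Keystone service account; the middleware authenticates with them
in order to validate incoming tokens. Roughly, against the standard v2.0
API (hypothetical host and credentials):

    import json
    import urllib2

    payload = {'auth': {
        'tenantName': 'service',
        'passwordCredentials': {'username': 'ironic',
                                'password': 'secret'},
    }}
    req = urllib2.Request('http://controller:5000/v2.0/tokens',
                          json.dumps(payload),
                          {'Content-Type': 'application/json'})
    token = json.load(urllib2.urlopen(req))['access']['token']['id']
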
(list value) +# Deprecated group/name - [DEFAULT]/memcache_servers +#memcached_servers= + +# In order to prevent excessive effort spent validating +# tokens, the middleware caches previously-seen tokens for a +# configurable duration (in seconds). Set to -1 to disable +# caching completely. (integer value) +#token_cache_time=300 + +# Determines the frequency at which the list of revoked tokens +# is retrieved from the Identity service (in seconds). A high +# number of revocation events combined with a low cache +# duration may significantly reduce performance. (integer +# value) +#revocation_cache_time=10 + +# (optional) if defined, indicate whether token data should be +# authenticated or authenticated and encrypted. Acceptable +# values are MAC or ENCRYPT. If MAC, token data is +# authenticated (with HMAC) in the cache. If ENCRYPT, token +# data is encrypted and authenticated in the cache. If the +# value is not one of these options or empty, auth_token will +# raise an exception on initialization. (string value) +#memcache_security_strategy= + +# (optional, mandatory if memcache_security_strategy is +# defined) this string is used for key derivation. (string +# value) +#memcache_secret_key= + +# (optional) number of seconds memcached server is considered +# dead before it is tried again. (integer value) +#memcache_pool_dead_retry=300 + +# (optional) max total number of open connections to every +# memcached server. (integer value) +#memcache_pool_maxsize=10 + +# (optional) socket timeout in seconds for communicating with +# a memcache server. (integer value) +#memcache_pool_socket_timeout=3 + +# (optional) number of seconds a connection to memcached is +# held unused in the pool before it is closed. (integer value) +#memcache_pool_unused_timeout=60 + +# (optional) number of seconds that an operation will wait to +# get a memcache client connection from the pool. (integer +# value) +#memcache_pool_conn_get_timeout=10 + +# (optional) use the advanced (eventlet safe) memcache client +# pool. The advanced pool will only work under python 2.x. +# (boolean value) +#memcache_use_advanced_pool=false + +# (optional) indicate whether to set the X-Service-Catalog +# header. If False, middleware will not ask for service +# catalog on token validation and will not set the X-Service- +# Catalog header. (boolean value) +#include_service_catalog=true + +# Used to control the use and type of token binding. Can be +# set to: "disabled" to not check token binding. "permissive" +# (default) to validate binding information if the bind type +# is of a form known to the server and ignore it if not. +# "strict" like "permissive" but if the bind type is unknown +# the token will be rejected. "required" any form of token +# binding is needed to be allowed. Finally the name of a +# binding method that must be present in tokens. (string +# value) +#enforce_token_bind=permissive + +# If true, the revocation list will be checked for cached +# tokens. This requires that PKI tokens are configured on the +# Keystone server. (boolean value) +#check_revocations_for_cached=false + +# Hash algorithms to use for hashing PKI tokens. This may be a +# single algorithm or multiple. The algorithms are those +# supported by Python standard hashlib.new(). The hashes will +# be tried in the order given, so put the preferred one first +# for performance. The result of the first hash will be stored +# in the cache. 
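
In other words, the middleware's cache key for a (large) PKI token is just
a digest of it, computed with whichever algorithms are listed. A sketch of
the hashing step:

    import hashlib

    def token_cache_keys(token, algorithms=('md5',)):
        # Each name must be accepted by hashlib.new(); the first
        # result is what gets stored in the cache.
        return [hashlib.new(name, token).hexdigest()
                for name in algorithms]
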
This will typically be set to multiple values +# only while migrating from a less secure algorithm to a more +# secure one. Once all the old tokens are expired this option +# should be set to a single value for better performance. +# (list value) +#hash_algorithms=md5 + + +[matchmaker_redis] + +# +# Options defined in oslo.messaging +# + +# Host to locate redis. (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + + +[matchmaker_ring] + +# +# Options defined in oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + +[neutron] + +# +# Options defined in ironic.dhcp.neutron +# + +# URL for connecting to neutron. (string value) +url=http://{{ CONTROLLER_HOST_ADDRESS }}:9696 + +# Timeout value for connecting to neutron in seconds. (integer +# value) +#url_timeout=30 + +# Default authentication strategy to use when connecting to +# neutron. Can be either "keystone" or "noauth". Running +# neutron in noauth mode (related to but not affected by this +# setting) is insecure and should only be used for testing. +# (string value) +#auth_strategy=keystone + + +[pxe] + +# +# Options defined in ironic.drivers.modules.iscsi_deploy +# + +# Additional append parameters for baremetal PXE boot. (string +# value) +#pxe_append_params=nofb nomodeset vga=normal + +# Default file system format for ephemeral partition, if one +# is created. (string value) +#default_ephemeral_format=ext4 + +# Directory where images are stored on disk. (string value) +#images_path=/var/lib/ironic/images/ + +# Directory where master instance images are stored on disk. +# (string value) +#instance_master_path=/var/lib/ironic/master_images + +# Maximum size (in MiB) of cache for master images, including +# those in use. (integer value) +#image_cache_size=20480 + +# Maximum TTL (in minutes) for old master images in cache. +# (integer value) +#image_cache_ttl=10080 + +# The disk devices to scan while doing the deploy. (string +# value) +#disk_devices=cciss/c0d0,sda,hda,vda + + +# +# Options defined in ironic.drivers.modules.pxe +# + +# Template file for PXE configuration. (string value) +#pxe_config_template=$pybasedir/drivers/modules/pxe_config.template + +# Template file for PXE configuration for UEFI boot loader. +# (string value) +#uefi_pxe_config_template=$pybasedir/drivers/modules/elilo_efi_pxe_config.template + +# IP address of Ironic compute node's tftp server. (string +# value) +#tftp_server=$my_ip + +# Ironic compute node's tftp root path. (string value) +tftp_root=/srv/tftp_root/ + +# Directory where master tftp images are stored on disk. +# (string value) +tftp_master_path=/srv/tftp_root/master_images + +# Bootfile DHCP parameter. (string value) +#pxe_bootfile_name=pxelinux.0 + +# Bootfile DHCP parameter for UEFI boot mode. (string value) +#uefi_pxe_bootfile_name=elilo.efi + +# Ironic compute node's HTTP server URL. Example: +# http://192.1.2.3:8080 (string value) +#http_url= + +# Ironic compute node's HTTP root path. (string value) +#http_root=/httpboot + +# Enable iPXE boot. (boolean value) +#ipxe_enabled=false + +# The path to the main iPXE script file. 
(string value) +#ipxe_boot_script=$pybasedir/drivers/modules/boot.ipxe + + +[seamicro] + +# +# Options defined in ironic.drivers.modules.seamicro +# + +# Maximum retries for SeaMicro operations (integer value) +#max_retry=3 + +# Seconds to wait for power action to be completed (integer +# value) +#action_timeout=10 + + +[snmp] + +# +# Options defined in ironic.drivers.modules.snmp +# + +# Seconds to wait for power action to be completed (integer +# value) +#power_timeout=10 + + +[ssh] + +# +# Options defined in ironic.drivers.modules.ssh +# + +# libvirt uri (string value) +#libvirt_uri=qemu:///system + + +[swift] + +# +# Options defined in ironic.common.swift +# + +# Maximum number of times to retry a Swift request, before +# failing. (integer value) +#swift_max_retries=2 + + diff --git a/install-files/openstack/usr/share/openstack/ironic/policy.json b/install-files/openstack/usr/share/openstack/ironic/policy.json new file mode 100644 index 00000000..94ac3a5b --- /dev/null +++ b/install-files/openstack/usr/share/openstack/ironic/policy.json @@ -0,0 +1,5 @@ +{ + "admin": "role:admin or role:administrator", + "admin_api": "is_admin:True", + "default": "rule:admin_api" +} diff --git a/install-files/openstack/usr/share/openstack/iscsi.yml b/install-files/openstack/usr/share/openstack/iscsi.yml new file mode 100644 index 00000000..b80377ae --- /dev/null +++ b/install-files/openstack/usr/share/openstack/iscsi.yml @@ -0,0 +1,15 @@ +--- +- hosts: localhost + tasks: + - name: Update kernel module dependencies + command: depmod -a + + - name: generate InitiatorName for iscsi + shell: iscsi-iname + register: initiator_name + + - lineinfile: + dest: /etc/iscsi/initiatorname.iscsi + regexp: '^InitiatorName=$' + line: 'InitiatorName={{ initiator_name.stdout }}' + backrefs: yes diff --git a/install-files/openstack/usr/share/openstack/keystone.yml b/install-files/openstack/usr/share/openstack/keystone.yml new file mode 100644 index 00000000..330d74d0 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/keystone.yml @@ -0,0 +1,143 @@ +--- +- hosts: localhost + vars_files: + - "/etc/openstack/keystone.conf" + tasks: + + # RabbitMQ configuration, this may end up in a different playbook + - name: Create rabbitmq user + user: + name: rabbitmq + comment: Rabbitmq server daemon + shell: /sbin/nologin + home: /var/lib/rabbitmq + + - name: Create the rabbitmq directories + file: + path: "{{ item }}" + state: directory + owner: rabbitmq + group: rabbitmq + with_items: + - /var/run/rabbitmq + - /var/log/rabbitmq + - /etc/rabbitmq + + - name: Add the configuration needed for rabbitmq in /etc/rabbitmq using templates + template: + src: /usr/share/openstack/rabbitmq/{{ item }} + dest: /etc/rabbitmq/{{ item }} + owner: rabbitmq + group: rabbitmq + mode: 0644 + with_items: + - rabbitmq.config + - rabbitmq-env.conf + + - name: Enable and start rabbitmq services + service: + name: "{{ item }}" + enabled: yes + state: started + with_items: + - rabbitmq-server + + # Keystone configuration + - name: Create the keystone user. 
+      user:
+        name: keystone
+        comment: Openstack Keystone Daemons
+        shell: /sbin/nologin
+        home: /var/lib/keystone
+
+    - name: Create the /var folders for keystone
+      file:
+        path: "{{ item }}"
+        state: directory
+        owner: keystone
+        group: keystone
+      with_items:
+        - /var/run/keystone
+        - /var/lock/keystone
+        - /var/log/keystone
+        - /var/lib/keystone
+
+    - name: Create /etc/keystone directory
+      file:
+        path: /etc/keystone
+        state: directory
+
+    - name: Add the configuration needed for keystone in /etc using templates
+      template:
+        src: /usr/share/openstack/keystone/{{ item }}
+        dest: /etc/keystone/{{ item }}
+      with_lines:
+        - cd /usr/share/openstack/keystone && find -type f
+
+    - name: Create postgresql user for keystone
+      postgresql_user:
+        name: "{{ KEYSTONE_DB_USER }}"
+        password: "{{ KEYSTONE_DB_PASSWORD }}"
+      sudo: yes
+      sudo_user: keystone
+
+    - name: Create database for keystone services
+      postgresql_db:
+        name: keystone
+        owner: "{{ KEYSTONE_DB_USER }}"
+      sudo: yes
+      sudo_user: keystone
+
+    - name: Initiate keystone database
+      keystone_manage:
+        action: dbsync
+      sudo: yes
+      sudo_user: keystone
+
+    - name: Enable and start openstack-keystone service
+      service:
+        name: openstack-keystone.service
+        enabled: yes
+        state: started
+
+    - name: Create admin tenant
+      keystone_user:
+        tenant: admin
+        tenant_description: Admin Tenant
+        token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+        endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+    - name: Create admin user for the admin tenant
+      keystone_user:
+        user: admin
+        tenant: admin
+        password: "{{ KEYSTONE_ADMIN_PASSWORD }}"
+        token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+        endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+    - name: Create admin role for admin user in the admin tenant
+      keystone_user:
+        role: admin
+        user: admin
+        tenant: admin
+        token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+        endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+    - name: Create service tenant
+      keystone_user:
+        tenant: service
+        tenant_description: Service Tenant
+        token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+        endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+    - name: Add keystone endpoint
+      keystone_service:
+        name: keystone
+        type: identity
+        description: Keystone Identity Service
+        publicurl: http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+        internalurl: http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+        adminurl: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+        region: regionOne
+        token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+        endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
diff --git a/install-files/openstack/usr/share/openstack/keystone/keystone-paste.ini b/install-files/openstack/usr/share/openstack/keystone/keystone-paste.ini
new file mode 100644
index 00000000..46f994c3
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/keystone/keystone-paste.ini
@@ -0,0 +1,121 @@
+# Keystone PasteDeploy configuration file.
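
A note on the template tasks in the playbook above: Ansible's template
module renders these files with Jinja2, which is where placeholders such
as {{ KEYSTONE_DB_USER }} get their values. The same substitution by
hand, with hypothetical values:

    from jinja2 import Template

    raw = ('connection=postgresql://{{ KEYSTONE_DB_USER }}:'
           '{{ KEYSTONE_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}'
           '/keystone')
    print Template(raw).render(KEYSTONE_DB_USER='keystone',
                               KEYSTONE_DB_PASSWORD='secret',
                               CONTROLLER_HOST_ADDRESS='192.0.2.10')
    # connection=postgresql://keystone:secret@192.0.2.10/keystone
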
+ +[filter:debug] +paste.filter_factory = keystone.common.wsgi:Debug.factory + +[filter:build_auth_context] +paste.filter_factory = keystone.middleware:AuthContextMiddleware.factory + +[filter:token_auth] +paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory + +[filter:admin_token_auth] +paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory + +[filter:xml_body] +paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory + +[filter:xml_body_v2] +paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV2.factory + +[filter:xml_body_v3] +paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV3.factory + +[filter:json_body] +paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory + +[filter:user_crud_extension] +paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory + +[filter:crud_extension] +paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory + +[filter:ec2_extension] +paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory + +[filter:ec2_extension_v3] +paste.filter_factory = keystone.contrib.ec2:Ec2ExtensionV3.factory + +[filter:federation_extension] +paste.filter_factory = keystone.contrib.federation.routers:FederationExtension.factory + +[filter:oauth1_extension] +paste.filter_factory = keystone.contrib.oauth1.routers:OAuth1Extension.factory + +[filter:s3_extension] +paste.filter_factory = keystone.contrib.s3:S3Extension.factory + +[filter:endpoint_filter_extension] +paste.filter_factory = keystone.contrib.endpoint_filter.routers:EndpointFilterExtension.factory + +[filter:endpoint_policy_extension] +paste.filter_factory = keystone.contrib.endpoint_policy.routers:EndpointPolicyExtension.factory + +[filter:simple_cert_extension] +paste.filter_factory = keystone.contrib.simple_cert:SimpleCertExtension.factory + +[filter:revoke_extension] +paste.filter_factory = keystone.contrib.revoke.routers:RevokeExtension.factory + +[filter:url_normalize] +paste.filter_factory = keystone.middleware:NormalizingFilter.factory + +[filter:sizelimit] +paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory + +[filter:stats_monitoring] +paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory + +[filter:stats_reporting] +paste.filter_factory = keystone.contrib.stats:StatsExtension.factory + +[filter:access_log] +paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory + +[app:public_service] +paste.app_factory = keystone.service:public_app_factory + +[app:service_v3] +paste.app_factory = keystone.service:v3_app_factory + +[app:admin_service] +paste.app_factory = keystone.service:admin_app_factory + +[pipeline:public_api] +# The last item in this pipeline must be public_service or an equivalent +# application. It cannot be a filter. +pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension user_crud_extension public_service + +[pipeline:admin_api] +# The last item in this pipeline must be admin_service or an equivalent +# application. It cannot be a filter. +pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension s3_extension crud_extension admin_service + +[pipeline:api_v3] +# The last item in this pipeline must be service_v3 or an equivalent +# application. It cannot be a filter. 
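
A paste pipeline is nested WSGI middleware: each filter wraps everything
to its right, and the trailing app answers the request. Schematically, in
plain WSGI rather than Keystone's real factories:

    def build_pipeline(app, filters):
        # "pipeline = f1 f2 app" composes as f1(f2(app)).
        for factory in reversed(filters):
            app = factory(app)
        return app

    def sizelimit(app):
        def middleware(environ, start_response):
            # ...reject over-large request bodies here...
            return app(environ, start_response)
        return middleware

    def service_v3(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['v3']

    wsgi_app = build_pipeline(service_v3, [sizelimit])
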
+pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v3 json_body ec2_extension_v3 s3_extension simple_cert_extension revoke_extension service_v3 + +[app:public_version_service] +paste.app_factory = keystone.service:public_version_app_factory + +[app:admin_version_service] +paste.app_factory = keystone.service:admin_version_app_factory + +[pipeline:public_version_api] +pipeline = sizelimit url_normalize xml_body public_version_service + +[pipeline:admin_version_api] +pipeline = sizelimit url_normalize xml_body admin_version_service + +[composite:main] +use = egg:Paste#urlmap +/v2.0 = public_api +/v3 = api_v3 +/ = public_version_api + +[composite:admin] +use = egg:Paste#urlmap +/v2.0 = admin_api +/v3 = api_v3 +/ = admin_version_api diff --git a/install-files/openstack/usr/share/openstack/keystone/keystone.conf b/install-files/openstack/usr/share/openstack/keystone/keystone.conf new file mode 100644 index 00000000..4e04c81b --- /dev/null +++ b/install-files/openstack/usr/share/openstack/keystone/keystone.conf @@ -0,0 +1,1588 @@ +[DEFAULT] + +# +# Options defined in keystone +# + +# A "shared secret" that can be used to bootstrap Keystone. +# This "token" does not represent a user, and carries no +# explicit authorization. To disable in production (highly +# recommended), remove AdminTokenAuthMiddleware from your +# paste application pipelines (for example, in keystone- +# paste.ini). (string value) +admin_token={{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }} + +# The IP address of the network interface for the public +# service to listen on. (string value) +# Deprecated group/name - [DEFAULT]/bind_host +#public_bind_host=0.0.0.0 + +# The IP address of the network interface for the admin +# service to listen on. (string value) +# Deprecated group/name - [DEFAULT]/bind_host +#admin_bind_host=0.0.0.0 + +# (Deprecated) The port which the OpenStack Compute service +# listens on. This option was only used for string replacement +# in the templated catalog backend. Templated catalogs should +# replace the "$(compute_port)s" substitution with the static +# port of the compute service. As of Juno, this option is +# deprecated and will be removed in the L release. (integer +# value) +#compute_port=8774 + +# The port number which the admin service listens on. (integer +# value) +admin_port=35357 + +# The port number which the public service listens on. +# (integer value) +public_port=5000 + +# The base public endpoint URL for Keystone that is advertised +# to clients (NOTE: this does NOT affect how Keystone listens +# for connections). Defaults to the base host URL of the +# request. E.g. a request to http://server:5000/v2.0/users +# will default to http://server:5000. You should only need to +# set this value if the base URL contains a path (e.g. +# /prefix/v2.0) or the endpoint should be found on a different +# server. (string value) +#public_endpoint= + +# The base admin endpoint URL for Keystone that is advertised +# to clients (NOTE: this does NOT affect how Keystone listens +# for connections). Defaults to the base host URL of the +# request. E.g. a request to http://server:35357/v2.0/users +# will default to http://server:35357. You should only need to +# set this value if the base URL contains a path (e.g. +# /prefix/v2.0) or the endpoint should be found on a different +# server. (string value) +#admin_endpoint= + +# The number of worker processes to serve the public WSGI +# application. Defaults to number of CPUs (minimum of 2). 
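
That "number of CPUs (minimum of 2)" default is simply:

    import multiprocessing

    default_workers = max(2, multiprocessing.cpu_count())
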
+# (integer value) +#public_workers= + +# The number of worker processes to serve the admin WSGI +# application. Defaults to number of CPUs (minimum of 2). +# (integer value) +#admin_workers= + +# Enforced by optional sizelimit middleware +# (keystone.middleware:RequestBodySizeLimiter). (integer +# value) +#max_request_body_size=114688 + +# Limit the sizes of user & project ID/names. (integer value) +#max_param_size=64 + +# Similar to max_param_size, but provides an exception for +# token values. (integer value) +#max_token_size=8192 + +# During a SQL upgrade member_role_id will be used to create a +# new role that will replace records in the assignment table +# with explicit role grants. After migration, the +# member_role_id will be used in the API add_user_to_project. +# (string value) +#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab + +# During a SQL upgrade member_role_name will be used to create +# a new role that will replace records in the assignment table +# with explicit role grants. After migration, member_role_name +# will be ignored. (string value) +#member_role_name=_member_ + +# The value passed as the keyword "rounds" to passlib's +# encrypt method. (integer value) +#crypt_strength=40000 + +# Set this to true if you want to enable TCP_KEEPALIVE on +# server sockets, i.e. sockets used by the Keystone wsgi +# server for client connections. (boolean value) +#tcp_keepalive=false + +# Sets the value of TCP_KEEPIDLE in seconds for each server +# socket. Only applies if tcp_keepalive is true. Not supported +# on OS X. (integer value) +#tcp_keepidle=600 + +# The maximum number of entities that will be returned in a +# collection, with no limit set by default. This global limit +# may be then overridden for a specific driver, by specifying +# a list_limit in the appropriate section (e.g. [assignment]). +# (integer value) +#list_limit= + +# Set this to false if you want to enable the ability for +# user, group and project entities to be moved between domains +# by updating their domain_id. Allowing such movement is not +# recommended if the scope of a domain admin is being +# restricted by use of an appropriate policy file (see +# policy.v3cloudsample as an example). (boolean value) +#domain_id_immutable=true + +# If set to true, strict password length checking is performed +# for password manipulation. If a password exceeds the maximum +# length, the operation will fail with an HTTP 403 Forbidden +# error. If set to false, passwords are automatically +# truncated to the maximum length. (boolean value) +#strict_password_check=false + + +# +# Options defined in oslo.messaging +# + +# Use durable queues in amqp. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size=30 + +# Qpid broker hostname. (string value) +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. 
(string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The number of prefetched messages held by receiver. (integer +# value) +#qpid_receiver_capacity=1 + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +rabbit_host={{ RABBITMQ_HOST }} + +# The RabbitMQ broker port where a single node is used. +# (integer value) +rabbit_port={{ RABBITMQ_PORT }} + +# RabbitMQ HA cluster host:port pairs. (list value) +rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +rabbit_userid={{ RABBITMQ_USER }} + +# The RabbitMQ password. (string value) +rabbit_password={{ RABBITMQ_PASSWORD }} + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=keystone + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. 
(integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+#notification_driver=
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=keystone
+
+
+#
+# Options defined in keystone.notifications
+#
+
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=
+
+
+#
+# Options defined in keystone.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=
+
+
+#
+# Options defined in keystone.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error. (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context. (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context.
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format.
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
+
+# Enables or disables publication of error events. (boolean
+# value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log
+# message. (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log
+# message.
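
These logging_* options are ordinary Python logging format strings, with
log_date_format feeding %(asctime)s. Wiring the context-free default up
by hand looks like this (%(instance)s is filled in by the oslo logging
adapter, so it is left out here):

    import logging

    fmt = ('%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
           '%(name)s [-] %(message)s')
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(fmt, '%Y-%m-%d %H:%M:%S'))
    logging.getLogger().addHandler(handler)
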
(string value) +#instance_uuid_format="[instance: %(uuid)s] " + +# The name of a logging configuration file. This file is +# appended to any existing logging configuration files. For +# details about logging configuration files, see the Python +# logging module documentation. (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append= + +# DEPRECATED. A logging.Formatter log message format string +# which may use any of the available logging.LogRecord +# attributes. This option is deprecated. Please use +# logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format= + +# Format string for %%(asctime)s in log records. Default: +# %(default)s . (string value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If no default is +# set, logging will go to stdout. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file= + +# (Optional) The base directory used for relative --log-file +# paths. (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir= + +# Use syslog for logging. Existing syslog format is DEPRECATED +# during I, and will change in J to honor RFC5424. (boolean +# value) +use_syslog=True + +# (Optional) Enables or disables syslog rfc5424 format for +# logging. If enabled, prefixes the MSG part of the syslog +# message with APP-NAME (RFC5424). The format without the APP- +# NAME is deprecated in I, and will be removed in J. (boolean +# value) +#use_syslog_rfc_format=false + +# Syslog facility to receive log lines. (string value) +#syslog_log_facility=LOG_USER + + +# +# Options defined in keystone.openstack.common.policy +# + +# The JSON file that defines policies. (string value) +#policy_file=policy.json + +# Default rule. Enforced when a requested rule is not found. +# (string value) +#policy_default_rule=default + + +[assignment] + +# +# Options defined in keystone +# + +# Assignment backend driver. (string value) +#driver= + +# Toggle for assignment caching. This has no effect unless +# global caching is enabled. (boolean value) +#caching=true + +# TTL (in seconds) to cache assignment data. This has no +# effect unless global caching is enabled. (integer value) +#cache_time= + +# Maximum number of entities that will be returned in an +# assignment collection. (integer value) +#list_limit= + + +[auth] + +# +# Options defined in keystone +# + +# Default auth methods. (list value) +#methods=external,password,token + +# The password auth plugin module. (string value) +#password=keystone.auth.plugins.password.Password + +# The token auth plugin module. (string value) +#token=keystone.auth.plugins.token.Token + +# The external (REMOTE_USER) auth plugin module. (string +# value) +#external=keystone.auth.plugins.external.DefaultDomain + + +[cache] + +# +# Options defined in keystone +# + +# Prefix for building the configuration dictionary for the +# cache region. This should not need to be changed unless +# there is another dogpile.cache region with the same +# configuration name. (string value) +#config_prefix=cache.keystone + +# Default TTL, in seconds, for any cached item in the +# dogpile.cache region. This applies to any cached method that +# doesn't have an explicit cache expiration time defined for +# it. (integer value) +#expiration_time=600 + +# Dogpile.cache backend module. It is recommended that +# Memcache with pooling (keystone.cache.memcache_pool) or +# Redis (dogpile.cache.redis) be used in production +# deployments. 
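
For a sense of what this section drives underneath, a plain dogpile.cache
region matching the defaults here (memory backend, 600-second TTL):

    from dogpile.cache import make_region

    region = make_region().configure(
        'dogpile.cache.memory',
        expiration_time=600,
    )

    @region.cache_on_arguments()
    def lookup(key):
        # Expensive call, cached for expiration_time seconds.
        return {}
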
Small workloads (single process) like devstack
+# can use the dogpile.cache.memory backend. (string value)
+#backend=keystone.common.cache.noop
+
+# Arguments supplied to the backend module. Specify this
+# option once per argument to be passed to the dogpile.cache
+# backend. Example format: "<argname>:<value>". (multi valued)
+#backend_argument=
+
+# Proxy classes to import that will affect the way the
+# dogpile.cache backend functions. See the dogpile.cache
+# documentation on changing-backend-behavior. (list value)
+#proxies=
+
+# Global toggle for all caching using the should_cache_fn
+# mechanism. (boolean value)
+#enabled=false
+
+# Extra debugging from the cache backend (cache keys,
+# get/set/delete/etc calls). This is only really useful if you
+# need to see the specific cache-backend get/set/delete calls
+# with the keys/values. Typically this should be left set to
+# false. (boolean value)
+#debug_cache_backend=false
+
+# Memcache servers in the format of "host:port".
+# (dogpile.cache.memcache and keystone.cache.memcache_pool
+# backends only) (list value)
+#memcache_servers=localhost:11211
+
+# Number of seconds memcached server is considered dead before
+# it is tried again. (dogpile.cache.memcache and
+# keystone.cache.memcache_pool backends only) (integer value)
+#memcache_dead_retry=300
+
+# Timeout in seconds for every call to a server.
+# (dogpile.cache.memcache and keystone.cache.memcache_pool
+# backends only) (integer value)
+#memcache_socket_timeout=3
+
+# Max total number of open connections to every memcached
+# server. (keystone.cache.memcache_pool backend only) (integer
+# value)
+#memcache_pool_maxsize=10
+
+# Number of seconds a connection to memcached is held unused
+# in the pool before it is closed.
+# (keystone.cache.memcache_pool backend only) (integer value)
+#memcache_pool_unused_timeout=60
+
+# Number of seconds that an operation will wait to get a
+# memcache client connection. (integer value)
+#memcache_pool_connection_get_timeout=10
+
+
+[catalog]
+
+#
+# Options defined in keystone
+#
+
+# Catalog template file name for use with the template catalog
+# backend. (string value)
+#template_file=default_catalog.templates
+
+# Catalog backend driver. (string value)
+#driver=keystone.catalog.backends.sql.Catalog
+
+# Toggle for catalog caching. This has no effect unless global
+# caching is enabled. (boolean value)
+#caching=true
+
+# Time to cache catalog data (in seconds). This has no effect
+# unless global and catalog caching are enabled. (integer
+# value)
+#cache_time=
+
+# Maximum number of entities that will be returned in a
+# catalog collection. (integer value)
+#list_limit=
+
+# (Deprecated) List of possible substitutions for use in
+# formatting endpoints. Use caution when modifying this list.
+# It will give users with permission to create endpoints the
+# ability to see those values in your configuration file. This
+# option will be removed in Juno. (list value)
+#endpoint_substitution_whitelist=tenant_id,user_id,public_bind_host,admin_bind_host,compute_host,compute_port,admin_port,public_port,public_endpoint,admin_endpoint
+
+
+[credential]
+
+#
+# Options defined in keystone
+#
+
+# Credential backend driver. (string value)
+#driver=keystone.credential.backends.sql.Credential
+
+
+[database]
+
+#
+# Options defined in oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+#sqlite_db=oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous=true
+
+# The back end to use for the database.
(string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend=sqlalchemy + +# The SQLAlchemy connection string to use to connect to the +# database. (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection= +connection=postgresql://{{ KEYSTONE_DB_USER }}:{{ KEYSTONE_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/keystone + +# The SQLAlchemy connection string to use to connect to the +# slave database. (string value) +#slave_connection= + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode=TRADITIONAL + +# Timeout before idle SQL connections are reaped. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# Maximum db connection retries during startup. Set to -1 to +# specify an infinite retry count. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries=10 + +# Interval between retries of opening a SQL connection. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with SQLAlchemy. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information: 0=None, +# 100=Everything. (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug=0 + +# Add Python stack traces to SQL as comment strings. (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace=false + +# If set, use this value for pool_timeout with SQLAlchemy. +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + +# Enable the experimental use of database reconnect on +# connection lost. (boolean value) +#use_db_reconnect=false + +# Seconds between database connection retries. (integer value) +#db_retry_interval=1 + +# If True, increases the interval between database connection +# retries up to db_max_retry_interval. (boolean value) +#db_inc_retry_interval=true + +# If db_inc_retry_interval is set, the maximum seconds between +# database connection retries. (integer value) +#db_max_retry_interval=10 + +# Maximum database connection retries before error is raised. +# Set to -1 to specify an infinite retry count. (integer +# value) +#db_max_retries=20 + + +[ec2] + +# +# Options defined in keystone +# + +# EC2Credential backend driver. 
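
Read together, db_retry_interval, db_inc_retry_interval,
db_max_retry_interval and db_max_retries describe a capped, roughly
exponential reconnect backoff. The schedule they imply, as a sketch
rather than oslo.db's actual code:

    def retry_schedule(interval=1, inc=True, max_interval=10,
                       max_retries=20):
        # Yields the delay before each reconnection attempt.
        delay = interval
        for _ in range(max_retries):
            yield delay
            if inc:
                delay = min(delay * 2, max_interval)

    # list(retry_schedule())[:6] -> [1, 2, 4, 8, 10, 10]
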
(string value) +#driver=keystone.contrib.ec2.backends.kvs.Ec2 + + +[endpoint_filter] + +# +# Options defined in keystone +# + +# Endpoint Filter backend driver (string value) +#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter + +# Toggle to return all active endpoints if no filter exists. +# (boolean value) +#return_all_endpoints_if_no_filter=true + + +[endpoint_policy] + +# +# Options defined in keystone +# + +# Endpoint policy backend driver (string value) +#driver=keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy + + +[federation] + +# +# Options defined in keystone +# + +# Federation backend driver. (string value) +#driver=keystone.contrib.federation.backends.sql.Federation + +# Value to be used when filtering assertion parameters from +# the environment. (string value) +#assertion_prefix= + + +[identity] + +# +# Options defined in keystone +# + +# This references the domain to use for all Identity API v2 +# requests (which are not aware of domains). A domain with +# this ID will be created for you by keystone-manage db_sync +# in migration 008. The domain referenced by this ID cannot be +# deleted on the v3 API, to prevent accidentally breaking the +# v2 API. There is nothing special about this domain, other +# than the fact that it must exist to order to maintain +# support for your v2 clients. (string value) +#default_domain_id=default + +# A subset (or all) of domains can have their own identity +# driver, each with their own partial configuration file in a +# domain configuration directory. Only values specific to the +# domain need to be placed in the domain specific +# configuration file. This feature is disabled by default; set +# to true to enable. (boolean value) +#domain_specific_drivers_enabled=false + +# Path for Keystone to locate the domain specific identity +# configuration files if domain_specific_drivers_enabled is +# set to true. (string value) +#domain_config_dir=/etc/keystone/domains + +# Identity backend driver. (string value) +#driver=keystone.identity.backends.sql.Identity + +# Maximum supported length for user passwords; decrease to +# improve performance. (integer value) +#max_password_length=4096 + +# Maximum number of entities that will be returned in an +# identity collection. (integer value) +#list_limit= + + +[identity_mapping] + +# +# Options defined in keystone +# + +# Keystone Identity Mapping backend driver. (string value) +#driver=keystone.identity.mapping_backends.sql.Mapping + +# Public ID generator for user and group entities. The +# Keystone identity mapper only supports generators that +# produce no more than 64 characters. (string value) +#generator=keystone.identity.id_generators.sha256.Generator + +# The format of user and group IDs changed in Juno for +# backends that do not generate UUIDs (e.g. LDAP), with +# keystone providing a hash mapping to the underlying +# attribute in LDAP. By default this mapping is disabled, +# which ensures that existing IDs will not change. Even when +# the mapping is enabled by using domain specific drivers, any +# users and groups from the default domain being handled by +# LDAP will still not be mapped to ensure their IDs remain +# backward compatible. Setting this value to False will enable +# the mapping for even the default LDAP driver. It is only +# safe to do this if you do not already have assignments for +# users and groups from the default LDAP domain, and it is +# acceptable for Keystone to provide the different IDs to +# clients than it did previously. 
Typically this means that +# the only time you can set this value to False is when +# configuring a fresh installation. (boolean value) +#backward_compatible_ids=true + + +[kvs] + +# +# Options defined in keystone +# + +# Extra dogpile.cache backend modules to register with the +# dogpile.cache library. (list value) +#backends= + +# Prefix for building the configuration dictionary for the KVS +# region. This should not need to be changed unless there is +# another dogpile.cache region with the same configuration +# name. (string value) +#config_prefix=keystone.kvs + +# Toggle to disable using a key-mangling function to ensure +# fixed length keys. This is toggle-able for debugging +# purposes, it is highly recommended to always leave this set +# to true. (boolean value) +#enable_key_mangler=true + +# Default lock timeout for distributed locking. (integer +# value) +#default_lock_timeout=5 + + +[ldap] + +# +# Options defined in keystone +# + +# URL for connecting to the LDAP server. (string value) +#url=ldap://localhost + +# User BindDN to query the LDAP server. (string value) +#user= + +# Password for the BindDN to query the LDAP server. (string +# value) +#password= + +# LDAP server suffix (string value) +#suffix=cn=example,cn=com + +# If true, will add a dummy member to groups. This is required +# if the objectclass for groups requires the "member" +# attribute. (boolean value) +#use_dumb_member=false + +# DN of the "dummy member" to use when "use_dumb_member" is +# enabled. (string value) +#dumb_member=cn=dumb,dc=nonexistent + +# Delete subtrees using the subtree delete control. Only +# enable this option if your LDAP server supports subtree +# deletion. (boolean value) +#allow_subtree_delete=false + +# The LDAP scope for queries, this can be either "one" +# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree). +# (string value) +#query_scope=one + +# Maximum results per page; a value of zero ("0") disables +# paging. (integer value) +#page_size=0 + +# The LDAP dereferencing option for queries. This can be +# either "never", "searching", "always", "finding" or +# "default". The "default" option falls back to using default +# dereferencing configured by your ldap.conf. (string value) +#alias_dereferencing=default + +# Sets the LDAP debugging level for LDAP calls. A value of 0 +# means that debugging is not enabled. This value is a +# bitmask, consult your LDAP documentation for possible +# values. (integer value) +#debug_level= + +# Override the system's default referral chasing behavior for +# queries. (boolean value) +#chase_referrals= + +# Search base for users. (string value) +#user_tree_dn= + +# LDAP search filter for users. (string value) +#user_filter= + +# LDAP objectclass for users. (string value) +#user_objectclass=inetOrgPerson + +# LDAP attribute mapped to user id. WARNING: must not be a +# multivalued attribute. (string value) +#user_id_attribute=cn + +# LDAP attribute mapped to user name. (string value) +#user_name_attribute=sn + +# LDAP attribute mapped to user email. (string value) +#user_mail_attribute=mail + +# LDAP attribute mapped to password. (string value) +#user_pass_attribute=userPassword + +# LDAP attribute mapped to user enabled flag. (string value) +#user_enabled_attribute=enabled + +# Invert the meaning of the boolean enabled values. Some LDAP +# servers use a boolean lock attribute where "true" means an +# account is disabled. Setting "user_enabled_invert = true" +# will allow these lock attributes to be used. 
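
The url, user, password and query_scope options map directly onto a
python-ldap session; for example, with a hypothetical bind DN and search
base:

    import ldap

    conn = ldap.initialize('ldap://localhost')        # url
    conn.simple_bind_s('cn=admin,cn=example,cn=com',  # user
                       'secret')                      # password

    # query_scope=one is SCOPE_ONELEVEL; "sub" would be SCOPE_SUBTREE.
    results = conn.search_s('ou=Users,cn=example,cn=com',
                            ldap.SCOPE_ONELEVEL,
                            '(objectClass=inetOrgPerson)',
                            ['cn', 'sn', 'mail'])
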
This setting
+# will have no effect if "user_enabled_mask" or
+# "user_enabled_emulation" settings are in use. (boolean
+# value)
+#user_enabled_invert=false
+
+# Bitmask integer to indicate the bit that the enabled value
+# is stored in if the LDAP server represents "enabled" as a
+# bit on an integer rather than a boolean. A value of "0"
+# indicates the mask is not used. If this is not set to "0"
+# the typical value is "2". This is typically used when
+# "user_enabled_attribute = userAccountControl". (integer
+# value)
+#user_enabled_mask=0
+
+# Default value to enable users. This should match an
+# appropriate int value if the LDAP server uses non-boolean
+# (bitmask) values to indicate if a user is enabled or
+# disabled. If this is not set to "True" the typical value is
+# "512". This is typically used when "user_enabled_attribute =
+# userAccountControl". (string value)
+#user_enabled_default=True
+
+# List of attributes stripped off the user on update. (list
+# value)
+#user_attribute_ignore=default_project_id,tenants
+
+# LDAP attribute mapped to default_project_id for users.
+# (string value)
+#user_default_project_id_attribute=
+
+# Allow user creation in LDAP backend. (boolean value)
+#user_allow_create=true
+
+# Allow user updates in LDAP backend. (boolean value)
+#user_allow_update=true
+
+# Allow user deletion in LDAP backend. (boolean value)
+#user_allow_delete=true
+
+# If true, Keystone uses an alternative method to determine if
+# a user is enabled or not by checking if they are a member of
+# the "user_enabled_emulation_dn" group. (boolean value)
+#user_enabled_emulation=false
+
+# DN of the group entry to hold enabled users when using
+# enabled emulation. (string value)
+#user_enabled_emulation_dn=
+
+# Additional attribute mappings for users. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#user_additional_attribute_mapping=
+
+# Search base for projects. (string value)
+# Deprecated group/name - [ldap]/tenant_tree_dn
+#project_tree_dn=
+
+# LDAP search filter for projects. (string value)
+# Deprecated group/name - [ldap]/tenant_filter
+#project_filter=
+
+# LDAP objectclass for projects. (string value)
+# Deprecated group/name - [ldap]/tenant_objectclass
+#project_objectclass=groupOfNames
+
+# LDAP attribute mapped to project id. (string value)
+# Deprecated group/name - [ldap]/tenant_id_attribute
+#project_id_attribute=cn
+
+# LDAP attribute mapped to project membership for user.
+# (string value)
+# Deprecated group/name - [ldap]/tenant_member_attribute
+#project_member_attribute=member
+
+# LDAP attribute mapped to project name. (string value)
+# Deprecated group/name - [ldap]/tenant_name_attribute
+#project_name_attribute=ou
+
+# LDAP attribute mapped to project description. (string value)
+# Deprecated group/name - [ldap]/tenant_desc_attribute
+#project_desc_attribute=description
+
+# LDAP attribute mapped to project enabled. (string value)
+# Deprecated group/name - [ldap]/tenant_enabled_attribute
+#project_enabled_attribute=enabled
+
+# LDAP attribute mapped to project domain_id. (string value)
+# Deprecated group/name - [ldap]/tenant_domain_id_attribute
+#project_domain_id_attribute=businessCategory
+
+# List of attributes stripped off the project on update. (list
+# value)
+# Deprecated group/name - [ldap]/tenant_attribute_ignore
+#project_attribute_ignore=
+
+# Allow project creation in LDAP backend.
(boolean value)
+# Deprecated group/name - [ldap]/tenant_allow_create
+#project_allow_create=true
+
+# Allow project update in LDAP backend. (boolean value)
+# Deprecated group/name - [ldap]/tenant_allow_update
+#project_allow_update=true
+
+# Allow project deletion in LDAP backend. (boolean value)
+# Deprecated group/name - [ldap]/tenant_allow_delete
+#project_allow_delete=true
+
+# If true, Keystone uses an alternative method to determine if
+# a project is enabled or not by checking if it is a member
+# of the "project_enabled_emulation_dn" group. (boolean value)
+# Deprecated group/name - [ldap]/tenant_enabled_emulation
+#project_enabled_emulation=false
+
+# DN of the group entry to hold enabled projects when using
+# enabled emulation. (string value)
+# Deprecated group/name - [ldap]/tenant_enabled_emulation_dn
+#project_enabled_emulation_dn=
+
+# Additional attribute mappings for projects. Attribute
+# mapping format is <ldap_attr>:<user_attr>, where ldap_attr
+# is the attribute in the LDAP entry and user_attr is the
+# Identity API attribute. (list value)
+# Deprecated group/name - [ldap]/tenant_additional_attribute_mapping
+#project_additional_attribute_mapping=
+
+# Search base for roles. (string value)
+#role_tree_dn=
+
+# LDAP search filter for roles. (string value)
+#role_filter=
+
+# LDAP objectclass for roles. (string value)
+#role_objectclass=organizationalRole
+
+# LDAP attribute mapped to role id. (string value)
+#role_id_attribute=cn
+
+# LDAP attribute mapped to role name. (string value)
+#role_name_attribute=ou
+
+# LDAP attribute mapped to role membership. (string value)
+#role_member_attribute=roleOccupant
+
+# List of attributes stripped off the role on update. (list
+# value)
+#role_attribute_ignore=
+
+# Allow role creation in LDAP backend. (boolean value)
+#role_allow_create=true
+
+# Allow role update in LDAP backend. (boolean value)
+#role_allow_update=true
+
+# Allow role deletion in LDAP backend. (boolean value)
+#role_allow_delete=true
+
+# Additional attribute mappings for roles. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#role_additional_attribute_mapping=
+
+# Search base for groups. (string value)
+#group_tree_dn=
+
+# LDAP search filter for groups. (string value)
+#group_filter=
+
+# LDAP objectclass for groups. (string value)
+#group_objectclass=groupOfNames
+
+# LDAP attribute mapped to group id. (string value)
+#group_id_attribute=cn
+
+# LDAP attribute mapped to group name. (string value)
+#group_name_attribute=ou
+
+# LDAP attribute mapped to show group membership. (string
+# value)
+#group_member_attribute=member
+
+# LDAP attribute mapped to group description. (string value)
+#group_desc_attribute=description
+
+# List of attributes stripped off the group on update. (list
+# value)
+#group_attribute_ignore=
+
+# Allow group creation in LDAP backend. (boolean value)
+#group_allow_create=true
+
+# Allow group update in LDAP backend. (boolean value)
+#group_allow_update=true
+
+# Allow group deletion in LDAP backend. (boolean value)
+#group_allow_delete=true
+
+# Additional attribute mappings for groups. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#group_additional_attribute_mapping=
+
+# CA certificate file path for communicating with LDAP
+# servers. (string value)
+#tls_cacertfile=
+
+# CA certificate directory path for communicating with LDAP
+# servers.
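A quick way to validate the user_* and connection options above is to run the equivalent query outside Keystone. A minimal sketch with the python-ldap module; the bind DN, password and search base are placeholders for your own deployment:

import ldap

# Placeholder values mirroring the [ldap] options above.
url = 'ldap://localhost'
bind_dn = 'cn=admin,dc=example,dc=com'        # the 'user' option
bind_pw = 'secret'                            # the 'password' option
user_tree_dn = 'ou=users,dc=example,dc=com'   # 'user_tree_dn'
user_filter = '(objectClass=inetOrgPerson)'   # from 'user_objectclass'

conn = ldap.initialize(url)
conn.simple_bind_s(bind_dn, bind_pw)
# query_scope=one maps to SCOPE_ONELEVEL; "sub" would be SCOPE_SUBTREE.
for dn, attrs in conn.search_s(user_tree_dn, ldap.SCOPE_ONELEVEL, user_filter):
    print(dn, attrs.get('sn'))  # user_name_attribute=sn
conn.unbind_s()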
(string value) +#tls_cacertdir= + +# Enable TLS for communicating with LDAP servers. (boolean +# value) +#use_tls=false + +# Valid options for tls_req_cert are demand, never, and allow. +# (string value) +#tls_req_cert=demand + +# Enable LDAP connection pooling. (boolean value) +#use_pool=false + +# Connection pool size. (integer value) +#pool_size=10 + +# Maximum count of reconnect trials. (integer value) +#pool_retry_max=3 + +# Time span in seconds to wait between two reconnect trials. +# (floating point value) +#pool_retry_delay=0.1 + +# Connector timeout in seconds. Value -1 indicates indefinite +# wait for response. (integer value) +#pool_connection_timeout=-1 + +# Connection lifetime in seconds. (integer value) +#pool_connection_lifetime=600 + +# Enable LDAP connection pooling for end user authentication. +# If use_pool is disabled, then this setting is meaningless +# and is not used at all. (boolean value) +#use_auth_pool=false + +# End user auth connection pool size. (integer value) +#auth_pool_size=100 + +# End user auth connection lifetime in seconds. (integer +# value) +#auth_pool_connection_lifetime=60 + + +[matchmaker_redis] + +# +# Options defined in oslo.messaging +# + +# Host to locate redis. (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + + +[matchmaker_ring] + +# +# Options defined in oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + +[memcache] + +# +# Options defined in keystone +# + +# Memcache servers in the format of "host:port". (list value) +#servers=localhost:11211 + +# Number of seconds memcached server is considered dead before +# it is tried again. This is used by the key value store +# system (e.g. token pooled memcached persistence backend). +# (integer value) +#dead_retry=300 + +# Timeout in seconds for every call to a server. This is used +# by the key value store system (e.g. token pooled memcached +# persistence backend). (integer value) +#socket_timeout=3 + +# Max total number of open connections to every memcached +# server. This is used by the key value store system (e.g. +# token pooled memcached persistence backend). (integer value) +#pool_maxsize=10 + +# Number of seconds a connection to memcached is held unused +# in the pool before it is closed. This is used by the key +# value store system (e.g. token pooled memcached persistence +# backend). (integer value) +#pool_unused_timeout=60 + +# Number of seconds that an operation will wait to get a +# memcache client connection. This is used by the key value +# store system (e.g. token pooled memcached persistence +# backend). (integer value) +#pool_connection_get_timeout=10 + + +[oauth1] + +# +# Options defined in keystone +# + +# Credential backend driver. (string value) +#driver=keystone.contrib.oauth1.backends.sql.OAuth1 + +# Duration (in seconds) for the OAuth Request Token. (integer +# value) +#request_token_duration=28800 + +# Duration (in seconds) for the OAuth Access Token. (integer +# value) +#access_token_duration=86400 + + +[os_inherit] + +# +# Options defined in keystone +# + +# role-assignment inheritance to projects from owning domain +# can be optionally enabled. (boolean value) +#enabled=false + + +[paste_deploy] + +# +# Options defined in keystone +# + +# Name of the paste configuration file that defines the +# available pipelines. 
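The [memcache] servers listed further up are plain host:port pairs, so the cache can be smoke-tested independently of Keystone. A small sketch using the python-memcached package (assumed to be installed), against the default server value:

import memcache

# Mirrors the [memcache] 'servers' option (one host:port per entry).
mc = memcache.Client(['localhost:11211'])
mc.set('keystone-smoke-test', 'ok', time=30)
assert mc.get('keystone-smoke-test') == 'ok'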
(string value)
+#config_file=keystone-paste.ini
+
+
+[policy]
+
+#
+# Options defined in keystone
+#
+
+# Policy backend driver. (string value)
+#driver=keystone.policy.backends.sql.Policy
+
+# Maximum number of entities that will be returned in a policy
+# collection. (integer value)
+#list_limit=
+
+
+[revoke]
+
+#
+# Options defined in keystone
+#
+
+# An implementation of the backend for persisting revocation
+# events. (string value)
+#driver=keystone.contrib.revoke.backends.kvs.Revoke
+
+# This value (calculated in seconds) is added to token
+# expiration before a revocation event may be removed from the
+# backend. (integer value)
+#expiration_buffer=1800
+
+# Toggle for revocation event caching. This has no effect
+# unless global caching is enabled. (boolean value)
+#caching=true
+
+
+[saml]
+
+#
+# Options defined in keystone
+#
+
+# Default TTL, in seconds, for any generated SAML assertion
+# created by Keystone. (integer value)
+#assertion_expiration_time=3600
+
+# Binary to be called for XML signing. Install the appropriate
+# package, specify absolute path or adjust your PATH
+# environment variable if the binary cannot be found. (string
+# value)
+#xmlsec1_binary=xmlsec1
+
+# Path of the certfile for SAML signing. For non-production
+# environments, you may be interested in using `keystone-
+# manage pki_setup` to generate self-signed certificates.
+# Note, the path cannot contain a comma. (string value)
+#certfile=/etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for SAML signing. Note, the path cannot
+# contain a comma. (string value)
+#keyfile=/etc/keystone/ssl/private/signing_key.pem
+
+# Entity ID value for unique Identity Provider identification.
+# Usually FQDN is set with a suffix. A value is required to
+# generate IDP Metadata. For example:
+# https://keystone.example.com/v3/OS-FEDERATION/saml2/idp
+# (string value)
+#idp_entity_id=
+
+# Identity Provider Single-Sign-On service value, required in
+# the Identity Provider's metadata. A value is required to
+# generate IDP Metadata. For example:
+# https://keystone.example.com/v3/OS-FEDERATION/saml2/sso
+# (string value)
+#idp_sso_endpoint=
+
+# Language used by the organization. (string value)
+#idp_lang=en
+
+# Organization name the installation belongs to. (string
+# value)
+#idp_organization_name=
+
+# Organization name to be displayed. (string value)
+#idp_organization_display_name=
+
+# URL of the organization. (string value)
+#idp_organization_url=
+
+# Company of contact person. (string value)
+#idp_contact_company=
+
+# Given name of contact person. (string value)
+#idp_contact_name=
+
+# Surname of contact person. (string value)
+#idp_contact_surname=
+
+# Email address of contact person. (string value)
+#idp_contact_email=
+
+# Telephone number of contact person. (string value)
+#idp_contact_telephone=
+
+# Contact type. Allowed values are: technical, support,
+# administrative, billing, and other (string value)
+#idp_contact_type=other
+
+# Path to the Identity Provider Metadata file. This file
+# should be generated with the keystone-manage
+# saml_idp_metadata command. (string value)
+#idp_metadata_path=/etc/keystone/saml2_idp_metadata.xml
+
+
+[signing]
+
+#
+# Options defined in keystone
+#
+
+# Deprecated in favor of provider in the [token] section.
+# (string value)
+#token_format=
+
+# Path of the certfile for token signing. For non-production
+# environments, you may be interested in using `keystone-
+# manage pki_setup` to generate self-signed certificates.
+# (string value) +#certfile=/etc/keystone/ssl/certs/signing_cert.pem + +# Path of the keyfile for token signing. (string value) +#keyfile=/etc/keystone/ssl/private/signing_key.pem + +# Path of the CA for token signing. (string value) +#ca_certs=/etc/keystone/ssl/certs/ca.pem + +# Path of the CA key for token signing. (string value) +#ca_key=/etc/keystone/ssl/private/cakey.pem + +# Key size (in bits) for token signing cert (auto generated +# certificate). (integer value) +#key_size=2048 + +# Days the token signing cert is valid for (auto generated +# certificate). (integer value) +#valid_days=3650 + +# Certificate subject (auto generated certificate) for token +# signing. (string value) +#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com + + +[ssl] + +# +# Options defined in keystone +# + +# Toggle for SSL support on the Keystone eventlet servers. +# (boolean value) +#enable=false + +# Path of the certfile for SSL. For non-production +# environments, you may be interested in using `keystone- +# manage ssl_setup` to generate self-signed certificates. +# (string value) +#certfile=/etc/keystone/ssl/certs/keystone.pem + +# Path of the keyfile for SSL. (string value) +#keyfile=/etc/keystone/ssl/private/keystonekey.pem + +# Path of the ca cert file for SSL. (string value) +#ca_certs=/etc/keystone/ssl/certs/ca.pem + +# Path of the CA key file for SSL. (string value) +#ca_key=/etc/keystone/ssl/private/cakey.pem + +# Require client certificate. (boolean value) +#cert_required=false + +# SSL key length (in bits) (auto generated certificate). +# (integer value) +#key_size=1024 + +# Days the certificate is valid for once signed (auto +# generated certificate). (integer value) +#valid_days=3650 + +# SSL certificate subject (auto generated certificate). +# (string value) +#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost + + +[stats] + +# +# Options defined in keystone +# + +# Stats backend driver. (string value) +#driver=keystone.contrib.stats.backends.kvs.Stats + + +[token] + +# +# Options defined in keystone +# + +# External auth mechanisms that should add bind information to +# token, e.g., kerberos,x509. (list value) +#bind= + +# Enforcement policy on tokens presented to Keystone with bind +# information. One of disabled, permissive, strict, required +# or a specifically required bind mode, e.g., kerberos or x509 +# to require binding to that authentication. (string value) +#enforce_token_bind=permissive + +# Amount of time a token should remain valid (in seconds). +# (integer value) +#expiration=3600 + +# Controls the token construction, validation, and revocation +# operations. Core providers are +# "keystone.token.providers.[pkiz|pki|uuid].Provider". The +# default provider is pkiz. (string value) +provider=keystone.token.providers.uuid.Provider + +# Token persistence backend driver. (string value) +driver=keystone.token.backends.sql.Token + +# Toggle for token system caching. This has no effect unless +# global caching is enabled. (boolean value) +#caching=true + +# Time to cache the revocation list and the revocation events +# if revoke extension is enabled (in seconds). This has no +# effect unless global and token caching are enabled. (integer +# value) +#revocation_cache_time=3600 + +# Time to cache tokens (in seconds). This has no effect unless +# global and token caching are enabled. (integer value) +#cache_time= + +# Revoke token by token identifier. Setting revoke_by_id to +# true enables various forms of enumerating tokens, e.g. `list +# tokens for user`. 
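With the uuid provider and sql persistence driver selected above, a token is an opaque string stored server-side and valid for `expiration` seconds. A minimal sketch that requests one with python-keystoneclient; the address and credentials are placeholders:

from keystoneclient.v2_0 import client

# Placeholders; substitute your controller address and admin credentials.
keystone = client.Client(username='admin',
                         password='secret',
                         tenant_name='admin',
                         auth_url='http://controller:35357/v2.0')

# An opaque UUID token issued under the settings above; it remains
# valid for 'expiration' seconds (3600 by default).
print(keystone.auth_token)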
These enumerations are processed to +# determine the list of tokens to revoke. Only disable if you +# are switching to using the Revoke extension with a backend +# other than KVS, which stores events in memory. (boolean +# value) +#revoke_by_id=true + +# The hash algorithm to use for PKI tokens. This can be set to +# any algorithm that hashlib supports. WARNING: Before +# changing this value, the auth_token middleware must be +# configured with the hash_algorithms, otherwise token +# revocation will not be processed correctly. (string value) +#hash_algorithm=md5 + + +[trust] + +# +# Options defined in keystone +# + +# Delegation and impersonation features can be optionally +# disabled. (boolean value) +#enabled=true + +# Trust backend driver. (string value) +#driver=keystone.trust.backends.sql.Trust + + diff --git a/install-files/openstack/usr/share/openstack/keystone/logging.conf b/install-files/openstack/usr/share/openstack/keystone/logging.conf new file mode 100644 index 00000000..6cb8c425 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/keystone/logging.conf @@ -0,0 +1,65 @@ +[loggers] +keys=root,access + +[handlers] +keys=production,file,access_file,devel + +[formatters] +keys=minimal,normal,debug + + +########### +# Loggers # +########### + +[logger_root] +level=WARNING +handlers=file + +[logger_access] +level=INFO +qualname=access +handlers=access_file + + +################ +# Log Handlers # +################ + +[handler_production] +class=handlers.SysLogHandler +level=ERROR +formatter=normal +args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) + +[handler_file] +class=handlers.WatchedFileHandler +level=WARNING +formatter=normal +args=('error.log',) + +[handler_access_file] +class=handlers.WatchedFileHandler +level=INFO +formatter=minimal +args=('access.log',) + +[handler_devel] +class=StreamHandler +level=NOTSET +formatter=debug +args=(sys.stdout,) + + +################## +# Log Formatters # +################## + +[formatter_minimal] +format=%(message)s + +[formatter_normal] +format=(%(name)s): %(asctime)s %(levelname)s %(message)s + +[formatter_debug] +format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s diff --git a/install-files/openstack/usr/share/openstack/keystone/policy.json b/install-files/openstack/usr/share/openstack/keystone/policy.json new file mode 100644 index 00000000..af65205e --- /dev/null +++ b/install-files/openstack/usr/share/openstack/keystone/policy.json @@ -0,0 +1,171 @@ +{ + "admin_required": "role:admin or is_admin:1", + "service_role": "role:service", + "service_or_admin": "rule:admin_required or rule:service_role", + "owner" : "user_id:%(user_id)s", + "admin_or_owner": "rule:admin_required or rule:owner", + + "default": "rule:admin_required", + + "identity:get_region": "", + "identity:list_regions": "", + "identity:create_region": "rule:admin_required", + "identity:update_region": "rule:admin_required", + "identity:delete_region": "rule:admin_required", + + "identity:get_service": "rule:admin_required", + "identity:list_services": "rule:admin_required", + "identity:create_service": "rule:admin_required", + "identity:update_service": "rule:admin_required", + "identity:delete_service": "rule:admin_required", + + "identity:get_endpoint": "rule:admin_required", + "identity:list_endpoints": "rule:admin_required", + "identity:create_endpoint": "rule:admin_required", + "identity:update_endpoint": "rule:admin_required", + "identity:delete_endpoint": "rule:admin_required", + + 
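Rules in this file compose: an expression may name roles (role:...), other rules (rule:...), or attributes of the request (user_id:%(user_id)s). As a sketch of how such a file is evaluated, here is the ec2_delete_credential check above run through the present-day oslo.policy library (the policy code in this tree predates that library, so treat the API as illustrative):

from oslo_config import cfg
from oslo_policy import policy

cfg.CONF([])  # an empty oslo.config namespace is enough here

enforcer = policy.Enforcer(cfg.CONF)
with open('policy.json') as f:
    enforcer.set_rules(policy.Rules.load(f.read()), use_conf=False)

# Request credentials and call target; both dicts are illustrative.
creds = {'user_id': 'u1', 'roles': ['member']}
target = {'user_id': 'u1', 'target.credential.user_id': 'u1'}

# True: the owner branch of the ec2_delete_credential rule matches.
print(enforcer.enforce('identity:ec2_delete_credential', target, creds))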
"identity:get_domain": "rule:admin_required", + "identity:list_domains": "rule:admin_required", + "identity:create_domain": "rule:admin_required", + "identity:update_domain": "rule:admin_required", + "identity:delete_domain": "rule:admin_required", + + "identity:get_project": "rule:admin_required", + "identity:list_projects": "rule:admin_required", + "identity:list_user_projects": "rule:admin_or_owner", + "identity:create_project": "rule:admin_required", + "identity:update_project": "rule:admin_required", + "identity:delete_project": "rule:admin_required", + + "identity:get_user": "rule:admin_required", + "identity:list_users": "rule:admin_required", + "identity:create_user": "rule:admin_required", + "identity:update_user": "rule:admin_required", + "identity:delete_user": "rule:admin_required", + "identity:change_password": "rule:admin_or_owner", + + "identity:get_group": "rule:admin_required", + "identity:list_groups": "rule:admin_required", + "identity:list_groups_for_user": "rule:admin_or_owner", + "identity:create_group": "rule:admin_required", + "identity:update_group": "rule:admin_required", + "identity:delete_group": "rule:admin_required", + "identity:list_users_in_group": "rule:admin_required", + "identity:remove_user_from_group": "rule:admin_required", + "identity:check_user_in_group": "rule:admin_required", + "identity:add_user_to_group": "rule:admin_required", + + "identity:get_credential": "rule:admin_required", + "identity:list_credentials": "rule:admin_required", + "identity:create_credential": "rule:admin_required", + "identity:update_credential": "rule:admin_required", + "identity:delete_credential": "rule:admin_required", + + "identity:ec2_get_credential": "rule:admin_or_owner", + "identity:ec2_list_credentials": "rule:admin_or_owner", + "identity:ec2_create_credential": "rule:admin_or_owner", + "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", + + "identity:get_role": "rule:admin_required", + "identity:list_roles": "rule:admin_required", + "identity:create_role": "rule:admin_required", + "identity:update_role": "rule:admin_required", + "identity:delete_role": "rule:admin_required", + + "identity:check_grant": "rule:admin_required", + "identity:list_grants": "rule:admin_required", + "identity:create_grant": "rule:admin_required", + "identity:revoke_grant": "rule:admin_required", + + "identity:list_role_assignments": "rule:admin_required", + + "identity:get_policy": "rule:admin_required", + "identity:list_policies": "rule:admin_required", + "identity:create_policy": "rule:admin_required", + "identity:update_policy": "rule:admin_required", + "identity:delete_policy": "rule:admin_required", + + "identity:check_token": "rule:admin_required", + "identity:validate_token": "rule:service_or_admin", + "identity:validate_token_head": "rule:service_or_admin", + "identity:revocation_list": "rule:service_or_admin", + "identity:revoke_token": "rule:admin_or_owner", + + "identity:create_trust": "user_id:%(trust.trustor_user_id)s", + "identity:get_trust": "rule:admin_or_owner", + "identity:list_trusts": "", + "identity:list_roles_for_trust": "", + "identity:check_role_for_trust": "", + "identity:get_role_for_trust": "", + "identity:delete_trust": "", + + "identity:create_consumer": "rule:admin_required", + "identity:get_consumer": "rule:admin_required", + "identity:list_consumers": "rule:admin_required", + "identity:delete_consumer": "rule:admin_required", + "identity:update_consumer": "rule:admin_required", + + 
"identity:authorize_request_token": "rule:admin_required", + "identity:list_access_token_roles": "rule:admin_required", + "identity:get_access_token_role": "rule:admin_required", + "identity:list_access_tokens": "rule:admin_required", + "identity:get_access_token": "rule:admin_required", + "identity:delete_access_token": "rule:admin_required", + + "identity:list_projects_for_endpoint": "rule:admin_required", + "identity:add_endpoint_to_project": "rule:admin_required", + "identity:check_endpoint_in_project": "rule:admin_required", + "identity:list_endpoints_for_project": "rule:admin_required", + "identity:remove_endpoint_from_project": "rule:admin_required", + + "identity:create_endpoint_group": "rule:admin_required", + "identity:list_endpoint_groups": "rule:admin_required", + "identity:get_endpoint_group": "rule:admin_required", + "identity:update_endpoint_group": "rule:admin_required", + "identity:delete_endpoint_group": "rule:admin_required", + "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", + "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", + "identity:list_endpoint_groups_for_project": "rule:admin_required", + "identity:add_endpoint_group_to_project": "rule:admin_required", + "identity:remove_endpoint_group_from_project": "rule:admin_required", + + "identity:create_identity_provider": "rule:admin_required", + "identity:list_identity_providers": "rule:admin_required", + "identity:get_identity_providers": "rule:admin_required", + "identity:update_identity_provider": "rule:admin_required", + "identity:delete_identity_provider": "rule:admin_required", + + "identity:create_protocol": "rule:admin_required", + "identity:update_protocol": "rule:admin_required", + "identity:get_protocol": "rule:admin_required", + "identity:list_protocols": "rule:admin_required", + "identity:delete_protocol": "rule:admin_required", + + "identity:create_mapping": "rule:admin_required", + "identity:get_mapping": "rule:admin_required", + "identity:list_mappings": "rule:admin_required", + "identity:delete_mapping": "rule:admin_required", + "identity:update_mapping": "rule:admin_required", + + "identity:get_auth_catalog": "", + "identity:get_auth_projects": "", + "identity:get_auth_domains": "", + + "identity:list_projects_for_groups": "", + "identity:list_domains_for_groups": "", + + "identity:list_revoke_events": "", + + "identity:create_policy_association_for_endpoint": "rule:admin_required", + "identity:check_policy_association_for_endpoint": "rule:admin_required", + "identity:delete_policy_association_for_endpoint": "rule:admin_required", + "identity:create_policy_association_for_service": "rule:admin_required", + "identity:check_policy_association_for_service": "rule:admin_required", + "identity:delete_policy_association_for_service": "rule:admin_required", + "identity:create_policy_association_for_region_and_service": "rule:admin_required", + "identity:check_policy_association_for_region_and_service": "rule:admin_required", + "identity:delete_policy_association_for_region_and_service": "rule:admin_required", + "identity:get_policy_for_endpoint": "rule:admin_required", + "identity:list_endpoints_for_policy": "rule:admin_required" +} diff --git a/install-files/openstack/usr/share/openstack/network.yml b/install-files/openstack/usr/share/openstack/network.yml new file mode 100644 index 00000000..f99f7f1a --- /dev/null +++ b/install-files/openstack/usr/share/openstack/network.yml @@ -0,0 +1,67 @@ +--- +- hosts: localhost + vars_files: + - 
/etc/openstack/network.conf
+ tasks:
+# Create the bridge used by the mapped external network
+
+# Count number of network interfaces (interfaces starting with 'e')
+ - shell: ls /sys/class/net | grep ^e.* | wc -l
+ register: number_interfaces
+
+# Abort if the number of interfaces != 1
+ - fail:
+ msg: Found no network interface, or more than one.
+ when: EXTERNAL_INTERFACE is not defined and number_interfaces.stdout != "1"
+
+ - shell: ls /sys/class/net | grep ^e.*
+ register: interface_name
+ when: EXTERNAL_INTERFACE is not defined
+
+ - set_fact:
+ ETH_INTERFACE: "{{ interface_name.stdout }}"
+ when: EXTERNAL_INTERFACE is not defined
+
+ - set_fact:
+ ETH_INTERFACE: "{{ EXTERNAL_INTERFACE }}"
+ when: EXTERNAL_INTERFACE is defined
+
+ - set_fact:
+ ETH_MAC_ADDRESS: "{{ hostvars['localhost']['ansible_' + ETH_INTERFACE]['macaddress'] }}"
+
+ - name: Create the /run/systemd/network
+ file:
+ path: /run/systemd/network
+ state: directory
+
+ - name: Disable dhcp on the bound physical interface
+ template:
+ src: /usr/share/openstack/extras/00-disable-device.network
+ dest: /run/systemd/network/00-disable-{{ item }}-config.network
+ with_items:
+ - "{{ ETH_INTERFACE }}"
+
+ - name: Disable dhcp on all the internal interfaces
+ template:
+ src: /usr/share/openstack/extras/00-disable-device.network
+ dest: /run/systemd/network/00-disable-{{ item }}-config.network
+ with_items:
+ - ovs-system
+
+ - openvswitch_bridge:
+ bridge: br-ex
+ state: present
+
+ - openvswitch_port:
+ bridge: br-ex
+ port: "{{ ETH_INTERFACE }}"
+ state: present
+
+ - shell: ovs-vsctl set bridge br-ex other-config:hwaddr={{ ETH_MAC_ADDRESS }}
+
+ - name: Enable dhcp on the Open vSwitch device that replaces our external interface
+ template:
+ src: /usr/share/openstack/extras/60-device-dhcp.network
+ dest: /run/systemd/network/60-{{ item }}-dhcp.network
+ with_items:
+ - br-ex
diff --git a/install-files/openstack/usr/share/openstack/neutron-config.yml b/install-files/openstack/usr/share/openstack/neutron-config.yml
new file mode 100644
index 00000000..97f4c76e
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron-config.yml
@@ -0,0 +1,48 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/neutron.conf"
+ tasks:
+
+ - name: Create the neutron user.
+ user:
+ name: neutron
+ comment: Openstack Neutron Daemons
+ shell: /sbin/nologin
+ home: /var/lib/neutron
+
+ - name: Create the /var folders for neutron
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: neutron
+ group: neutron
+ with_items:
+ - /var/run/neutron
+ - /var/lock/neutron
+ - /var/log/neutron
+
+ - name: Get service tenant id needed in neutron.conf
+ shell: |
+ keystone \
+ --os-endpoint http://{{ CONTROLLER_HOST_ADDRESS|quote }}:35357/v2.0 \
+ --os-token {{ KEYSTONE_TEMPORARY_ADMIN_TOKEN|quote }} \
+ tenant-get service | grep id | tr -d " " | cut -d"|" -f3
+ register: tenant_service_id
+
+ - set_fact:
+ SERVICE_TENANT_ID: "{{ tenant_service_id.stdout }}"
+
+ - name: Create the directories needed for Neutron configuration files.
+ file: + path: /etc/{{ item }} + state: directory + with_lines: + - cd /usr/share/openstack && find neutron -type d + + - name: Add configuration needed for neutron using templates + template: + src: /usr/share/openstack/{{ item }} + dest: /etc/{{ item }} + with_lines: + - cd /usr/share/openstack && find neutron -type f diff --git a/install-files/openstack/usr/share/openstack/neutron-db.yml b/install-files/openstack/usr/share/openstack/neutron-db.yml new file mode 100644 index 00000000..91dde6fe --- /dev/null +++ b/install-files/openstack/usr/share/openstack/neutron-db.yml @@ -0,0 +1,51 @@ +--- +- hosts: localhost + vars_files: + - "/etc/openstack/neutron.conf" + tasks: + - name: Create neutron service user in service tenant + keystone_user: + user: "{{ NEUTRON_SERVICE_USER }}" + password: "{{ NEUTRON_SERVICE_PASSWORD }}" + tenant: service + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Add admin role to neutron service user in service tenant + keystone_user: + role: admin + user: "{{ NEUTRON_SERVICE_USER }}" + tenant: service + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - keystone_service: + name: neutron + type: network + description: Openstack Compute Networking + publicurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9696 + internalurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9696 + adminurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9696 + region: regionOne + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Create postgresql user for neutron + postgresql_user: + name: "{{ NEUTRON_DB_USER }}" + password: "{{ NEUTRON_DB_PASSWORD }}" + sudo: yes + sudo_user: neutron + + - name: Create database for neutron services + postgresql_db: + name: neutron + owner: "{{ NEUTRON_DB_USER }}" + sudo: yes + sudo_user: neutron + + - name: Initiate neutron database + shell: | + neutron-db-manage \ + --config-file /etc/neutron/neutron.conf \ + --config-file /etc/neutron/plugins/ml2/ml2_conf.ini \ + upgrade juno + sudo: yes + sudo_user: neutron diff --git a/install-files/openstack/usr/share/openstack/neutron/api-paste.ini b/install-files/openstack/usr/share/openstack/neutron/api-paste.ini new file mode 100644 index 00000000..bbcd4152 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/neutron/api-paste.ini @@ -0,0 +1,30 @@ +[composite:neutron] +use = egg:Paste#urlmap +/: neutronversions +/v2.0: neutronapi_v2_0 + +[composite:neutronapi_v2_0] +use = call:neutron.auth:pipeline_factory +noauth = request_id catch_errors extensions neutronapiapp_v2_0 +keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0 + +[filter:request_id] +paste.filter_factory = neutron.openstack.common.middleware.request_id:RequestIdMiddleware.factory + +[filter:catch_errors] +paste.filter_factory = neutron.openstack.common.middleware.catch_errors:CatchErrorsMiddleware.factory + +[filter:keystonecontext] +paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory + +[filter:extensions] +paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory + +[app:neutronversions] +paste.app_factory = neutron.api.versions:Versions.factory + +[app:neutronapiapp_v2_0] +paste.app_factory = neutron.api.v2.router:APIRouter.factory diff --git a/install-files/openstack/usr/share/openstack/neutron/dhcp_agent.ini b/install-files/openstack/usr/share/openstack/neutron/dhcp_agent.ini new file mode 100644 index 00000000..c6c2b9a7 --- /dev/null +++ 
b/install-files/openstack/usr/share/openstack/neutron/dhcp_agent.ini @@ -0,0 +1,89 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = False +use_syslog = True + +# The DHCP agent will resync its state with Neutron to recover from any +# transient notification or rpc errors. The interval is number of +# seconds between attempts. +# resync_interval = 5 + +# The DHCP agent requires an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP, +# BigSwitch/Floodlight) +interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Name of Open vSwitch bridge to use +# ovs_integration_bridge = br-int + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires +# no additional setup of the DHCP server. +dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq + +# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and +# iproute2 package that supports namespaces). +use_namespaces = True + +# The DHCP server can assist with providing metadata support on isolated +# networks. Setting this value to True will cause the DHCP server to append +# specific host routes to the DHCP request. The metadata service will only +# be activated when the subnet does not contain any router port. The guest +# instance must be configured to request host routes via DHCP (Option 121). +enable_isolated_metadata = True + +# Allows for serving metadata requests coming from a dedicated metadata +# access network whose cidr is 169.254.169.254/16 (or larger prefix), and +# is connected to a Neutron router from which the VMs send metadata +# request. In this case DHCP Option 121 will not be injected in VMs, as +# they will be able to reach 169.254.169.254 through a router. +# This option requires enable_isolated_metadata = True +# enable_metadata_network = False + +# Number of threads to use during sync process. Should not exceed connection +# pool size configured on server. +# num_sync_threads = 4 + +# Location to store DHCP server config files +# dhcp_confs = $state_path/dhcp + +# Domain to use for building the hostnames +# dhcp_domain = openstacklocal + +# Override the default dnsmasq settings with this file +# dnsmasq_config_file = + +# Comma-separated list of DNS servers which will be used by dnsmasq +# as forwarders. +# dnsmasq_dns_servers = + +# Limit number of leases to prevent a denial-of-service. +# dnsmasq_lease_max = 16777216 + +# Location to DHCP lease relay UNIX domain socket +# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# dhcp_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the dhcp agent. +# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a dhcp server is disabled. 
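The isolated-metadata support described above rests on DHCP option 121 (classless static routes): the agent has dnsmasq push a host route for 169.254.169.254 via the subnet's DHCP port by writing a line into the per-network opts file. A sketch of roughly what that line looks like; the tag and addresses are made up:

def metadata_route_opt(tag, dhcp_port_ip):
    # One dnsmasq opts-file line: a host route to the metadata IP via
    # the DHCP port, delivered as option 121 (classless-static-route).
    return 'tag:%s,option:classless-static-route,169.254.169.254/32,%s' % (
        tag, dhcp_port_ip)

print(metadata_route_opt('subnet-1', '10.0.0.2'))
# tag:subnet-1,option:classless-static-route,169.254.169.254/32,10.0.0.2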
+# dhcp_delete_namespaces = False + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. +# ovs_vsctl_timeout = 10 diff --git a/install-files/openstack/usr/share/openstack/neutron/fwaas_driver.ini b/install-files/openstack/usr/share/openstack/neutron/fwaas_driver.ini new file mode 100644 index 00000000..41f761ab --- /dev/null +++ b/install-files/openstack/usr/share/openstack/neutron/fwaas_driver.ini @@ -0,0 +1,3 @@ +[fwaas] +#driver = neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver +#enabled = True diff --git a/install-files/openstack/usr/share/openstack/neutron/l3_agent.ini b/install-files/openstack/usr/share/openstack/neutron/l3_agent.ini new file mode 100644 index 00000000..000cd997 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/neutron/l3_agent.ini @@ -0,0 +1,103 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = False +use_syslog = True + +# L3 requires that an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC) +# that supports L3 agent +interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and +# iproute2 package that supports namespaces). +use_namespaces = True + +# If use_namespaces is set as False then the agent can only configure one router. + +# This is done by setting the specific router_id. +# router_id = + +# When external_network_bridge is set, each L3 agent can be associated +# with no more than one external network. This value should be set to the UUID +# of that external network. To allow L3 agent support multiple external +# networks, both the external_network_bridge and gateway_external_network_id +# must be left empty. +# gateway_external_network_id = + +# Indicates that this L3 agent should also handle routers that do not have +# an external network gateway configured. This option should be True only +# for a single agent in a Neutron deployment, and may be False for all agents +# if all routers must have an external network gateway +# handle_internal_only_routers = True + +# Name of bridge used for external network traffic. This should be set to +# empty value for the linux bridge. when this parameter is set, each L3 agent +# can be associated with no more than one external network. +external_network_bridge = br-ex + +# TCP Port used by Neutron metadata server +# metadata_port = 9697 + +# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 +# to disable this feature. 
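Note that the br-ex named by external_network_bridge must already exist when the L3 agent starts; in this patch series it is created by network.yml. A small sketch for checking that precondition with the stock ovs-vsctl tool:

import subprocess

def bridge_exists(name):
    # ovs-vsctl br-exists exits 0 if the bridge exists, 2 if it does not.
    return subprocess.call(['ovs-vsctl', 'br-exists', name]) == 0

if not bridge_exists('br-ex'):
    raise SystemExit('br-ex is missing; run network.yml first')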
+# send_arp_for_ha = 3 + +# seconds between re-sync routers' data if needed +# periodic_interval = 40 + +# seconds to start to sync routers' data after +# starting agent +# periodic_fuzzy_delay = 5 + +# enable_metadata_proxy, which is true by default, can be set to False +# if the Nova metadata server is not available +# enable_metadata_proxy = True + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# router_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the L3 agent. +# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a router is destroyed. +# router_delete_namespaces = False + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. +# ovs_vsctl_timeout = 10 + +# The working mode for the agent. Allowed values are: +# - legacy: this preserves the existing behavior where the L3 agent is +# deployed on a centralized networking node to provide L3 services +# like DNAT, and SNAT. Use this mode if you do not want to adopt DVR. +# - dvr: this mode enables DVR functionality, and must be used for an L3 +# agent that runs on a compute host. +# - dvr_snat: this enables centralized SNAT support in conjunction with +# DVR. This mode must be used for an L3 agent running on a centralized +# node (or in single-host deployments, e.g. devstack). +# agent_mode = legacy + +# Location to store keepalived and all HA configurations +# ha_confs_path = $state_path/ha_confs + +# VRRP authentication type AH/PASS +# ha_vrrp_auth_type = PASS + +# VRRP authentication password +# ha_vrrp_auth_password = + +# The advertisement interval in seconds +# ha_vrrp_advert_int = 2 diff --git a/install-files/openstack/usr/share/openstack/neutron/lbaas_agent.ini b/install-files/openstack/usr/share/openstack/neutron/lbaas_agent.ini new file mode 100644 index 00000000..68a2759e --- /dev/null +++ b/install-files/openstack/usr/share/openstack/neutron/lbaas_agent.ini @@ -0,0 +1,42 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output). +# debug = False + +# The LBaaS agent will resync its state with Neutron to recover from any +# transient notification or rpc errors. The interval is number of +# seconds between attempts. +# periodic_interval = 10 + +# LBaas requires an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP, +# BigSwitch/Floodlight) +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# The agent requires drivers to manage the loadbalancer. HAProxy is the opensource version. 
+# Multiple device drivers reflecting different service providers could be specified: +# device_driver = path.to.provider1.driver.Driver +# device_driver = path.to.provider2.driver.Driver +# Default is: +# device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver + +[haproxy] +# Location to store config and state files +# loadbalancer_state_path = $state_path/lbaas + +# The user group +# user_group = nogroup + +# When delete and re-add the same vip, send this many gratuitous ARPs to flush +# the ARP cache in the Router. Set it below or equal to 0 to disable this feature. +# send_gratuitous_arp = 3 diff --git a/install-files/openstack/usr/share/openstack/neutron/metadata_agent.ini b/install-files/openstack/usr/share/openstack/neutron/metadata_agent.ini new file mode 100644 index 00000000..ed238770 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/neutron/metadata_agent.ini @@ -0,0 +1,60 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = True +use_syslog = True + +# The Neutron user information for accessing the Neutron API. +auth_url = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 +auth_region = regionOne +# Turn off verification of the certificate for ssl +# auth_insecure = False +# Certificate Authority public key (CA cert) file for ssl +# auth_ca_cert = +admin_tenant_name = service +admin_user = {{ NEUTRON_SERVICE_USER }} +admin_password = {{ NEUTRON_SERVICE_PASSWORD }} + +# Network service endpoint type to pull from the keystone catalog +# endpoint_type = adminURL + +# IP address used by Nova metadata server +nova_metadata_ip = {{ CONTROLLER_HOST_ADDRESS }} + +# TCP Port used by Nova metadata server +# nova_metadata_port = 8775 + +# Which protocol to use for requests to Nova metadata server, http or https +# nova_metadata_protocol = http + +# Whether insecure SSL connection should be accepted for Nova metadata server +# requests +# nova_metadata_insecure = False + +# Client certificate for nova api, needed when nova api requires client +# certificates +# nova_client_cert = + +# Private key for nova client certificate +# nova_client_priv_key = + +# When proxying metadata requests, Neutron signs the Instance-ID header with a +# shared secret to prevent spoofing. You may select any string for a secret, +# but it must match here and in the configuration used by the Nova Metadata +# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret +metadata_proxy_shared_secret = {{ METADATA_PROXY_SHARED_SECRET }} + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# Number of separate worker processes for metadata server. Defaults to +# half the number of CPU cores +# metadata_workers = + +# Number of backlog requests to configure the metadata server socket with +# metadata_backlog = 4096 + +# URL to connect to the cache backend. +# default_ttl=0 parameter will cause cache entries to never expire. +# Otherwise default_ttl specifies time in seconds a cache entry is valid for. +# No cache is used in case no value is passed. 
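The shared secret above is what keeps the metadata path trustworthy: the proxy signs the instance ID it forwards so a guest cannot impersonate another instance, and Nova recomputes the signature with the same secret. The signature is an HMAC-SHA256 of the instance ID; a simplified sketch of that computation:

import hashlib
import hmac

def sign_instance_id(shared_secret, instance_id):
    # HMAC-SHA256 of the instance ID, keyed with the shared secret,
    # as carried alongside the Instance-ID header.
    return hmac.new(shared_secret.encode('utf-8'),
                    instance_id.encode('utf-8'),
                    hashlib.sha256).hexdigest()

print(sign_instance_id('s3cret', 'instance-uuid-goes-here'))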
+# cache_url = memory://?default_ttl=5
diff --git a/install-files/openstack/usr/share/openstack/neutron/metering_agent.ini b/install-files/openstack/usr/share/openstack/neutron/metering_agent.ini
new file mode 100644
index 00000000..88826ce7
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/metering_agent.ini
@@ -0,0 +1,18 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = True
+
+# Default driver:
+# driver = neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver
+# Example of non-default driver
+# driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver
+
+# Interval between two metering measures
+# measure_interval = 30
+
+# Interval between two metering reports
+# report_interval = 300
+
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# use_namespaces = True
diff --git a/install-files/openstack/usr/share/openstack/neutron/neutron.conf b/install-files/openstack/usr/share/openstack/neutron/neutron.conf
new file mode 100644
index 00000000..51de7464
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/neutron.conf
@@ -0,0 +1,640 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+# verbose = False
+
+# =========Start Global Config Option for Distributed L3 Router===============
+# Setting the "router_distributed" flag to "True" will default to the creation
+# of distributed tenant routers. The admin can override this flag by specifying
+# the type of the router on the create request (admin-only attribute). Default
+# value is "False" to support legacy mode (centralized) routers.
+#
+# router_distributed = False
+#
+# ===========End Global Config Option for Distributed L3 Router===============
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+# debug = False
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+use_syslog = True
+
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+# log_dir =
+
+# publish_errors = False
+
+# Address to bind the API server to
+# bind_host = 0.0.0.0
+
+# Port to bind the API server to
+# bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
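The entrypoint mechanism described above means core_plugin = ml2 is resolved through the neutron.core_plugins entry-point namespace rather than as an import path. A sketch of that lookup using stevedore, runnable on a host with neutron installed:

from stevedore import driver

# Resolve 'ml2' in the neutron.core_plugins namespace, the same
# lookup neutron performs for the core_plugin option.
mgr = driver.DriverManager(namespace='neutron.core_plugins',
                           name='ml2',
                           invoke_on_load=False)
print(mgr.driver)  # the ML2 plugin class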
+#
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+service_plugins = router
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# DVR Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be randomly
+# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
+# avoid mixing them up with MAC's allocated for tenant ports.
+# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
+# The default is 3 octet
+# dvr_base_mac = fa:16:3f:00:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds). Use -1 to
+# tell dnsmasq to use infinite lease times.
+# dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet. For IPv6, validate only if
+# gateway is not a link local address. Deprecated, to be removed during the
+# K release, at which point the check will be mandatory.
+# force_gateway_on_subnet = True
+
+# Default maximum number of items returned in a single response,
+# value == infinite and value < 0 means no max limit, and value must
+# be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, server will just return pagination_max_limit
+# of number of items.
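On the client side these limits surface as paginated listings. A sketch with python-neutronclient that pages through networks two at a time, assuming pagination is enabled server-side; the endpoint and credentials are placeholders:

from neutronclient.v2_0 import client

neutron = client.Client(username='admin',
                        password='secret',
                        tenant_name='admin',
                        auth_url='http://controller:35357/v2.0')

# Ask for two results per request; with retrieve_all=False the client
# yields one response page at a time, following the pagination links.
for page in neutron.list_networks(retrieve_all=False, limit=2):
    for net in page['networks']:
        print(net['id'], net['name'])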
+# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# Maximum number of routes per router +# max_routes = 30 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +# agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted +# networks to first DHCP agent which sends get_active_networks message to +# neutron server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to first L3 agent which sends sync_routers message to neutron server +# router_auto_schedule = True + +# Allow automatic rescheduling of routers from dead L3 agents with +# admin_state_up set to True to alive agents. +# allow_automatic_l3agent_failover = False + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. +# dhcp_agents_per_network = 1 + +# =========== end of items for agent scheduler extension ===== + +# =========== items for l3 extension ============== +# Enable high availability for virtual routers. +# l3_ha = False +# +# Maximum number of l3 agents which a HA router will be scheduled on. If it +# is set to 0 the router will be scheduled on every agent. +# max_l3_agents_per_router = 3 +# +# Minimum number of l3 agents which a HA router will be scheduled on. The +# default value is 2. +# min_l3_agents_per_router = 2 +# +# CIDR of the administrative network if HA mode is enabled +# l3_ha_net_cidr = 169.254.192.0/18 +# =========== end of items for l3 extension ======= + +# =========== WSGI parameters related to the API server ============== +# Number of separate worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as workers. The parent process manages them. +# api_workers = 0 + +# Number of separate RPC worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as RPC workers. The parent process manages them. +# This feature is experimental until issues are addressed and testing has been +# enabled for various plugins for compatibility. +# rpc_workers = 0 + +# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when +# starting API server. Not supported on OS X. +# tcp_keepidle = 600 + +# Number of seconds to keep retrying to listen +# retry_until_window = 30 + +# Number of backlog requests to configure the socket with. 
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ CONTROLLER_HOST_ADDRESS }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = regionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = {{ NOVA_SERVICE_USER }}
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ SERVICE_TENANT_ID }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_SERVICE_PASSWORD }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+# CA file for novaclient to verify server certificates
+# nova_ca_certificates_file =
+
+# Boolean to control ignoring SSL errors on the nova url
+# nova_api_insecure = False
+
+# Number of seconds between sending events to nova if there are any events to send
+# send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). Valid values
+# are TLSv1, SSLv23 and SSLv3.
SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +rabbit_host={{ RABBITMQ_HOST }} + +# The RabbitMQ broker port where a single node is used. +# (integer value) +rabbit_port={{ RABBITMQ_PORT }} + +# RabbitMQ HA cluster host:port pairs. (list value) +#rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +#rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +rabbit_userid={{ RABBITMQ_USER }} + +# The RabbitMQ password. (string value) +rabbit_password={{ RABBITMQ_PASSWORD }} + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=oslo + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Size of RPC greenthread pool. (integer value) +#rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +notification_driver=neutron.openstack.common.notifier.rpc_notifier + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. 
(string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other +# drivers include qpid and zmq. (string value) +rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the +# transport_url option. (string value) +#control_exchange=openstack + + +[matchmaker_redis] + +# +# Options defined in oslo.messaging +# + +# Host to locate redis. (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + + +[matchmaker_ring] + +# +# Options defined in oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + +[quotas] +# Default driver to use for quota checks +# quota_driver = neutron.db.quota_db.DbQuotaDriver + +# Resource name(s) that are supported in quota features +# quota_items = network,subnet,port + +# Default number of resource allowed per tenant. A negative value means +# unlimited. +# default_quota = -1 + +# Number of networks allowed per tenant. A negative value means unlimited. +# quota_network = 10 + +# Number of subnets allowed per tenant. A negative value means unlimited. +# quota_subnet = 10 + +# Number of ports allowed per tenant. A negative value means unlimited. +# quota_port = 50 + +# Number of security groups allowed per tenant. A negative value means +# unlimited. +# quota_security_group = 10 + +# Number of security group rules allowed per tenant. A negative value means +# unlimited. +# quota_security_group_rule = 100 + +# Number of vips allowed per tenant. A negative value means unlimited. +# quota_vip = 10 + +# Number of pools allowed per tenant. A negative value means unlimited. +# quota_pool = 10 + +# Number of pool members allowed per tenant. A negative value means unlimited. +# The default is unlimited because a member is not a real resource consumer +# on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_member = -1 + +# Number of health monitors allowed per tenant. A negative value means +# unlimited. +# The default is unlimited because a health monitor is not a real resource +# consumer on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_health_monitor = -1 + +# Number of routers allowed per tenant. A negative value means unlimited. +# quota_router = 10 + +# Number of floating IPs allowed per tenant. A negative value means unlimited. +# quota_floatingip = 50 + +# Number of firewalls allowed per tenant. A negative value means unlimited. +# quota_firewall = 1 + +# Number of firewall policies allowed per tenant. A negative value means +# unlimited. +# quota_firewall_policy = 1 + +# Number of firewall rules allowed per tenant. A negative value means +# unlimited. +# quota_firewall_rule = 100 + +[agent] +# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real +# root filter facility. 
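+# Illustrative note (not part of the upstream sample): with the rootwrap
+# helper configured below, an agent command such as 'ip link set ...' is
+# executed as 'sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
+# ip link set ...', and rootwrap only permits commands matching the filter
+# definitions referenced by rootwrap.conf.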
+# Change to "sudo" to skip the filtering and just run the command directly
+# root_helper = sudo
+root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+# report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357
+admin_tenant_name = service
+admin_user = {{ NEUTRON_SERVICE_USER }}
+admin_password = {{ NEUTRON_SERVICE_PASSWORD }}
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite://
+# NOTE: In deployment the [database] section and its connection attribute may
+# be set in the corresponding core plugin '.ini' file. However, it is suggested
+# to put the [database] section and its connection attribute in this
+# configuration file.
+#connection=sqlite:////var/lib/neutron/neutron.sqlite
+connection=postgresql://{{ NEUTRON_DB_USER }}:{{ NEUTRON_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/neutron
+
+# Database engine for which script will be generated when using offline
+# migration
+# engine =
+
+# The SQLAlchemy connection string used to connect to the slave database
+# slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+# max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+# retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+# min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# max_pool_size = 10
+
+# Timeout in seconds before idle sql connections are reaped
+# idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+# max_overflow = 20
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+# connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+# connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in the form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# Uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
+# Uncomment the line below to use the A10 Networks LBaaS driver. Requires 'pip install a10-neutron-lbaas'.
+#service_provider = LOADBALANCER:A10Networks:neutron.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default
+# Uncomment the following line to test the LBaaS v2 API _WITHOUT_ a real backend
+# service_provider = LOADBALANCER:LoggingNoop:neutron.services.loadbalancer.drivers.logging_noop.driver.LoggingNoopLoadBalancerDriver:default
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini
new file mode 100644
index 00000000..256f7855
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini
@@ -0,0 +1,114 @@
+# Config file for neutron-proxy-plugin.
+
+[restproxy]
+# All configuration for this plugin is in section '[restproxy]'
+#
+# The following parameters are supported:
+# servers : <host:port>[,<host:port>]* (Error if not set)
+# server_auth : <username:password> (default: no auth)
+# server_ssl : True | False (default: True)
+# ssl_cert_directory : <path> (default: /etc/neutron/plugins/bigswitch/ssl)
+# no_ssl_validation : True | False (default: False)
+# ssl_sticky : True | False (default: True)
+# sync_data : True | False (default: False)
+# auto_sync_on_failure : True | False (default: True)
+# consistency_interval : <integer> (default: 60 seconds)
+# server_timeout : <integer> (default: 10 seconds)
+# neutron_id : <string> (default: neutron-<hostname>)
+# add_meta_server_route : True | False (default: True)
+# thread_pool_size : <integer> (default: 4)
+
+# A comma separated list of BigSwitch or Floodlight servers and port numbers. The plugin proxies the requests to the BigSwitch/Floodlight server, which performs the networking configuration. Note that only one server is needed per deployment, but you may wish to deploy multiple servers to support failover.
+servers=localhost:8080
+
+# The username and password for authenticating against the BigSwitch or Floodlight controller.
+# server_auth=username:password
+
+# Use SSL when connecting to the BigSwitch or Floodlight controller.
+# server_ssl=True
+
+# Directory which contains the ca_certs and host_certs to be used to validate
+# controller certificates.
+# ssl_cert_directory=/etc/neutron/plugins/bigswitch/ssl/
+
+# If a certificate does not exist for a controller, trust and store the first
+# certificate received for that controller and use it to validate future
+# connections to that controller.
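+# Illustrative note (not part of the upstream sample): this is effectively
+# trust-on-first-use; if a controller later presents a different
+# certificate, validation fails until the cached certificate is removed
+# from ssl_cert_directory.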
+# ssl_sticky=True
+
+# Do not validate the controller certificates for SSL
+# Warning: This will not provide protection against man-in-the-middle attacks
+# no_ssl_validation=False
+
+# Sync data on connect
+# sync_data=False
+
+# If neutron fails to create a resource because the backend controller
+# doesn't know of a dependency, automatically trigger a full data
+# synchronization to the controller.
+# auto_sync_on_failure=True
+
+# Time between verifications that the backend controller
+# database is consistent with Neutron. (0 to disable)
+# consistency_interval = 60
+
+# Maximum number of seconds to wait for proxy request to connect and complete.
+# server_timeout=10
+
+# User defined identifier for this Neutron deployment
+# neutron_id =
+
+# Flag to decide if a route to the metadata server should be injected into the VM
+# add_meta_server_route = True
+
+# Number of threads to use to handle large volumes of port creation requests
+# thread_pool_size = 4
+
+[nova]
+# Specify the VIF_TYPE that will be controlled on the Nova compute instances
+# options: ivs or ovs
+# default: ovs
+# vif_type = ovs
+
+# Overrides for vif types based on nova compute node host IDs
+# Comma separated list of host IDs to fix to a specific VIF type
+# The VIF type is taken from the end of the configuration item
+# node_override_vif_<vif_type>
+# For example, the following would set the VIF type to IVS for
+# host-id1 and host-id2
+# node_override_vif_ivs=host-id1,host-id2
+
+[router]
+# Specify the default router rules installed in newly created tenant routers
+# Specify multiple times for multiple rules
+# Format is <tenant>:<source>:<destination>:<action>
+# Optionally, a comma-separated list of nexthops may be included after <action>
+# Use an * to specify default for all tenants
+# Default is any any allow for all tenants
+# tenant_default_router_rule=*:any:any:permit
+
+# Maximum number of rules that a single router may have
+# Default is 200
+# max_router_rules=200
+
+[restproxyagent]
+
+# Specify the name of the bridge used on compute nodes
+# for attachment.
+# Default: br-int
+# integration_bridge=br-int
+
+# Change the frequency of polling by the restproxy agent.
+# Value is seconds
+# Default: 5
+# polling_interval=5
+
+# Virtual switch type on the compute node.
+# Options: ovs or ivs
+# Default: ovs
+# virtual_switch_type = ovs
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README b/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README
new file mode 100644
index 00000000..e7e47a27
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README
@@ -0,0 +1,3 @@
+Certificates in this folder will be used to
+verify signatures for any controllers the plugin
+connects to.
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README b/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README
new file mode 100644
index 00000000..8f5f5e77
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README
@@ -0,0 +1,6 @@
+Certificates in this folder must match the name
+of the controller they are used to authenticate,
+with a .pem extension.
+
+For example, the certificate for the controller
+"192.168.0.1" should be named "192.168.0.1.pem".
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini
new file mode 100644
index 00000000..916e9e5d
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini
@@ -0,0 +1,29 @@
+[switch]
+# username = The SSH username to use
+# password = The SSH password to use
+# address = The address of the host to SSH to
+# ostype = Should be NOS, but is unused otherwise
+#
+# Example:
+# username = admin
+# password = password
+# address = 10.24.84.38
+# ostype = NOS
+
+[physical_interface]
+# physical_interface = The network interface to use when creating a port
+#
+# Example:
+# physical_interface = physnet1
+
+[vlans]
+# network_vlan_ranges = <physical network name>:nnnn:mmmm
+#
+# Example:
+# network_vlan_ranges = physnet1:1000:2999
+
+[linux_bridge]
+# physical_interface_mappings = <physical network name>:<local interface>
+#
+# Example:
+# physical_interface_mappings = physnet1:em1
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini
new file mode 100644
index 00000000..d99e8382
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini
@@ -0,0 +1,15 @@
+[cfg_agent]
+# (IntOpt) Interval in seconds for processing of service updates.
+# That is when the config agent's process_services() loop executes
+# and lets each service helper process its service resources.
+# rpc_loop_interval = 10
+
+# (StrOpt) Period-separated module path to the routing service helper class.
+# routing_svc_helper_class = neutron.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper.RoutingServiceHelper
+
+# (IntOpt) Timeout value in seconds for connecting to a hosting device.
+# device_connection_timeout = 30
+
+# (IntOpt) The time in seconds until a backlogged hosting device is
+# presumed dead or booted to an error state.
+# hosting_device_dead_timeout = 300
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini
new file mode 100644
index 00000000..17eae737
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini
@@ -0,0 +1,100 @@
+[cisco]
+
+# (StrOpt) A short prefix to prepend to the VLAN number when creating a
+# VLAN interface. For example, if an interface is being created for
+# VLAN 2001 it will be named 'q-2001' using the default prefix.
+#
+# vlan_name_prefix = q-
+# Example: vlan_name_prefix = vnet-
+
+# (StrOpt) A short prefix to prepend to the VLAN number when creating a
+# provider VLAN interface. For example, if an interface is being created
+# for provider VLAN 3003 it will be named 'p-3003' using the default prefix.
+#
+# provider_vlan_name_prefix = p-
+# Example: provider_vlan_name_prefix = PV-
+
+# (BoolOpt) A flag indicating whether Openstack networking should manage the
+# creation and removal of VLAN interfaces for provider networks on the Nexus
+# switches. If the flag is set to False then Openstack will not create or
+# remove VLAN interfaces for provider networks, and the administrator needs
+# to manage these interfaces manually or by external orchestration.
+#
+# provider_vlan_auto_create = True
+
+# (BoolOpt) A flag indicating whether Openstack networking should manage
+# the adding and removing of provider VLANs from trunk ports on the Nexus
+# switches. If the flag is set to False then Openstack will not add or
+# remove provider VLANs from trunk ports, and the administrator needs to
+# manage these operations manually or by external orchestration.
+#
+# provider_vlan_auto_trunk = True
+
+# (StrOpt) Period-separated module path to the model class to use for
+# the Cisco neutron plugin.
+#
+# model_class = neutron.plugins.cisco.models.virt_phy_sw_v2.VirtualPhysicalSwitchModelV2
+
+# (BoolOpt) A flag to enable Layer 3 support on the Nexus switches.
+# Note: This feature is not supported on all models/versions of Cisco
+# Nexus switches. To use this feature, all of the Nexus switches in the
+# deployment must support it.
+# nexus_l3_enable = False
+
+# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
+# svi_round_robin = False
+
+# Cisco Nexus Switch configurations.
+# Each switch to be managed by Openstack Neutron must be configured here.
+#
+# N1KV Format.
+# [N1KV:<IP address of the VSM>]
+# username=<credential username>
+# password=<credential password>
+#
+# Example:
+# [N1KV:2.2.2.2]
+# username=admin
+# password=mySecretPassword
+
+[cisco_n1k]
+
+# (StrOpt) Specify the name of the integration bridge to which the VIFs are
+# attached.
+# Default value: br-int
+# integration_bridge = br-int
+
+# (StrOpt) Name of the policy profile to be associated with a port when no
+# policy profile is specified during port creates.
+# Default value: service_profile
+# default_policy_profile = service_profile
+
+# (StrOpt) Name of the policy profile to be associated with a port owned by
+# network node (dhcp, router).
+# Default value: dhcp_pp
+# network_node_policy_profile = dhcp_pp
+
+# (StrOpt) Name of the network profile to be associated with a network when no
+# network profile is specified during network creates. Admin should pre-create
+# a network profile with this name.
+# Default value: default_network_profile
+# default_network_profile = network_pool
+
+# (IntOpt) Time in seconds for which the plugin polls the VSM for updates in
+# policy profiles.
+# Default value: 60
+# poll_duration = 60
+
+# (BoolOpt) Specify whether tenants are restricted from accessing all the
+# policy profiles.
+# Default value: False, indicating all tenants can access all policy profiles.
+#
+# restrict_policy_profiles = False
+
+# (IntOpt) Number of threads to use to make HTTP requests to the VSM.
+# Default value: 4 +# http_pool_size = 4 + +# (IntOpt) Timeout duration in seconds for the http request +# Default value: 15 +# http_timeout = 15 diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini new file mode 100644 index 00000000..3ef271d2 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini @@ -0,0 +1,76 @@ +[general] +#(IntOpt) Time in seconds between renewed scheduling attempts of non-scheduled routers +# backlog_processing_interval = 10 + +#(StrOpt) Name of the L3 admin tenant +# l3_admin_tenant = L3AdminTenant + +#(StrOpt) Name of management network for hosting device configuration +# management_network = osn_mgmt_nw + +#(StrOpt) Default security group applied on management port +# default_security_group = mgmt_sec_grp + +#(IntOpt) Seconds of no status update until a cfg agent is considered down +# cfg_agent_down_time = 60 + +#(StrOpt) Path to templates for hosting devices +# templates_path = /opt/stack/data/neutron/cisco/templates + +#(StrOpt) Path to config drive files for service VM instances +# service_vm_config_path = /opt/stack/data/neutron/cisco/config_drive + +#(BoolOpt) Ensure that Nova is running before attempting to create any VM +# ensure_nova_running = True + +[hosting_devices] +# Settings coupled to CSR1kv VM devices +# ------------------------------------- +#(StrOpt) Name of Glance image for CSR1kv +# csr1kv_image = csr1kv_openstack_img + +#(StrOpt) UUID of Nova flavor for CSR1kv +# csr1kv_flavor = 621 + +#(StrOpt) Plugging driver for CSR1kv +# csr1kv_plugging_driver = neutron.plugins.cisco.l3.plugging_drivers.n1kv_trunking_driver.N1kvTrunkingPlugDriver + +#(StrOpt) Hosting device driver for CSR1kv +# csr1kv_device_driver = neutron.plugins.cisco.l3.hosting_device_drivers.csr1kv_hd_driver.CSR1kvHostingDeviceDriver + +#(StrOpt) Config agent router service driver for CSR1kv +# csr1kv_cfgagent_router_driver = neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv.csr1kv_routing_driver.CSR1kvRoutingDriver + +#(StrOpt) Configdrive template file for CSR1kv +# csr1kv_configdrive_template = csr1kv_cfg_template + +#(IntOpt) Booting time in seconds before a CSR1kv becomes operational +# csr1kv_booting_time = 420 + +#(StrOpt) Username to use for CSR1kv configurations +# csr1kv_username = stack + +#(StrOpt) Password to use for CSR1kv configurations +# csr1kv_password = cisco + +[n1kv] +# Settings coupled to inter-working with N1kv plugin +# -------------------------------------------------- +#(StrOpt) Name of N1kv port profile for management ports +# management_port_profile = osn_mgmt_pp + +#(StrOpt) Name of N1kv port profile for T1 ports (i.e., ports carrying traffic +# from VXLAN segmented networks). +# t1_port_profile = osn_t1_pp + +#(StrOpt) Name of N1kv port profile for T2 ports (i.e., ports carrying traffic +# from VLAN segmented networks). +# t2_port_profile = osn_t2_pp + +#(StrOpt) Name of N1kv network profile for T1 networks (i.e., trunk networks +# for VXLAN segmented traffic). +# t1_network_profile = osn_t1_np + +#(StrOpt) Name of N1kv network profile for T2 networks (i.e., trunk networks +# for VLAN segmented traffic). 
+# t2_network_profile = osn_t2_np
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini
new file mode 100644
index 00000000..0aee17eb
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini
@@ -0,0 +1,26 @@
+[cisco_csr_ipsec]
+# Status check interval in seconds, for VPNaaS IPSec connections used on CSR
+# status_check_interval = 60
+
+# Cisco CSR management port information for REST access used by VPNaaS
+# TODO(pcm): Remove once CSR is integrated in as a Neutron router.
+#
+# Format is:
+# [cisco_csr_rest:<public IP>]
+# rest_mgmt = <mgmt port IP>
+# tunnel_ip = <tunnel IP>
+# username = <user>
+# password = <password>
+# timeout = <timeout>
+# host = <hostname>
+# tunnel_if = <tunnel I/F>
+#
+# where:
+# public IP ----- Public IP address of router used with a VPN service (1:1 with CSR)
+# tunnel IP ----- Public IP address of the CSR used for the IPSec tunnel
+# mgmt port IP -- IP address of CSR for REST API access
+# user ---------- Username for REST management port access to Cisco CSR
+# password ------ Password for REST management port access to Cisco CSR
+# timeout ------- REST request timeout to Cisco CSR (optional)
+# hostname ------ Name of host where CSR is running as a VM
+# tunnel I/F ---- CSR port name used for tunnels' IP address
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini
new file mode 100644
index 00000000..0ca9b46f
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini
@@ -0,0 +1,41 @@
+[heleos]
+#configure the ESM management address
+#in the first version of this plugin, only one ESM can be specified
+#Example:
+#esm_mgmt=
+
+#configure admin username and password
+#admin_username=
+#admin_password=
+
+#router image id
+#Example:
+#router_image=932ce713-e210-3d54-a0a5-518b0b5ee1b0
+
+#mgmt shared security zone id
+#defines the shared management security zone. Each tenant can have a private one configured through the ESM
+#Example:
+#mgmt_id=c0bc9b6c-f110-46cf-bb01-733bfe4b5a1a
+
+#in-band shared security zone id
+#defines the shared in-band security zone. Each tenant can have a private one configured through the ESM
+#Example:
+#inband_id=a6b7999d-3806-4b04-81f6-e0c5c8271afc
+
+#oob-band shared security zone id
+#defines the shared out-of-band security zone. Each tenant can have a private one configured through the ESM
+#Example:
+#oob_id=e7eda5cc-b977-46cb-9c14-cab43c1b7871
+
+#dummy security zone id
+#defines the dummy security zone ID. This security zone will be used by the DVAs with no neutron interfaces
+#Example:
+#dummy_utif_id=d9911310-25fc-4733-a2e0-c0eda024ef08
+
+#resource pool id
+#define the shared resource pool. Each tenant can have a private one configured through the ESM
+#Example
+#resource_pool_id=
+
+#define if the requests have to be executed asynchronously by the plugin or not
+#async_requests=
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini
new file mode 100644
index 00000000..5eeec570
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini
@@ -0,0 +1,63 @@
+[hyperv]
+# (StrOpt) Type of network to allocate for tenant networks.
The
+# default value 'local' is useful only for single-box testing and
+# provides no connectivity between hosts. You MUST either change this
+# to 'vlan' and configure network_vlan_ranges below or to 'flat'.
+# Set to 'none' to disable creation of tenant networks.
+#
+# tenant_network_type = local
+# Example: tenant_network_type = vlan
+
+# (ListOpt) Comma-separated list of
+# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
+# of VLAN IDs on named physical networks that are available for
+# allocation. All physical networks listed are available for flat and
+# VLAN provider network creation. Specified ranges of VLAN IDs are
+# available for tenant network allocation if tenant_network_type is
+# 'vlan'. If empty, only gre and local networks may be created.
+#
+# network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999
+
+[agent]
+# Agent's polling interval in seconds
+# polling_interval = 2
+
+# (ListOpt) Comma separated list of <physical_network>:<vswitch>
+# where the physical networks can be expressed with wildcards,
+# e.g.: "*:external".
+# The referred external virtual switches need to be already present on
+# the Hyper-V server.
+# If a given physical network name will not match any value in the list
+# the plugin will look for a virtual switch with the same name.
+#
+# physical_network_vswitch_mappings = *:external
+# Example: physical_network_vswitch_mappings = net1:external1,net2:external2
+
+# (StrOpt) Private virtual switch name used for local networking.
+#
+# local_network_vswitch = private
+# Example: local_network_vswitch = custom_vswitch
+
+# (BoolOpt) Enables metrics collections for switch ports by using Hyper-V's
+# metric APIs. Collected data can be retrieved by other apps and services,
+# e.g.: Ceilometer. Requires Hyper-V / Windows Server 2012 and above.
+#
+# enable_metrics_collection = False
+
+#-----------------------------------------------------------------------------
+# Sample Configurations.
+#----------------------------------------------------------------------------- +# +# Neutron server: +# +# [HYPERV] +# tenant_network_type = vlan +# network_vlan_ranges = default:2000:3999 +# +# Agent running on Hyper-V node: +# +# [AGENT] +# polling_interval = 2 +# physical_network_vswitch_mappings = *:external +# local_network_vswitch = private diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini new file mode 100644 index 00000000..0fab5070 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini @@ -0,0 +1,50 @@ +[sdnve] +# (ListOpt) The IP address of one (or more) SDN-VE controllers +# Default value is: controller_ips = 127.0.0.1 +# Example: controller_ips = 127.0.0.1,127.0.0.2 +# (StrOpt) The integration bridge for OF based implementation +# The default value for integration_bridge is None +# Example: integration_bridge = br-int +# (ListOpt) The interface mapping connecting the integration +# bridge to external network as a list of physical network names and +# interfaces: : +# Example: interface_mappings = default:eth2 +# (BoolOpt) Used to reset the integration bridge, if exists +# The default value for reset_bridge is True +# Example: reset_bridge = False +# (BoolOpt) Used to set the OVS controller as out-of-band +# The default value for out_of_band is True +# Example: out_of_band = False +# +# (BoolOpt) The fake controller for testing purposes +# Default value is: use_fake_controller = False +# (StrOpt) The port number for use with controller +# The default value for the port is 8443 +# Example: port = 8443 +# (StrOpt) The userid for use with controller +# The default value for the userid is admin +# Example: userid = sdnve_user +# (StrOpt) The password for use with controller +# The default value for the password is admin +# Example: password = sdnve_password +# +# (StrOpt) The default type of tenants (and associated resources) +# Available choices are: OVERLAY or OF +# The default value for tenant type is OVERLAY +# Example: default_tenant_type = OVERLAY +# (StrOpt) The string in tenant description that indicates +# Default value for OF tenants: of_signature = SDNVE-OF +# (StrOpt) The string in tenant description that indicates +# Default value for OVERLAY tenants: overlay_signature = SDNVE-OVERLAY + +[sdnve_agent] +# (IntOpt) Agent's polling interval in seconds +# polling_interval = 2 +# (StrOpt) What to use for root helper +# The default value: root_helper = 'sudo' +# (BoolOpt) Whether to use rpc or not +# The default value: rpc = True + +[securitygroup] +# The security group is not supported: +# firewall_driver = neutron.agent.firewall.NoopFirewallDriver diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini new file mode 100644 index 00000000..94fe9803 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini @@ -0,0 +1,78 @@ +[vlans] +# (StrOpt) Type of network to allocate for tenant networks. The +# default value 'local' is useful only for single-box testing and +# provides no connectivity between hosts. You MUST change this to +# 'vlan' and configure network_vlan_ranges below in order for tenant +# networks to provide connectivity between hosts. 
Set to 'none' to
+# disable creation of tenant networks.
+#
+# tenant_network_type = local
+# Example: tenant_network_type = vlan
+
+# (ListOpt) Comma-separated list of
+# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
+# of VLAN IDs on named physical networks that are available for
+# allocation. All physical networks listed are available for flat and
+# VLAN provider network creation. Specified ranges of VLAN IDs are
+# available for tenant network allocation if tenant_network_type is
+# 'vlan'. If empty, only local networks may be created.
+#
+# network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999
+
+[linux_bridge]
+# (ListOpt) Comma-separated list of
+# <physical_network>:<physical_interface> tuples mapping physical
+# network names to the agent's node-specific physical network
+# interfaces to be used for flat and VLAN networks. All physical
+# networks listed in network_vlan_ranges on the server should have
+# mappings to appropriate interfaces on each agent.
+#
+# physical_interface_mappings =
+# Example: physical_interface_mappings = physnet1:eth1
+
+[vxlan]
+# (BoolOpt) enable VXLAN on the agent
+# VXLAN support can be enabled when agent is managed by ml2 plugin using
+# linuxbridge mechanism driver. Useless if set while using linuxbridge plugin.
+# enable_vxlan = False
+#
+# (IntOpt) use specific TTL for vxlan interface protocol packets
+# ttl =
+#
+# (IntOpt) use specific TOS for vxlan interface protocol packets
+# tos =
+#
+# (StrOpt) multicast group to use for broadcast emulation.
+# This group must be the same on all the agents.
+# vxlan_group = 224.0.0.1
+#
+# (StrOpt) Local IP address to use for VXLAN endpoints (required)
+# local_ip =
+#
+# (BoolOpt) Flag to enable l2population extension. This option should be used
+# in conjunction with ml2 plugin l2population mechanism driver (in that case,
+# both linuxbridge and l2population mechanism drivers should be loaded).
+# It enables plugin to populate VXLAN forwarding table, in order to limit
+# the use of broadcast emulation (multicast will be turned off if kernel and
+# iproute2 supports unicast flooding - requires 3.11 kernel and iproute2 3.10)
+# l2_population = False
+
+[agent]
+# Agent's polling interval in seconds
+# polling_interval = 2
+
+# (BoolOpt) Enable server RPC compatibility with old (pre-havana)
+# agents.
+#
+# rpc_support_old_agents = False
+# Example: rpc_support_old_agents = True
+
+[securitygroup]
+# Firewall driver for realizing neutron security group function
+# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
+# Example: firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
+
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini
new file mode 100644
index 00000000..2b9bfa5e
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini
@@ -0,0 +1,31 @@
+# Config file for Metaplugin
+
+[meta]
+# Comma separated list of flavor:neutron_plugin for plugins to load.
+# Extension method is searched in the list order and the first one is used.
+plugin_list = 'ml2:neutron.plugins.ml2.plugin.Ml2Plugin,nvp:neutron.plugins.vmware.plugin.NsxPluginV2'
+
+# Comma separated list of flavor:neutron_plugin for L3 service plugins
+# to load.
+# This is intended for specifying L2 plugins which support L3 functions. +# If you use a router service plugin, set this blank. +l3_plugin_list = + +# Default flavor to use, when flavor:network is not specified at network +# creation. +default_flavor = 'nvp' + +# Default L3 flavor to use, when flavor:router is not specified at router +# creation. +# Ignored if 'l3_plugin_list' is blank. +default_l3_flavor = + +# Comma separated list of supported extension aliases. +supported_extension_aliases = 'provider,binding,agent,dhcp_agent_scheduler' + +# Comma separated list of method:flavor to select specific plugin for a method. +# This has priority over method search order based on 'plugin_list'. +extension_map = 'get_port_stats:nvp' + +# Specifies flavor for plugin to handle 'q-plugin' RPC requests. +rpc_flavor = 'ml2' diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini new file mode 100644 index 00000000..f2e94052 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini @@ -0,0 +1,19 @@ + +[midonet] +# MidoNet API server URI +# midonet_uri = http://localhost:8080/midonet-api + +# MidoNet admin username +# username = admin + +# MidoNet admin password +# password = passw0rd + +# ID of the project that MidoNet admin user belongs to +# project_id = 77777777-7777-7777-7777-777777777777 + +# Virtual provider router ID +# provider_router_id = 00112233-0011-0011-0011-001122334455 + +# Path to midonet host uuid file +# midonet_host_uuid_path = /etc/midolman/host_uuid.properties diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini new file mode 100644 index 00000000..b8097ce2 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini @@ -0,0 +1,86 @@ +[ml2] +# (ListOpt) List of network type driver entrypoints to be loaded from +# the neutron.ml2.type_drivers namespace. +# +# type_drivers = local,flat,vlan,gre,vxlan +# Example: type_drivers = flat,vlan,gre,vxlan +type_drivers = flat,gre + +# (ListOpt) Ordered list of network_types to allocate as tenant +# networks. The default value 'local' is useful for single-box testing +# but provides no connectivity between hosts. +# +# tenant_network_types = local +# Example: tenant_network_types = vlan,gre,vxlan +tenant_network_types = gre + +# (ListOpt) Ordered list of networking mechanism driver entrypoints +# to be loaded from the neutron.ml2.mechanism_drivers namespace. +# mechanism_drivers = +# Example: mechanism_drivers = openvswitch,mlnx +# Example: mechanism_drivers = arista +# Example: mechanism_drivers = cisco,logger +# Example: mechanism_drivers = openvswitch,brocade +# Example: mechanism_drivers = linuxbridge,brocade +mechanism_drivers = openvswitch + +# (ListOpt) Ordered list of extension driver entrypoints +# to be loaded from the neutron.ml2.extension_drivers namespace. +# extension_drivers = +# Example: extension_drivers = anewextensiondriver + +[ml2_type_flat] +# (ListOpt) List of physical_network names with which flat networks +# can be created. Use * to allow flat networks with arbitrary +# physical_network names. 
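+# Illustrative note (not part of the upstream sample): the name used here
+# must line up with a bridge mapping in the [ovs] section; this file pairs
+# flat_networks = External with bridge_mappings = External:br-ex below.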
+#
+# flat_networks =
+# Example: flat_networks = physnet1,physnet2
+# Example: flat_networks = *
+flat_networks = External
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+# network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+#network_vlan_ranges = Physnet1:100:200
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+# vni_ranges =
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+# vxlan_group =
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+enable_security_group = True
+
+# Use ipset to speed-up the iptables security groups. Enabling ipset support
+# requires that ipset is installed on L2 agent node.
+enable_ipset = True
+
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+
+[ovs]
+local_ip = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+enable_tunneling = True
+bridge_mappings=External:br-ex
+
+[agent]
+tunnel_types = gre
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini
new file mode 100644
index 00000000..abaf5bc7
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini
@@ -0,0 +1,100 @@
+# Defines configuration options specific to the Arista ML2 Mechanism driver
+
+[ml2_arista]
+# (StrOpt) EOS IP address. This is a required field. If not set, all
+# communications to Arista EOS will fail.
+#
+# eapi_host =
+# Example: eapi_host = 192.168.0.1
+#
+# (StrOpt) EOS command API username. This is a required field.
+# If not set, all communications to Arista EOS will fail.
+#
+# eapi_username =
+# Example: arista_eapi_username = admin
+#
+# (StrOpt) EOS command API password. This is a required field.
+# If not set, all communications to Arista EOS will fail.
+#
+# eapi_password =
+# Example: eapi_password = my_password
+#
+# (StrOpt) Defines if hostnames are sent to Arista EOS as FQDNs
+# ("node1.domain.com") or as short names ("node1"). This is
+# optional. If not set, a value of "True" is assumed.
+#
+# use_fqdn =
+# Example: use_fqdn = True
+#
+# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
+# This field defines how often the synchronization is performed.
+# This is an optional field. If not set, a value of 180 seconds
+# is assumed.
+#
+# sync_interval =
+# Example: sync_interval = 60
+#
+# (StrOpt) Defines Region Name that is assigned to this OpenStack Controller.
+# This is useful when multiple OpenStack/Neutron controllers are
+# managing the same Arista HW clusters. Note that this name must
+# match with the region name registered (or known) to keystone
+# service. Authentication with Keystone is performed by EOS.
+# This is optional. If not set, a value of "RegionOne" is assumed.
+#
+# region_name =
+# Example: region_name = RegionOne
+
+
+[l3_arista]
+
+# (StrOpt) Primary host IP address. This is a required field. If not set, all
+# communications to Arista EOS will fail. This is the host where
+# the primary router is created.
+#
+# primary_l3_host =
+# Example: primary_l3_host = 192.168.10.10
+#
+# (StrOpt) Primary host username. This is a required field.
+# If not set, all communications to Arista EOS will fail.
+#
+# primary_l3_host_username =
+# Example: arista_primary_l3_username = admin
+#
+# (StrOpt) Primary host password. This is a required field.
+# If not set, all communications to Arista EOS will fail.
+#
+# primary_l3_host_password =
+# Example: primary_l3_password = my_password
+#
+# (StrOpt) IP address of the second Arista switch paired as
+# MLAG (Multi-chassis Link Aggregation) with the first.
+# This is an optional field; however, if the mlag_config flag is set,
+# then this is a required field. If not set, all
+# communications to Arista EOS will fail. If mlag_config is set
+# to False, then this field is ignored.
+#
+# secondary_l3_host =
+# Example: secondary_l3_host = 192.168.10.20
+#
+# (BoolOpt) Defines if Arista switches are configured in MLAG mode.
+# If yes, all L3 configuration is pushed to both switches
+# automatically. If this flag is set, ensure that secondary_l3_host
+# is set to the second switch's IP.
+# This flag is optional. If not set, a value of "False" is assumed.
+#
+# mlag_config =
+# Example: mlag_config = True
+#
+# (BoolOpt) Defines if the router is created in the default VRF or a
+# specific VRF. This is optional.
+# If not set, a value of "False" is assumed.
+#
+# Example: use_vrf = True
+#
+# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
+# This field defines how often the synchronization is performed.
+# This is an optional field. If not set, a value of 180 seconds
+# is assumed.
+#
+# l3_sync_interval =
+# Example: l3_sync_interval = 60
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini
new file mode 100644
index 00000000..67574110
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini
@@ -0,0 +1,15 @@
+[ml2_brocade]
+# username = <username>
+# password = <password>
+# address = <switch mgmt ip address>
+# ostype = NOS
+# osversion = autodetect | n.n.n
+# physical_networks = physnet1,physnet2
+#
+# Example:
+# username = admin
+# password = password
+# address = 10.24.84.38
+# ostype = NOS
+# osversion = 4.1.1
+# physical_networks = physnet1,physnet2
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini
new file mode 100644
index 00000000..1b69100e
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini
@@ -0,0 +1,118 @@
+[ml2_cisco]
+
+# (StrOpt) A short prefix to prepend to the VLAN number when creating a
+# VLAN interface. For example, if an interface is being created for
+# VLAN 2001 it will be named 'q-2001' using the default prefix.
+#
+# vlan_name_prefix = q-
+# Example: vlan_name_prefix = vnet-
+
+# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
+# svi_round_robin = False
+
+#
+# (StrOpt) The name of the physical_network managed via the Cisco Nexus Switch.
+# This string value must be present in the ml2_conf.ini network_vlan_ranges
+# variable.
+#
+# managed_physical_network =
+# Example: managed_physical_network = physnet1
+
+# Cisco Nexus Switch configurations.
+# Each switch to be managed by Openstack Neutron must be configured here.
+#
+# Cisco Nexus Switch Format.
+# [ml2_mech_cisco_nexus:<IP address of switch>]
+# <hostname>=<intf_type:port>      (1)
+# ssh_port=<ssh port>              (2)
+# username=<credential username>   (3)
+# password=<credential password>   (4)
+#
+# (1) For each host connected to a port on the switch, specify the hostname
+#     and the Nexus physical port (interface) it is connected to.
+#     Valid intf_type's are 'ethernet' and 'port-channel'.
+#     The default setting for <intf_type:> is 'ethernet' and need not be
+#     added to this setting.
+# (2) The TCP port for connecting via SSH to manage the switch. This is
+#     port number 22 unless the switch has been configured otherwise.
+# (3) The username for logging into the switch to manage it.
+# (4) The password for logging into the switch to manage it.
+#
+# Example:
+# [ml2_mech_cisco_nexus:1.1.1.1]
+# compute1=1/1
+# compute2=ethernet:1/2
+# compute3=port-channel:1
+# ssh_port=22
+# username=admin
+# password=mySecretPassword
+
+[ml2_cisco_apic]
+
+# Hostname:port list of APIC controllers
+# apic_hosts = 1.1.1.1:80, 1.1.1.2:8080, 1.1.1.3:80
+
+# Username for the APIC controller
+# apic_username = user
+
+# Password for the APIC controller
+# apic_password = password
+
+# Whether to use SSL for connecting to the APIC controller or not
+# apic_use_ssl = True
+
+# How to map names to APIC: use_uuid or use_name
+# apic_name_mapping = use_name
+
+# Names for APIC objects used by Neutron
+# Note: When deploying multiple clouds against one APIC,
+# these names must be unique between the clouds.
+# apic_vmm_domain = openstack
+# apic_vlan_ns_name = openstack_ns
+# apic_node_profile = openstack_profile
+# apic_entity_profile = openstack_entity
+# apic_function_profile = openstack_function
+# apic_app_profile_name = openstack_app
+# Agent timers for State reporting and topology discovery
+# apic_sync_interval = 30
+# apic_agent_report_interval = 30
+# apic_agent_poll_interval = 2
+
+# Specify your network topology.
+# This section indicates how your compute nodes are connected to the fabric's
+# switches and ports. The format is as follows:
+#
+# [apic_switch:<switch id>]
+# <compute host>,<compute host> = <switchport the host(s) are connected to>
+#
+# You can have multiple sections, one for each switch in your fabric that is
+# participating in Openstack. e.g.
+#
+# [apic_switch:17]
+# ubuntu,ubuntu1 = 1/10
+# ubuntu2,ubuntu3 = 1/11
+#
+# [apic_switch:18]
+# ubuntu5,ubuntu6 = 1/1
+# ubuntu7,ubuntu8 = 1/2
+
+# Describe external connectivity.
+# In this section you can specify the external network configuration in order
+# for the plugin to be able to teach the fabric how to route the internal
+# traffic to the outside world. The external connectivity configuration
+# format is as follows:
+#
+# [apic_external_network:<network name>]
+# switch = <switch id>
+# port = <switchport the external router is connected to>
+# encap = <encapsulation>
+# cidr_exposed = <cidr exposed to the external router>
+# gateway_ip = <ip of the external gateway>
+#
+# An example follows:
+# [apic_external_network:network_ext]
+# switch=203
+# port=1/34
+# encap=vlan-100
+# cidr_exposed=10.10.40.2/16
+# gateway_ip=10.10.40.1
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini
new file mode 100644
index 00000000..6ee4a4e0
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini
@@ -0,0 +1,52 @@
+# Defines Configuration options for FSL SDN OS Mechanism Driver
+# Cloud Resource Discovery (CRD) authorization credentials
+[ml2_fslsdn]
+#(StrOpt) User name for authentication to CRD.
+# e.g.: user12 +# +# crd_user_name = + +#(StrOpt) Password for authentication to CRD. +# e.g.: secret +# +# crd_password = + +#(StrOpt) Tenant name for CRD service. +# e.g.: service +# +# crd_tenant_name = + +#(StrOpt) CRD auth URL. +# e.g.: http://127.0.0.1:5000/v2.0/ +# +# crd_auth_url = + +#(StrOpt) URL for connecting to CRD Service. +# e.g.: http://127.0.0.1:9797 +# +# crd_url= + +#(IntOpt) Timeout value for connecting to CRD service +# in seconds, e.g.: 30 +# +# crd_url_timeout= + +#(StrOpt) Region name for connecting to CRD in +# admin context, e.g.: RegionOne +# +# crd_region_name= + +#(BoolOpt)If set, ignore any SSL validation issues (boolean value) +# e.g.: False +# +# crd_api_insecure= + +#(StrOpt)Authorization strategy for connecting to CRD in admin +# context, e.g.: keystone +# +# crd_auth_strategy= + +#(StrOpt)Location of CA certificates file to use for CRD client +# requests. +# +# crd_ca_certificates_file= diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini new file mode 100644 index 00000000..46139aed --- /dev/null +++ b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini @@ -0,0 +1,4 @@ +[eswitch] +# (StrOpt) Type of Network Interface to allocate for VM: +# mlnx_direct or hostdev according to libvirt terminology +# vnic_type = mlnx_direct diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini new file mode 100644 index 00000000..dbbfcbd2 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini @@ -0,0 +1,28 @@ +# Defines configuration options specific to the Tail-f NCS Mechanism Driver + +[ml2_ncs] +# (StrOpt) Tail-f NCS HTTP endpoint for REST access to the OpenStack +# subtree. +# If this is not set then no HTTP requests will be made. +# +# url = +# Example: url = http://ncs/api/running/services/openstack + +# (StrOpt) Username for HTTP basic authentication to NCS. +# This is an optional parameter. If unspecified then no authentication is used. +# +# username = +# Example: username = admin + +# (StrOpt) Password for HTTP basic authentication to NCS. +# This is an optional parameter. If unspecified then no authentication is used. +# +# password = +# Example: password = admin + +# (IntOpt) Timeout in seconds to wait for NCS HTTP request completion. +# This is an optional parameter, default value is 10 seconds. +# +# timeout = +# Example: timeout = 15 + diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini new file mode 100644 index 00000000..9e88c1bb --- /dev/null +++ b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini @@ -0,0 +1,30 @@ +# Configuration for the OpenDaylight MechanismDriver + +[ml2_odl] +# (StrOpt) OpenDaylight REST URL +# If this is not set then no HTTP requests will be made. +# +# url = +# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron + +# (StrOpt) Username for HTTP basic authentication to ODL. +# +# username = +# Example: username = admin + +# (StrOpt) Password for HTTP basic authentication to ODL. +# +# password = +# Example: password = admin + +# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion. 
+# This is an optional parameter, default value is 10 seconds.
+#
+# timeout = 10
+# Example: timeout = 15
+
+# (IntOpt) Timeout in minutes to wait for a Tomcat session timeout.
+# This is an optional parameter, default value is 30 minutes.
+#
+# session_timeout = 30
+# Example: session_timeout = 60
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini
new file mode 100644
index 00000000..4a94b987
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini
@@ -0,0 +1,13 @@
+# Defines configuration options specific to the OpenFlow Agent Mechanism Driver
+
+[ovs]
+# Please refer to the Open vSwitch plugin configuration options.
+
+[agent]
+# (IntOpt) Number of seconds to retry acquiring an Open vSwitch datapath.
+# This is an optional parameter, default value is 60 seconds.
+#
+# get_datapath_retry_times =
+# Example: get_datapath_retry_times = 30
+
+# For options other than the above, please refer to the Open vSwitch
+# plugin configuration options.
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini
new file mode 100644
index 00000000..9566f54c
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini
@@ -0,0 +1,31 @@
+# Defines configuration options for SRIOV NIC Switch MechanismDriver
+# and Agent
+
+[ml2_sriov]
+# (ListOpt) Comma-separated list of
+# supported Vendor PCI Devices, in format vendor_id:product_id
+#
+# supported_pci_vendor_devs = 15b3:1004, 8086:10c9
+# Example: supported_pci_vendor_devs = 15b3:1004
+#
+# (BoolOpt) Requires running SRIOV neutron agent for port binding
+# agent_required = True
+
+[sriov_nic]
+# (ListOpt) Comma-separated list of <physical_network>:<network_device>
+# tuples mapping physical network names to the agent's node-specific
+# physical network device interfaces of SR-IOV physical function to be used
+# for VLAN networks. All physical networks listed in network_vlan_ranges on
+# the server should have mappings to appropriate interfaces on each agent.
+#
+# physical_device_mappings =
+# Example: physical_device_mappings = physnet1:eth1
+#
+# (ListOpt) Comma-separated list of <network_device>:<vfs_to_exclude>
+# tuples, mapping network_device to the agent's node-specific list of virtual
+# functions that should not be used for virtual networking.
+# vfs_to_exclude is a semicolon-separated list of virtual
+# functions to exclude from network_device. The network_device in the
+# mapping should appear in the physical_device_mappings list.
+# exclude_devices =
+# Example: exclude_devices = eth1:0000:07:00.2; 0000:07:00.3
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini
new file mode 100644
index 00000000..b1225111
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini
@@ -0,0 +1,79 @@
+[mlnx]
+# (StrOpt) Type of network to allocate for tenant networks. The
+# default value is 'vlan'. You MUST configure network_vlan_ranges below
+# in order for tenant networks to provide connectivity between hosts.
+# Set to 'none' to disable creation of tenant networks.
+#
+# tenant_network_type = vlan
+# Example: tenant_network_type = vlan
+
+# (ListOpt) Comma-separated list of
+# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
+# of VLAN IDs on named physical networks that are available for
+# allocation. All physical networks listed are available for flat and
+# VLAN provider network creation. Specified ranges of VLAN IDs are
+# available for tenant network allocation if tenant_network_type is
+# 'vlan'. If empty, only local networks may be created.
+#
+# network_vlan_ranges =
+# Example: network_vlan_ranges = default:1:100
+
+# (ListOpt) Comma-separated list of
+# <physical_network>:<physical_network_type> tuples mapping physical
+# network names to physical network types. All physical
+# networks listed in network_vlan_ranges should have
+# mappings to appropriate physical network type.
+# Type of the physical network can be either eth (Ethernet) or
+# ib (InfiniBand). If empty, physical network eth type is assumed.
+#
+# physical_network_type_mappings =
+# Example: physical_network_type_mappings = default:eth
+
+# (StrOpt) Type of the physical network, can be either 'eth' or 'ib'
+# The default value is 'eth'
+# physical_network_type = eth
+
+[eswitch]
+# (ListOpt) Comma-separated list of
+# <physical_network>:<physical_interface> tuples mapping physical
+# network names to the agent's node-specific physical network
+# interfaces to be used for flat and VLAN networks. All physical
+# networks listed in network_vlan_ranges on the server should have
+# mappings to appropriate interfaces on each agent.
+#
+# physical_interface_mappings =
+# Example: physical_interface_mappings = default:eth2
+
+# (StrOpt) Type of Network Interface to allocate for VM:
+# mlnx_direct or hostdev according to libvirt terminology
+# vnic_type = mlnx_direct
+
+# (StrOpt) Eswitch daemon endpoint connection URL
+# daemon_endpoint = 'tcp://127.0.0.1:60001'
+
+# The number of milliseconds the agent will wait for
+# a response to a request to the daemon
+# request_timeout = 3000
+
+# The number of times the agent will retry a request
+# to the daemon before giving up
+# retries = 3
+
+# The backoff rate multiplier for the waiting period between retries
+# on requests to the daemon, i.e. a value of 2 will double
+# the request timeout each retry
+# backoff_rate = 2
+
+[agent]
+# Agent's polling interval in seconds
+# polling_interval = 2
+
+# (BoolOpt) Enable server RPC compatibility with old (pre-havana)
+# agents.
+#
+# rpc_support_old_agents = False
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/nec/nec.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/nec/nec.ini
new file mode 100644
index 00000000..aa4171da
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/nec/nec.ini
@@ -0,0 +1,60 @@
+# Sample Configurations
+
+[ovs]
+# Do not change this parameter unless you have a good reason to.
+# This is the name of the OVS integration bridge. There is one per hypervisor.
+# The integration bridge acts as a virtual "patch port". All VM VIFs are
+# attached to this bridge and then "patched" according to their network
+# connectivity.
+# integration_bridge = br-int
+
+[agent]
+# Agent's polling interval in seconds
+# polling_interval = 2
+
+[securitygroup]
+# Firewall driver for realizing neutron security group function
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+
+[ofc]
+# Specify OpenFlow Controller Host, Port and Driver to connect.
+# host = 127.0.0.1
+# port = 8888
+
+# Base URL of OpenFlow Controller REST API.
+# It is prepended to a path of each API request.
+# path_prefix =
+
+# Drivers are in neutron/plugins/nec/drivers/ .
+# driver = trema
+
+# PacketFilter is available when it's enabled in this configuration
+# and supported by the driver.
+# enable_packet_filter = true
+
+# Use SSL to connect
+# use_ssl = false
+
+# Key file
+# key_file =
+
+# Certificate file
+# cert_file =
+
+# Disable SSL certificate verification
+# insecure_ssl = false
+
+# Maximum attempts per OFC API request. NEC plugin retries
+# API request to OFC when OFC returns ServiceUnavailable (503).
+# The value must be greater than 0.
+# api_max_attempts = 3
+
+[provider]
+# Default router provider to use.
+# default_router_provider = l3-agent
+# List of enabled router providers.
+# router_providers = l3-agent,openflow
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini
new file mode 100644
index 00000000..aad37bd5
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini
@@ -0,0 +1,41 @@
+# Please fill in the correct data for all the keys below and uncomment key-value pairs
+[restproxy]
+# (StrOpt) Default Network partition in which VSD will
+# orchestrate network resources using openstack
+#
+#default_net_partition_name =
+
+# (StrOpt) Nuage provided uri for initial authorization to
+# access VSD
+#
+#auth_resource = /auth
+
+# (StrOpt) IP Address and Port of VSD
+#
+#server = ip:port
+
+# (StrOpt) Organization name in which VSD will orchestrate
+# network resources using openstack
+#
+#organization = org
+
+# (StrOpt) Username and password of VSD for authentication
+#
+#serverauth = uname:pass
+
+# (BoolOpt) Boolean for SSL connection with VSD server
+#
+#serverssl = True
+
+# (StrOpt) Nuage provided base uri to reach out to VSD
+#
+#base_uri = /base
+
+[syncmanager]
+# (BoolOpt) Boolean to enable sync between openstack and VSD
+#
+#enable_sync = False
+
+# (IntOpt) Sync interval in seconds between openstack and VSD
+#
+#sync_interval = 0
\ No newline at end of file
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini
new file mode 100644
index 00000000..a1c05d97
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini
@@ -0,0 +1,35 @@
+[nvsd]
+# Configure the NVSD controller. The plugin proxies the API calls to the
+# NVSD controller, which implements the required functionality.
+
+# IP address of NVSD controller api server
+# nvsd_ip =
+
+# Port number of NVSD controller api server
+# nvsd_port = 8082
+
+# Authentication credentials to access the api server
+# nvsd_user =
+# nvsd_passwd =
+
+# API request timeout in seconds
+# request_timeout =
+
+# Maximum number of retry attempts to login to the NVSD controller
+# Specify 0 to retry until success (default)
+# nvsd_retries = 0
+
+[securitygroup]
+# Specify the firewall_driver option: if neutron security groups are
+# disabled use NoopFirewallDriver, otherwise OVSHybridIptablesFirewallDriver.
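+# For example, with Neutron security groups enabled you would set (an
+# illustrative value, mirroring the other plugin configs in this patch):
+# firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver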
+# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
+
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+
+[agent]
+# root_helper = sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
+
+[database]
+# connection = mysql://root:<pass>@127.0.0.1/<database>?charset=utf8
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini
new file mode 100644
index 00000000..629f1fc4
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini
@@ -0,0 +1,26 @@
+# OpenContrail is an Apache 2.0-licensed project that is built using
+# standards-based protocols and provides all the necessary components for
+# network virtualization: SDN controller, virtual router, analytics engine,
+# and published northbound APIs.
+# For more information visit: http://opencontrail.org
+
+# Opencontrail plugin specific configuration
+[CONTRAIL]
+# (StrOpt) IP address to connect to opencontrail controller.
+# Uncomment this line to specify the IP address of the opencontrail
+# Api-Server.
+# Default value is localhost (127.0.0.1).
+# api_server_ip='127.0.0.1'
+
+# (IntOpt) port to connect to opencontrail controller.
+# Uncomment this line to specify the port of the opencontrail
+# Api-Server.
+# Default value is 8082
+# api_server_port=8082
+
+# (DictOpt) enable opencontrail extensions
+# Opencontrail will in future support extensions such as ipam and policy;
+# these extensions can be configured as shown below. The plugin will then
+# load the specified extensions.
+# Default value is None; it won't load any extensions
+# contrail_extensions=ipam:,policy:
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
new file mode 100644
index 00000000..9c8e6b58
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
@@ -0,0 +1,190 @@
+[ovs]
+# (StrOpt) Type of network to allocate for tenant networks. The
+# default value 'local' is useful only for single-box testing and
+# provides no connectivity between hosts. You MUST either change this
+# to 'vlan' and configure network_vlan_ranges below or change this to
+# 'gre' or 'vxlan' and configure tunnel_id_ranges below in order for
+# tenant networks to provide connectivity between hosts. Set to 'none'
+# to disable creation of tenant networks.
+#
+# tenant_network_type = local
+# Example: tenant_network_type = gre
+# Example: tenant_network_type = vxlan
+
+# (ListOpt) Comma-separated list of
+# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
+# of VLAN IDs on named physical networks that are available for
+# allocation. All physical networks listed are available for flat and
+# VLAN provider network creation. Specified ranges of VLAN IDs are
+# available for tenant network allocation if tenant_network_type is
+# 'vlan'. If empty, only gre, vxlan and local networks may be created.
+#
+# network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999
+
+# (BoolOpt) Set to True in the server and the agents to enable support
+# for GRE or VXLAN networks. Requires kernel support for OVS patch ports and
+# GRE or VXLAN tunneling.
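+# (See the GRE and VXLAN sample configurations at the end of this file
+# for complete tunneling setups.)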
+#
+# WARNING: This option will be deprecated in the Icehouse release, at which
+# point setting tunnel_type below will be required to enable
+# tunneling.
+#
+# enable_tunneling = False
+
+# (StrOpt) The type of tunnel network, if any, supported by the plugin. If
+# this is set, it will cause tunneling to be enabled. If this is not set and
+# the option enable_tunneling is set, this will default to 'gre'.
+#
+# tunnel_type =
+# Example: tunnel_type = gre
+# Example: tunnel_type = vxlan
+
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples
+# enumerating ranges of GRE or VXLAN tunnel IDs that are available for
+# tenant network allocation if tenant_network_type is 'gre' or 'vxlan'.
+#
+# tunnel_id_ranges =
+# Example: tunnel_id_ranges = 1:1000
+
+# Do not change this parameter unless you have a good reason to.
+# This is the name of the OVS integration bridge. There is one per hypervisor.
+# The integration bridge acts as a virtual "patch bay". All VM VIFs are
+# attached to this bridge and then "patched" according to their network
+# connectivity.
+#
+# integration_bridge = br-int
+
+# Only used for the agent if tunnel_id_ranges (above) is not empty for
+# the server. In most cases, the default value should be fine.
+#
+# tunnel_bridge = br-tun
+
+# Peer patch port in integration bridge for tunnel bridge
+# int_peer_patch_port = patch-tun
+
+# Peer patch port in tunnel bridge for integration bridge
+# tun_peer_patch_port = patch-int
+
+# Uncomment this line for the agent if tunnel_id_ranges (above) is not
+# empty for the server. Set local-ip to be the local IP address of
+# this hypervisor.
+#
+# local_ip =
+
+# (ListOpt) Comma-separated list of <physical_network>:<bridge> tuples
+# mapping physical network names to the agent's node-specific OVS
+# bridge names to be used for flat and VLAN networks. The length of
+# bridge names should be no more than 11 characters. Each bridge must
+# exist, and should have a physical network interface configured as a
+# port. All physical networks listed in network_vlan_ranges on the
+# server should have mappings to appropriate bridges on each agent.
+#
+# bridge_mappings =
+# Example: bridge_mappings = physnet1:br-eth1
+
+# (BoolOpt) Use veths instead of patch ports to interconnect the integration
+# bridge to physical networks. Supports kernels without OVS patch port
+# support as long as it is set to True.
+# use_veth_interconnection = False
+
+[agent]
+# Agent's polling interval in seconds
+# polling_interval = 2
+
+# Minimize polling by monitoring ovsdb for interface changes
+# minimize_polling = True
+
+# When minimize_polling = True, the number of seconds to wait before
+# respawning the ovsdb monitor after losing communication with it
+# ovsdb_monitor_respawn_interval = 30
+
+# (ListOpt) The types of tenant network tunnels supported by the agent.
+# Setting this will enable tunneling support in the agent. This can be set to
+# either 'gre' or 'vxlan'. If this is unset, it will default to [] and
+# disable tunneling support in the agent. When running the agent with the OVS
+# plugin, this value must be the same as "tunnel_type" in the "[ovs]" section.
+# When running the agent with ML2, you can specify as many values here as
+# your compute hosts support.
+#
+# tunnel_types =
+# Example: tunnel_types = gre
+# Example: tunnel_types = vxlan
+# Example: tunnel_types = vxlan, gre
+
+# (IntOpt) The port number to utilize if tunnel_types includes 'vxlan'. By
+# default, this will make use of the Open vSwitch default value of '4789' if
+# not specified.
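+# (Background note: 4789 is the IANA-assigned VXLAN port; 8472, used in the
+# example below, is the older default of the Linux kernel's VXLAN driver.)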
+#
+# vxlan_udp_port =
+# Example: vxlan_udp_port = 8472
+
+# (IntOpt) This is the MTU size of veth interfaces.
+# Do not change unless you have a good reason to.
+# The default MTU size of veth interfaces is 1500.
+# This option has no effect if use_veth_interconnection is False
+# veth_mtu =
+# Example: veth_mtu = 1504
+
+# (BoolOpt) Flag to enable l2-population extension. This option should only be
+# used in conjunction with ml2 plugin and l2population mechanism driver. It'll
+# enable the plugin to populate remote port MACs and IPs (using fdb_add/remove
+# RPC callbacks instead of tunnel_sync/update) on OVS agents in order to
+# optimize tunnel management.
+#
+# l2_population = False
+
+# Enable local ARP responder. Requires OVS 2.1. This is only used by the l2
+# population ML2 MechanismDriver.
+#
+# arp_responder = False
+
+# (BoolOpt) Set or un-set the don't fragment (DF) bit on outgoing IP packets
+# carrying GRE/VXLAN tunnels. The default value is True.
+#
+# dont_fragment = True
+
+# (BoolOpt) Set to True on L2 agents to enable support
+# for distributed virtual routing.
+#
+# enable_distributed_routing = False
+
+[securitygroup]
+# Firewall driver for realizing neutron security group function.
+# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
+# Example: firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+
+#-----------------------------------------------------------------------------
+# Sample Configurations.
+#-----------------------------------------------------------------------------
+#
+# 1. With VLANs on eth1.
+# [ovs]
+# network_vlan_ranges = default:2000:3999
+# tunnel_id_ranges =
+# integration_bridge = br-int
+# bridge_mappings = default:br-eth1
+#
+# 2. With GRE tunneling.
+# [ovs]
+# network_vlan_ranges =
+# tunnel_id_ranges = 1:1000
+# integration_bridge = br-int
+# tunnel_bridge = br-tun
+# local_ip = 10.0.0.3
+#
+# 3. With VXLAN tunneling.
+# [ovs]
+# network_vlan_ranges =
+# tenant_network_type = vxlan
+# tunnel_type = vxlan
+# tunnel_id_ranges = 1:1000
+# integration_bridge = br-int
+# tunnel_bridge = br-tun
+# local_ip = 10.0.0.3
+# [agent]
+# tunnel_types = vxlan
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini
new file mode 100644
index 00000000..bfe8062a
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini
@@ -0,0 +1,14 @@
+# Config file for Neutron PLUMgrid Plugin
+
+[plumgriddirector]
+# These settings should point to the PLUMgrid Director
+# for the PLUMgrid platform.
+# director_server=
+# director_server_port=
+# Authentication parameters for the Director.
+# These are the admin credentials to manage and control
+# the PLUMgrid Director server.
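+# A filled-in example (hypothetical values only):
+# director_server=192.0.2.10
+# director_server_port=443
+# username=plumgrid_admin
+# password=plumgrid_secret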
+# username=
+# password=
+# servertimeout=5
+# driver=
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini
new file mode 100644
index 00000000..9d9cfa25
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini
@@ -0,0 +1,44 @@
+[ovs]
+# integration_bridge = br-int
+
+# openflow_rest_api = <host IP address>:<port>
+# openflow_rest_api = 127.0.0.1:8080
+
+# tunnel key range: 0 < tunnel_key_min < tunnel_key_max
+# VLAN: 12bits, GRE, VXLAN: 24bits
+# tunnel_key_min = 1
+# tunnel_key_max = 0xffffff
+
+# tunnel_ip = <IP address for tunneling>
+# tunnel_interface = interface for tunneling
+# when tunnel_ip is NOT specified, ip address is read
+# from this interface
+# tunnel_ip =
+# tunnel_interface =
+tunnel_interface = eth0
+
+# ovsdb_port = port number on which ovsdb is listening
+# ryu-agent uses this parameter to setup ovsdb.
+# ovs-vsctl set-manager ptcp:<ovsdb_port>
+# See set-manager section of man ovs-vsctl for details.
+# currently only ptcp is supported.
+# ovsdb_ip = <host IP address on which ovsdb is listening>
+# ovsdb_interface = interface for ovsdb
+# when ovsdb_addr is NOT specified, the ip address is read
+# from this interface
+# ovsdb_port = 6634
+# ovsdb_ip =
+# ovsdb_interface =
+ovsdb_interface = eth0
+
+[securitygroup]
+# Firewall driver for realizing neutron security group function
+# firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+
+[agent]
+# Agent's polling interval in seconds
+# polling_interval = 2
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini
new file mode 100644
index 00000000..baca73b8
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini
@@ -0,0 +1,200 @@
+[DEFAULT]
+# User name for NSX controller
+# nsx_user = admin
+
+# Password for NSX controller
+# nsx_password = admin
+
+# Time before aborting a request on an unresponsive controller (seconds)
+# http_timeout = 75
+
+# Maximum number of times a particular request should be retried
+# retries = 2
+
+# Maximum number of times a redirect response should be followed
+# redirects = 2
+
+# Comma-separated list of NSX controller endpoints (<ip>:<port>). When port
+# is omitted, 443 is assumed. This option MUST be specified, e.g.:
+# nsx_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh:80
+
+# UUID of the pre-existing default NSX Transport zone to be used for creating
+# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.:
+# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
+
+# (Optional) UUID for the default l3 gateway service to use with this cluster.
+# To be specified if planning to use logical routers with external gateways.
+# default_l3_gw_service_uuid =
+
+# (Optional) UUID for the default l2 gateway service to use with this cluster.
+# Specify this to provide tenants with a predefined gateway for connecting
+# their networks.
+# default_l2_gw_service_uuid =
+
+# (Optional) UUID for the default service cluster. A service cluster is introduced to
+# represent a group of gateways and it is needed in order to use Logical Services like
+# dhcp and metadata in the logical space. NOTE: If agent_mode is set to 'agentless' this
+# config parameter *MUST BE* set to a valid pre-existent service cluster uuid.
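+# e.g. (hypothetical uuid, in the same format as default_tz_uuid above):
+# default_service_cluster_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53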
+# default_service_cluster_uuid =
+
+# Name of the default interface to be used on network-gateways. This value
+# will be used for any device associated with a network gateway for which an
+# interface name was not specified
+# default_interface_name = breth0
+
+[quotas]
+# number of network gateways allowed per tenant, -1 means unlimited
+# quota_network_gateway = 5
+
+[vcns]
+# URL for VCNS manager
+# manager_uri = https://management_ip
+
+# User name for VCNS manager
+# user = admin
+
+# Password for VCNS manager
+# password = default
+
+# (Optional) Datacenter ID for Edge deployment
+# datacenter_moid =
+
+# (Optional) Deployment Container ID for NSX Edge deployment
+# If not specified, either a default global container will be used, or
+# the resource pool and datastore specified below will be used
+# deployment_container_id =
+
+# (Optional) Resource pool ID for NSX Edge deployment
+# resource_pool_id =
+
+# (Optional) Datastore ID for NSX Edge deployment
+# datastore_id =
+
+# (Required) UUID of logic switch for physical network connectivity
+# external_network =
+
+# (Optional) Asynchronous task status check interval
+# default is 2000 (millisecond)
+# task_status_check_interval = 2000
+
+[nsx]
+# Maximum number of ports for each bridged logical switch
+# The recommended value for this parameter varies with NSX version
+# Please use:
+# NSX 2.x -> 64
+# NSX 3.0, 3.1 -> 5000
+# NSX 3.2 -> 10000
+# max_lp_per_bridged_ls = 5000
+
+# Maximum number of ports for each overlay (stt, gre) logical switch
+# max_lp_per_overlay_ls = 256
+
+# Number of connections to each controller node.
+# default is 10
+# concurrent_connections = 10
+
+# Number of seconds a generation id should be valid for (default -1 meaning do not time out)
+# nsx_gen_timeout = -1
+
+# Acceptable values for 'metadata_mode' are:
+# - 'access_network': this enables a dedicated connection to the metadata
+# proxy for metadata server access via Neutron router.
+# - 'dhcp_host_route': this enables host route injection via the dhcp agent.
+# This option is only useful if running on a host that does not support
+# namespaces; otherwise access_network should be used.
+# metadata_mode = access_network
+
+# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt)
+# default_transport_type = stt
+
+# Specifies in which mode the plugin needs to operate in order to provide DHCP and
+# metadata proxy services to tenant instances. If 'agent' is chosen (default)
+# the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to
+# provide such services. In this mode, the plugin supports API extensions 'agent'
+# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse),
+# the plugin will use NSX logical services for DHCP and metadata proxy. This
+# simplifies the deployment model for Neutron, in that the plugin no longer requires
+# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode
+# becomes ineffective. The 'agentless' mode is supported from NSX 4.2 or above.
+# Furthermore, a 'combined' mode is also provided and is used to support existing
+# deployments that want to adopt the agentless mode going forward. With this mode,
+# existing networks keep being served by the existing infrastructure (thus preserving
+# backward compatibility), whereas new networks will be served by the new infrastructure.
+# Migration tools are provided to 'move' one network from one model to another; with +# agent_mode set to 'combined', option 'network_auto_schedule' in neutron.conf is +# ignored, as new networks will no longer be scheduled to existing dhcp agents. +# agent_mode = agent + +# Specifies which mode packet replication should be done in. If set to service +# a service node is required in order to perform packet replication. This can +# also be set to source if one wants replication to be performed locally (NOTE: +# usually only useful for testing if one does not want to deploy a service node). +# In order to leverage distributed routers, replication_mode should be set to +# "service". +# replication_mode = service + +[nsx_sync] +# Interval in seconds between runs of the status synchronization task. +# The plugin will aim at resynchronizing operational status for all +# resources in this interval, and it should be therefore large enough +# to ensure the task is feasible. Otherwise the plugin will be +# constantly synchronizing resource status, ie: a new task is started +# as soon as the previous is completed. +# If this value is set to 0, the state synchronization thread for this +# Neutron instance will be disabled. +# state_sync_interval = 10 + +# Random additional delay between two runs of the state synchronization task. +# An additional wait time between 0 and max_random_sync_delay seconds +# will be added on top of state_sync_interval. +# max_random_sync_delay = 0 + +# Minimum delay, in seconds, between two status synchronization requests for NSX. +# Depending on chunk size, controller load, and other factors, state +# synchronization requests might be pretty heavy. This means the +# controller might take time to respond, and its load might be quite +# increased by them. This parameter allows to specify a minimum +# interval between two subsequent requests. +# The value for this parameter must never exceed state_sync_interval. +# If this does, an error will be raised at startup. +# min_sync_req_delay = 1 + +# Minimum number of resources to be retrieved from NSX in a single status +# synchronization request. +# The actual size of the chunk will increase if the number of resources is such +# that using the minimum chunk size will cause the interval between two +# requests to be less than min_sync_req_delay +# min_chunk_size = 500 + +# Enable this option to allow punctual state synchronization on show +# operations. In this way, show operations will always fetch the operational +# status of the resource from the NSX backend, and this might have +# a considerable impact on overall performance. +# always_read_status = False + +[nsx_lsn] +# Pull LSN information from NSX in case it is missing from the local +# data store. This is useful to rebuild the local store in case of +# server recovery +# sync_on_missing_data = False + +[nsx_dhcp] +# (Optional) Comma separated list of additional dns servers. Default is an empty list +# extra_domain_name_servers = + +# Domain to use for building the hostnames +# domain_name = openstacklocal + +# Default DHCP lease time +# default_lease_time = 43200 + +[nsx_metadata] +# IP address used by Metadata server +# metadata_server_address = 127.0.0.1 + +# TCP Port used by Metadata server +# metadata_server_port = 8775 + +# When proxying metadata requests, Neutron signs the Instance-ID header with a +# shared secret to prevent spoofing. 
You may select any string for a secret, +# but it MUST match with the configuration used by the Metadata server +# metadata_shared_secret = diff --git a/install-files/openstack/usr/share/openstack/neutron/policy.json b/install-files/openstack/usr/share/openstack/neutron/policy.json new file mode 100644 index 00000000..e7db4357 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/neutron/policy.json @@ -0,0 +1,138 @@ +{ + "context_is_admin": "role:admin", + "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s", + "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s", + "admin_only": "rule:context_is_admin", + "regular_user": "", + "shared": "field:networks:shared=True", + "shared_firewalls": "field:firewalls:shared=True", + "external": "field:networks:router:external=True", + "default": "rule:admin_or_owner", + + "create_subnet": "rule:admin_or_network_owner", + "get_subnet": "rule:admin_or_owner or rule:shared", + "update_subnet": "rule:admin_or_network_owner", + "delete_subnet": "rule:admin_or_network_owner", + + "create_network": "", + "get_network": "rule:admin_or_owner or rule:shared or rule:external", + "get_network:router:external": "rule:regular_user", + "get_network:segments": "rule:admin_only", + "get_network:provider:network_type": "rule:admin_only", + "get_network:provider:physical_network": "rule:admin_only", + "get_network:provider:segmentation_id": "rule:admin_only", + "get_network:queue_id": "rule:admin_only", + "create_network:shared": "rule:admin_only", + "create_network:router:external": "rule:admin_only", + "create_network:segments": "rule:admin_only", + "create_network:provider:network_type": "rule:admin_only", + "create_network:provider:physical_network": "rule:admin_only", + "create_network:provider:segmentation_id": "rule:admin_only", + "update_network": "rule:admin_or_owner", + "update_network:segments": "rule:admin_only", + "update_network:shared": "rule:admin_only", + "update_network:provider:network_type": "rule:admin_only", + "update_network:provider:physical_network": "rule:admin_only", + "update_network:provider:segmentation_id": "rule:admin_only", + "update_network:router:external": "rule:admin_only", + "delete_network": "rule:admin_or_owner", + + "create_port": "", + "create_port:mac_address": "rule:admin_or_network_owner", + "create_port:fixed_ips": "rule:admin_or_network_owner", + "create_port:port_security_enabled": "rule:admin_or_network_owner", + "create_port:binding:host_id": "rule:admin_only", + "create_port:binding:profile": "rule:admin_only", + "create_port:mac_learning_enabled": "rule:admin_or_network_owner", + "get_port": "rule:admin_or_owner", + "get_port:queue_id": "rule:admin_only", + "get_port:binding:vif_type": "rule:admin_only", + "get_port:binding:vif_details": "rule:admin_only", + "get_port:binding:host_id": "rule:admin_only", + "get_port:binding:profile": "rule:admin_only", + "update_port": "rule:admin_or_owner", + "update_port:fixed_ips": "rule:admin_or_network_owner", + "update_port:port_security_enabled": "rule:admin_or_network_owner", + "update_port:binding:host_id": "rule:admin_only", + "update_port:binding:profile": "rule:admin_only", + "update_port:mac_learning_enabled": "rule:admin_or_network_owner", + "delete_port": "rule:admin_or_owner", + + "get_router:ha": "rule:admin_only", + "create_router": "rule:regular_user", + "create_router:external_gateway_info:enable_snat": "rule:admin_only", + "create_router:distributed": "rule:admin_only", + "create_router:ha": 
"rule:admin_only", + "get_router": "rule:admin_or_owner", + "get_router:distributed": "rule:admin_only", + "update_router:external_gateway_info:enable_snat": "rule:admin_only", + "update_router:distributed": "rule:admin_only", + "update_router:ha": "rule:admin_only", + "delete_router": "rule:admin_or_owner", + + "add_router_interface": "rule:admin_or_owner", + "remove_router_interface": "rule:admin_or_owner", + + "create_firewall": "", + "get_firewall": "rule:admin_or_owner", + "create_firewall:shared": "rule:admin_only", + "get_firewall:shared": "rule:admin_only", + "update_firewall": "rule:admin_or_owner", + "update_firewall:shared": "rule:admin_only", + "delete_firewall": "rule:admin_or_owner", + + "create_firewall_policy": "", + "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls", + "create_firewall_policy:shared": "rule:admin_or_owner", + "update_firewall_policy": "rule:admin_or_owner", + "delete_firewall_policy": "rule:admin_or_owner", + + "create_firewall_rule": "", + "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls", + "update_firewall_rule": "rule:admin_or_owner", + "delete_firewall_rule": "rule:admin_or_owner", + + "create_qos_queue": "rule:admin_only", + "get_qos_queue": "rule:admin_only", + + "update_agent": "rule:admin_only", + "delete_agent": "rule:admin_only", + "get_agent": "rule:admin_only", + + "create_dhcp-network": "rule:admin_only", + "delete_dhcp-network": "rule:admin_only", + "get_dhcp-networks": "rule:admin_only", + "create_l3-router": "rule:admin_only", + "delete_l3-router": "rule:admin_only", + "get_l3-routers": "rule:admin_only", + "get_dhcp-agents": "rule:admin_only", + "get_l3-agents": "rule:admin_only", + "get_loadbalancer-agent": "rule:admin_only", + "get_loadbalancer-pools": "rule:admin_only", + + "create_floatingip": "rule:regular_user", + "update_floatingip": "rule:admin_or_owner", + "delete_floatingip": "rule:admin_or_owner", + "get_floatingip": "rule:admin_or_owner", + + "create_network_profile": "rule:admin_only", + "update_network_profile": "rule:admin_only", + "delete_network_profile": "rule:admin_only", + "get_network_profiles": "", + "get_network_profile": "", + "update_policy_profiles": "rule:admin_only", + "get_policy_profiles": "", + "get_policy_profile": "", + + "create_metering_label": "rule:admin_only", + "delete_metering_label": "rule:admin_only", + "get_metering_label": "rule:admin_only", + + "create_metering_label_rule": "rule:admin_only", + "delete_metering_label_rule": "rule:admin_only", + "get_metering_label_rule": "rule:admin_only", + + "get_service_provider": "rule:regular_user", + "get_lsn": "rule:admin_only", + "create_lsn": "rule:admin_only" +} diff --git a/install-files/openstack/usr/share/openstack/neutron/vpn_agent.ini b/install-files/openstack/usr/share/openstack/neutron/vpn_agent.ini new file mode 100644 index 00000000..c3089df9 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/neutron/vpn_agent.ini @@ -0,0 +1,14 @@ +[DEFAULT] +# VPN-Agent configuration file +# Note vpn-agent inherits l3-agent, so you can use configs on l3-agent also + +[vpnagent] +# vpn device drivers which vpn agent will use +# If we want to use multiple drivers, we need to define this option multiple times. 
+# vpn_device_driver=neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver +# vpn_device_driver=neutron.services.vpn.device_drivers.cisco_ipsec.CiscoCsrIPsecDriver +# vpn_device_driver=another_driver + +[ipsec] +# Status check interval +# ipsec_status_check_interval=60 diff --git a/install-files/openstack/usr/share/openstack/nova-config.yml b/install-files/openstack/usr/share/openstack/nova-config.yml new file mode 100644 index 00000000..4f43db39 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/nova-config.yml @@ -0,0 +1,34 @@ +--- +- hosts: localhost + vars_files: + - "/etc/openstack/nova.conf" + tasks: + - name: Create the nova user. + user: + name: nova + comment: Openstack Nova Daemons + shell: /sbin/nologin + home: /var/lib/nova + groups: libvirt + append: yes + + - name: Create the /var folders for nova + file: + path: "{{ item }}" + state: directory + owner: nova + group: nova + with_items: + - /var/run/nova + - /var/lock/nova + - /var/log/nova + - /var/lib/nova + - /var/lib/nova/instances + + - file: path=/etc/nova state=directory + - name: Add the configuration needed for nova in /etc/nova using templates + template: + src: /usr/share/openstack/nova/{{ item }} + dest: /etc/nova/{{ item }} + with_lines: + - cd /usr/share/openstack/nova && find -type f diff --git a/install-files/openstack/usr/share/openstack/nova-db.yml b/install-files/openstack/usr/share/openstack/nova-db.yml new file mode 100644 index 00000000..e7dc5b10 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/nova-db.yml @@ -0,0 +1,51 @@ +--- +- hosts: localhost + vars_files: + - "/etc/openstack/nova.conf" + tasks: + - name: Create nova service user in service tenant + keystone_user: + user: "{{ NOVA_SERVICE_USER }}" + password: "{{ NOVA_SERVICE_PASSWORD }}" + tenant: service + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Assign admin role to nova service user in the service tenant + keystone_user: + role: admin + user: "{{ NOVA_SERVICE_USER }}" + tenant: service + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Add nova endpoint + keystone_service: + name: nova + type: compute + description: Openstack Compute Service + publicurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8774/v2/%(tenant_id)s' + internalurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8774/v2/%(tenant_id)s' + adminurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8774/v2/%(tenant_id)s' + region: 'regionOne' + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - name: Create postgresql user for nova + postgresql_user: + name: "{{ NOVA_DB_USER }}" + login_host: "{{ CONTROLLER_HOST_ADDRESS }}" + password: "{{ NOVA_DB_PASSWORD }}" + sudo: yes + sudo_user: nova + + - name: Create database for nova services + postgresql_db: + name: nova + owner: "{{ NOVA_DB_USER }}" + login_host: "{{ CONTROLLER_HOST_ADDRESS }}" + sudo: yes + sudo_user: nova + + - name: Initiate nova database + nova_manage: + action: dbsync + sudo: yes + sudo_user: nova diff --git a/install-files/openstack/usr/share/openstack/nova/api-paste.ini b/install-files/openstack/usr/share/openstack/nova/api-paste.ini new file mode 100644 index 00000000..2a825a5b --- /dev/null +++ b/install-files/openstack/usr/share/openstack/nova/api-paste.ini @@ -0,0 +1,118 @@ +############ +# Metadata # +############ +[composite:metadata] +use = egg:Paste#urlmap +/: meta + +[pipeline:meta] +pipeline = ec2faultwrap logrequest metaapp + +[app:metaapp] +paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory + +####### +# EC2 # +####### + +[composite:ec2] +use = 
egg:Paste#urlmap +/services/Cloud: ec2cloud + +[composite:ec2cloud] +use = call:nova.api.auth:pipeline_factory +noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor +keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor + +[filter:ec2faultwrap] +paste.filter_factory = nova.api.ec2:FaultWrapper.factory + +[filter:logrequest] +paste.filter_factory = nova.api.ec2:RequestLogging.factory + +[filter:ec2lockout] +paste.filter_factory = nova.api.ec2:Lockout.factory + +[filter:ec2keystoneauth] +paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory + +[filter:ec2noauth] +paste.filter_factory = nova.api.ec2:NoAuth.factory + +[filter:cloudrequest] +controller = nova.api.ec2.cloud.CloudController +paste.filter_factory = nova.api.ec2:Requestify.factory + +[filter:authorizer] +paste.filter_factory = nova.api.ec2:Authorizer.factory + +[filter:validator] +paste.filter_factory = nova.api.ec2:Validator.factory + +[app:ec2executor] +paste.app_factory = nova.api.ec2:Executor.factory + +############# +# OpenStack # +############# + +[composite:osapi_compute] +use = call:nova.api.openstack.urlmap:urlmap_factory +/: oscomputeversions +/v1.1: openstack_compute_api_v2 +/v2: openstack_compute_api_v2 +/v3: openstack_compute_api_v3 + +[composite:openstack_compute_api_v2] +use = call:nova.api.auth:pipeline_factory +noauth = compute_req_id faultwrap sizelimit noauth ratelimit osapi_compute_app_v2 +keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2 +keystone_nolimit = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2 + +[composite:openstack_compute_api_v3] +use = call:nova.api.auth:pipeline_factory_v3 +noauth = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3 +keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v3 + +[filter:request_id] +paste.filter_factory = nova.openstack.common.middleware.request_id:RequestIdMiddleware.factory + +[filter:compute_req_id] +paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory + +[filter:faultwrap] +paste.filter_factory = nova.api.openstack:FaultWrapper.factory + +[filter:noauth] +paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory + +[filter:noauth_v3] +paste.filter_factory = nova.api.openstack.auth:NoAuthMiddlewareV3.factory + +[filter:ratelimit] +paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory + +[filter:sizelimit] +paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory + +[app:osapi_compute_app_v2] +paste.app_factory = nova.api.openstack.compute:APIRouter.factory + +[app:osapi_compute_app_v3] +paste.app_factory = nova.api.openstack.compute:APIRouterV3.factory + +[pipeline:oscomputeversions] +pipeline = faultwrap oscomputeversionapp + +[app:oscomputeversionapp] +paste.app_factory = nova.api.openstack.compute.versions:Versions.factory + +########## +# Shared # +########## + +[filter:keystonecontext] +paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory diff --git a/install-files/openstack/usr/share/openstack/nova/cells.json b/install-files/openstack/usr/share/openstack/nova/cells.json new file mode 100644 index 00000000..cc74930d --- /dev/null +++ b/install-files/openstack/usr/share/openstack/nova/cells.json @@ -0,0 +1,26 @@ +{ + "parent": { + "name": "parent", + "api_url": 
"http://api.example.com:8774", + "transport_url": "rabbit://rabbit.example.com", + "weight_offset": 0.0, + "weight_scale": 1.0, + "is_parent": true + }, + "cell1": { + "name": "cell1", + "api_url": "http://api.example.com:8774", + "transport_url": "rabbit://rabbit1.example.com", + "weight_offset": 0.0, + "weight_scale": 1.0, + "is_parent": false + }, + "cell2": { + "name": "cell2", + "api_url": "http://api.example.com:8774", + "transport_url": "rabbit://rabbit2.example.com", + "weight_offset": 0.0, + "weight_scale": 1.0, + "is_parent": false + } +} diff --git a/install-files/openstack/usr/share/openstack/nova/logging.conf b/install-files/openstack/usr/share/openstack/nova/logging.conf new file mode 100644 index 00000000..5482a040 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/nova/logging.conf @@ -0,0 +1,81 @@ +[loggers] +keys = root, nova + +[handlers] +keys = stderr, stdout, watchedfile, syslog, null + +[formatters] +keys = context, default + +[logger_root] +level = WARNING +handlers = null + +[logger_nova] +level = INFO +handlers = stderr +qualname = nova + +[logger_amqp] +level = WARNING +handlers = stderr +qualname = amqp + +[logger_amqplib] +level = WARNING +handlers = stderr +qualname = amqplib + +[logger_sqlalchemy] +level = WARNING +handlers = stderr +qualname = sqlalchemy +# "level = INFO" logs SQL queries. +# "level = DEBUG" logs SQL queries and results. +# "level = WARNING" logs neither. (Recommended for production systems.) + +[logger_boto] +level = WARNING +handlers = stderr +qualname = boto + +[logger_suds] +level = INFO +handlers = stderr +qualname = suds + +[logger_eventletwsgi] +level = WARNING +handlers = stderr +qualname = eventlet.wsgi.server + +[handler_stderr] +class = StreamHandler +args = (sys.stderr,) +formatter = context + +[handler_stdout] +class = StreamHandler +args = (sys.stdout,) +formatter = context + +[handler_watchedfile] +class = handlers.WatchedFileHandler +args = ('nova.log',) +formatter = context + +[handler_syslog] +class = handlers.SysLogHandler +args = ('/dev/log', handlers.SysLogHandler.LOG_USER) +formatter = context + +[handler_null] +class = nova.openstack.common.log.NullHandler +formatter = default +args = () + +[formatter_context] +class = nova.openstack.common.log.ContextFormatter + +[formatter_default] +format = %(message)s diff --git a/install-files/openstack/usr/share/openstack/nova/nova-compute.conf b/install-files/openstack/usr/share/openstack/nova/nova-compute.conf new file mode 100644 index 00000000..8d186211 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/nova/nova-compute.conf @@ -0,0 +1,4 @@ +[DEFAULT] +compute_driver={{ COMPUTE_DRIVER }} +[libvirt] +virt_type={{ NOVA_VIRT_TYPE }} diff --git a/install-files/openstack/usr/share/openstack/nova/nova.conf b/install-files/openstack/usr/share/openstack/nova/nova.conf new file mode 100644 index 00000000..43343cdd --- /dev/null +++ b/install-files/openstack/usr/share/openstack/nova/nova.conf @@ -0,0 +1,3809 @@ +[DEFAULT] + +# +# Options defined in oslo.messaging +# + +# Use durable queues in amqp. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size=30 + +# Qpid broker hostname. (string value) +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. 
(list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The number of prefetched messages held by receiver. (integer +# value) +#qpid_receiver_capacity=1 + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +rabbit_host={{ RABBITMQ_HOST }} + +# The RabbitMQ broker port where a single node is used. +# (integer value) +rabbit_port={{ RABBITMQ_PORT }} + +# RabbitMQ HA cluster host:port pairs. (list value) +#rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +rabbit_userid={{ RABBITMQ_USER }} + +# The RabbitMQ password. (string value) +rabbit_password={{ RABBITMQ_PASSWORD }} + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. 
(string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=nova + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Size of RPC greenthread pool. (integer value) +#rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +notification_driver=messagingv2 + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. (string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other +# drivers include qpid and zmq. (string value) +rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the +# transport_url option. (string value) +#control_exchange=openstack + + +# +# Options defined in nova.availability_zones +# + +# The availability_zone to show internal services under +# (string value) +#internal_service_availability_zone=internal + +# Default compute node availability_zone (string value) +#default_availability_zone=nova + + +# +# Options defined in nova.crypto +# + +# Filename of root CA (string value) +#ca_file=cacert.pem + +# Filename of private key (string value) +#key_file=private/cakey.pem + +# Filename of root Certificate Revocation List (string value) +#crl_file=crl.pem + +# Where we keep our keys (string value) +#keys_path=$state_path/keys + +# Where we keep our root CA (string value) +#ca_path=$state_path/CA + +# Should we use a CA for each project? (boolean value) +#use_project_ca=false + +# Subject for certificate for users, %s for project, user, +# timestamp (string value) +#user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s + +# Subject for certificate for projects, %s for project, +# timestamp (string value) +#project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s + + +# +# Options defined in nova.exception +# + +# Make exception message format errors fatal (boolean value) +#fatal_exception_format_errors=false + + +# +# Options defined in nova.netconf +# + +# IP address of this host (string value) +my_ip={{ MANAGEMENT_INTERFACE_IP_ADDRESS }} + +# Name of this node. This can be an opaque identifier. It is +# not necessarily a hostname, FQDN, or IP address. However, +# the node name must be valid within an AMQP key, and if using +# ZeroMQ, a valid hostname, FQDN, or IP address (string value) +#host=nova + +# Use IPv6 (boolean value) +#use_ipv6=false + + +# +# Options defined in nova.notifications +# + +# If set, send compute.instance.update notifications on +# instance state changes. Valid values are None for no +# notifications, "vm_state" for notifications on VM state +# changes, or "vm_and_task_state" for notifications on VM and +# task state changes. 
(string value) +notify_on_state_change=vm_and_task_state + +# If set, send api.fault notifications on caught exceptions in +# the API service. (boolean value) +#notify_api_faults=false + +# Default notification level for outgoing notifications +# (string value) +#default_notification_level=INFO + +# Default publisher_id for outgoing notifications (string +# value) +#default_publisher_id= + + +# +# Options defined in nova.paths +# + +# Directory where the nova python module is installed (string +# value) +#pybasedir=/usr/lib/python/site-packages + +# Directory where nova binaries are installed (string value) +#bindir=/usr/local/bin + +# Top-level directory for maintaining nova's state (string +# value) +state_path=/var/lib/nova + + +# +# Options defined in nova.quota +# + +# Number of instances allowed per project (integer value) +#quota_instances=10 + +# Number of instance cores allowed per project (integer value) +#quota_cores=20 + +# Megabytes of instance RAM allowed per project (integer +# value) +#quota_ram=51200 + +# Number of floating IPs allowed per project (integer value) +#quota_floating_ips=10 + +# Number of fixed IPs allowed per project (this should be at +# least the number of instances allowed) (integer value) +#quota_fixed_ips=-1 + +# Number of metadata items allowed per instance (integer +# value) +#quota_metadata_items=128 + +# Number of injected files allowed (integer value) +#quota_injected_files=5 + +# Number of bytes allowed per injected file (integer value) +#quota_injected_file_content_bytes=10240 + +# Length of injected file path (integer value) +# Deprecated group/name - [DEFAULT]/quota_injected_file_path_bytes +#quota_injected_file_path_length=255 + +# Number of security groups per project (integer value) +#quota_security_groups=10 + +# Number of security rules per security group (integer value) +#quota_security_group_rules=20 + +# Number of key pairs per user (integer value) +#quota_key_pairs=100 + +# Number of server groups per project (integer value) +#quota_server_groups=10 + +# Number of servers per server group (integer value) +#quota_server_group_members=10 + +# Number of seconds until a reservation expires (integer +# value) +#reservation_expire=86400 + +# Count of reservations until usage is refreshed (integer +# value) +#until_refresh=0 + +# Number of seconds between subsequent usage refreshes +# (integer value) +#max_age=0 + +# Default driver to use for quota checks (string value) +#quota_driver=nova.quota.DbQuotaDriver + + +# +# Options defined in nova.service +# + +# Seconds between nodes reporting state to datastore (integer +# value) +#report_interval=10 + +# Enable periodic tasks (boolean value) +#periodic_enable=true + +# Range of seconds to randomly delay when starting the +# periodic task scheduler to reduce stampeding. (Disable by +# setting to 0) (integer value) +#periodic_fuzzy_delay=60 + +# A list of APIs to enable by default (list value) +enabled_apis=ec2,osapi_compute,metadata + +# A list of APIs with enabled SSL (list value) +#enabled_ssl_apis= + +# The IP address on which the EC2 API will listen. (string +# value) +#ec2_listen=0.0.0.0 + +# The port on which the EC2 API will listen. (integer value) +#ec2_listen_port=8773 + +# Number of workers for EC2 API service. The default will be +# equal to the number of CPUs available. (integer value) +#ec2_workers= + +# The IP address on which the OpenStack API will listen. +# (string value) +#osapi_compute_listen=0.0.0.0 + +# The port on which the OpenStack API will listen. 
(integer +# value) +#osapi_compute_listen_port=8774 + +# Number of workers for OpenStack API service. The default +# will be the number of CPUs available. (integer value) +#osapi_compute_workers= + +# OpenStack metadata service manager (string value) +#metadata_manager=nova.api.manager.MetadataManager + +# The IP address on which the metadata API will listen. +# (string value) +#metadata_listen=0.0.0.0 + +# The port on which the metadata API will listen. (integer +# value) +#metadata_listen_port=8775 + +# Number of workers for metadata service. The default will be +# the number of CPUs available. (integer value) +#metadata_workers= + +# Full class name for the Manager for compute (string value) +compute_manager={{ COMPUTE_MANAGER }} + +# Full class name for the Manager for console proxy (string +# value) +#console_manager=nova.console.manager.ConsoleProxyManager + +# Manager for console auth (string value) +#consoleauth_manager=nova.consoleauth.manager.ConsoleAuthManager + +# Full class name for the Manager for cert (string value) +#cert_manager=nova.cert.manager.CertManager + +# Full class name for the Manager for network (string value) +#network_manager=nova.network.manager.VlanManager + +# Full class name for the Manager for scheduler (string value) +#scheduler_manager=nova.scheduler.manager.SchedulerManager + +# Maximum time since last check-in for up service (integer +# value) +#service_down_time=60 + + +# +# Options defined in nova.test +# + +# File name of clean sqlite db (string value) +#sqlite_clean_db=clean.sqlite + + +# +# Options defined in nova.utils +# + +# Whether to log monkey patching (boolean value) +#monkey_patch=false + +# List of modules/decorators to monkey patch (list value) +#monkey_patch_modules=nova.api.ec2.cloud:nova.notifications.notify_decorator,nova.compute.api:nova.notifications.notify_decorator + +# Length of generated instance admin passwords (integer value) +#password_length=12 + +# Time period to generate instance usages for. Time period +# must be hour, day, month or year (string value) +instance_usage_audit_period=hour + +# Path to the rootwrap configuration file to use for running +# commands as root (string value) +rootwrap_config=/etc/nova/rootwrap.conf + +# Explicitly specify the temporary working directory (string +# value) +#tempdir= + + +# +# Options defined in nova.wsgi +# + +# File name for the paste.deploy config for nova-api (string +# value) +api_paste_config=api-paste.ini + +# A python format string that is used as the template to +# generate log lines. The following values can be formatted +# into it: client_ip, date_time, request_line, status_code, +# body_length, wall_seconds. (string value) +#wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f + +# CA certificate file to use to verify connecting clients +# (string value) +#ssl_ca_file= + +# SSL certificate of API server (string value) +#ssl_cert_file= + +# SSL private key of API server (string value) +#ssl_key_file= + +# Sets the value of TCP_KEEPIDLE in seconds for each server +# socket. Not supported on OS X. (integer value) +#tcp_keepidle=600 + +# Size of the pool of greenthreads used by wsgi (integer +# value) +#wsgi_default_pool_size=1000 + +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large +# tokens (typically those generated by the Keystone v3 API +# with big service catalogs). 
(integer value) +#max_header_line=16384 + + +# +# Options defined in nova.api.auth +# + +# Whether to use per-user rate limiting for the api. This +# option is only used by v2 api. Rate limiting is removed from +# v3 api. (boolean value) +#api_rate_limit=false + +# The strategy to use for auth: noauth or keystone. (string +# value) +auth_strategy=keystone + +# Treat X-Forwarded-For as the canonical remote address. Only +# enable this if you have a sanitizing proxy. (boolean value) +#use_forwarded_for=false + + +# +# Options defined in nova.api.ec2 +# + +# Number of failed auths before lockout. (integer value) +#lockout_attempts=5 + +# Number of minutes to lockout if triggered. (integer value) +#lockout_minutes=15 + +# Number of minutes for lockout window. (integer value) +#lockout_window=15 + +# URL to get token from ec2 request. (string value) +#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens + +# Return the IP address as private dns hostname in describe +# instances (boolean value) +#ec2_private_dns_show_ip=false + +# Validate security group names according to EC2 specification +# (boolean value) +#ec2_strict_validation=true + +# Time in seconds before ec2 timestamp expires (integer value) +#ec2_timestamp_expiry=300 + + +# +# Options defined in nova.api.ec2.cloud +# + +# The IP address of the EC2 API server (string value) +#ec2_host=$my_ip + +# The internal IP address of the EC2 API server (string value) +#ec2_dmz_host=$my_ip + +# The port of the EC2 API server (integer value) +#ec2_port=8773 + +# The protocol to use when connecting to the EC2 API server +# (http, https) (string value) +#ec2_scheme=http + +# The path prefix used to call the ec2 API server (string +# value) +#ec2_path=/services/Cloud + +# List of region=fqdn pairs separated by commas (list value) +#region_list= + + +# +# Options defined in nova.api.metadata.base +# + +# List of metadata versions to skip placing into the config +# drive (string value) +#config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01 + +# Driver to use for vendor data (string value) +#vendordata_driver=nova.api.metadata.vendordata_json.JsonFileVendorData + + +# +# Options defined in nova.api.metadata.vendordata_json +# + +# File to load JSON formatted vendor data from (string value) +#vendordata_jsonfile_path= + + +# +# Options defined in nova.api.openstack.common +# + +# The maximum number of items returned in a single response +# from a collection resource (integer value) +#osapi_max_limit=1000 + +# Base URL that will be presented to users in links to the +# OpenStack Compute API (string value) +#osapi_compute_link_prefix= + +# Base URL that will be presented to users in links to glance +# resources (string value) +#osapi_glance_link_prefix= + + +# +# Options defined in nova.api.openstack.compute +# + +# Permit instance snapshot operations. (boolean value) +#allow_instance_snapshots=true + + +# +# Options defined in nova.api.openstack.compute.contrib +# + +# Specify list of extensions to load when using +# osapi_compute_extension option with +# nova.api.openstack.compute.contrib.select_extensions (list +# value) +osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions + + +# +# Options defined in nova.api.openstack.compute.contrib.fping +# + +# Full path to fping. 
(string value)
+#fping_path=/usr/sbin/fping
+
+
+#
+# Options defined in nova.api.openstack.compute.contrib.os_tenant_networks
+#
+
+# Enables or disables quota checking for tenant networks
+# (boolean value)
+#enable_network_quota=false
+
+# Control for checking for default networks (string value)
+#use_neutron_default_nets=False
+
+# Default tenant id when creating neutron networks (string
+# value)
+#neutron_default_tenant_id=default
+
+
+#
+# Options defined in nova.api.openstack.compute.extensions
+#
+
+# osapi compute extension to load (multi valued)
+#osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions
+
+
+#
+# Options defined in nova.api.openstack.compute.plugins.v3.hide_server_addresses
+#
+
+# List of instance states that should hide network info (list
+# value)
+#osapi_hide_server_address_states=building
+
+
+#
+# Options defined in nova.api.openstack.compute.servers
+#
+
+# Enables returning of the instance password by the relevant
+# server API calls such as create, rebuild or rescue. If the
+# hypervisor does not support password injection then the
+# password returned will not be correct (boolean value)
+#enable_instance_password=true
+
+
+#
+# Options defined in nova.api.sizelimit
+#
+
+# The maximum body size per each osapi request (bytes)
+# (integer value)
+#osapi_max_request_body_size=114688
+
+
+#
+# Options defined in nova.cert.rpcapi
+#
+
+# The topic cert nodes listen on (string value)
+#cert_topic=cert
+
+
+#
+# Options defined in nova.cloudpipe.pipelib
+#
+
+# Image ID used when starting up a cloudpipe vpn server
+# (string value)
+#vpn_image_id=0
+
+# Flavor for vpn instances (string value)
+#vpn_flavor=m1.tiny
+
+# Template for cloudpipe instance boot script (string value)
+#boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template
+
+# Network to push into openvpn config (string value)
+#dmz_net=10.0.0.0
+
+# Netmask to push into openvpn config (string value)
+#dmz_mask=255.255.255.0
+
+# Suffix to add to project name for vpn key and secgroups
+# (string value)
+#vpn_key_suffix=-vpn
+
+
+#
+# Options defined in nova.cmd.novnc
+#
+
+# Record sessions to FILE.[session_number] (boolean value)
+#record=false
+
+# Become a daemon (background process) (boolean value)
+#daemon=false
+
+# Disallow non-encrypted connections (boolean value)
+#ssl_only=false
+
+# Source is ipv6 (boolean value)
+#source_is_ipv6=false
+
+# SSL certificate file (string value)
+#cert=self.pem
+
+# SSL key file (if separate from cert) (string value)
+#key=
+
+# Run webserver on same port. Serve files from DIR. (string
+# value)
+#web=/usr/share/spice-html5
+
+
+#
+# Options defined in nova.cmd.novncproxy
+#
+
+# Host on which to listen for incoming requests (string value)
+#novncproxy_host=0.0.0.0
+
+# Port on which to listen for incoming requests (integer
+# value)
+#novncproxy_port=6080
+
+
+#
+# Options defined in nova.compute.api
+#
+
+# Allow destination machine to match source for resize. Useful
+# when testing in single-host environments. (boolean value)
+#allow_resize_to_same_host=false
+
+# Allow migrating a machine to the same host. Useful when
+# testing in single-host environments. (boolean value)
+#allow_migrate_to_same_host=false
+
+# Availability zone to use when user doesn't specify one
+# (string value)
+#default_schedule_zone=
+
+# These are image properties which a snapshot should not
+# inherit from an instance (list value)
+#non_inheritable_image_properties=cache_in_nova,bittorrent
+
+# Kernel image that indicates not to use a kernel, but to use
+# a raw disk image instead (string value)
+#null_kernel=nokernel
+
+# When creating multiple instances with a single request using
+# the os-multiple-create API extension, this template will be
+# used to build the display name for each instance. The
+# benefit is that the instances end up with different
+# hostnames. To restore legacy behavior of every instance
+# having the same name, set this option to "%(name)s". Valid
+# keys for the template are: name, uuid, count. (string value)
+#multi_instance_display_name_template=%(name)s-%(uuid)s
+
+# Maximum number of devices that will result in a local image
+# being created on the hypervisor node. Setting this to 0
+# means nova will allow only boot from volume. A negative
+# number means unlimited. (integer value)
+#max_local_block_devices=3
+
+
+#
+# Options defined in nova.compute.flavors
+#
+
+# Default flavor to use for the EC2 API only. The Nova API
+# does not support a default flavor. (string value)
+#default_flavor=m1.small
+
+
+#
+# Options defined in nova.compute.manager
+#
+
+# Console proxy host to use to connect to instances on this
+# host. (string value)
+#console_host=nova
+
+# Name of network to use to set access IPs for instances
+# (string value)
+#default_access_ip_network_name=
+
+# Whether to batch up the application of IPTables rules during
+# a host restart and apply all at the end of the init phase
+# (boolean value)
+#defer_iptables_apply=false
+
+# Where instances are stored on disk (string value)
+#instances_path=$state_path/instances
+
+# Generate periodic compute.instance.exists notifications
+# (boolean value)
+instance_usage_audit=True
+
+# Number of 1-second retries needed in live_migration (integer
+# value)
+#live_migration_retry_count=30
+
+# Whether to start guests that were running before the host
+# rebooted (boolean value)
+#resume_guests_state_on_host_boot=false
+
+# Number of times to retry network allocation on failures
+# (integer value)
+#network_allocate_retries=0
+
+# Number of times to retry block device allocation on failures
+# (integer value)
+#block_device_allocate_retries=60
+
+# The number of times to attempt to reap an instance's files.
+# (integer value)
+#maximum_instance_delete_attempts=5
+
+# Interval to pull network bandwidth usage info. Not supported
+# on all hypervisors. Set to -1 to disable. Setting this to 0
+# will disable, but this will change in the K release to mean
+# "run at the default rate". (integer value)
+#bandwidth_poll_interval=600
+
+# Interval to sync power states between the database and the
+# hypervisor. Set to -1 to disable. Setting this to 0 will
+# disable, but this will change in Juno to mean "run at the
+# default rate". (integer value)
+#sync_power_state_interval=600
+
+# Number of seconds between instance info_cache self healing
+# updates (integer value)
+#heal_instance_info_cache_interval=60
+
+# Interval in seconds for reclaiming deleted instances
+# (integer value)
+#reclaim_instance_interval=0
+
+# Interval in seconds for gathering volume usages (integer
+# value)
+#volume_usage_poll_interval=0
+
+# Interval in seconds for polling shelved instances to
+# offload. Set to -1 to disable. Setting this to 0 will
+# disable, but this will change in Juno to mean "run at the
+# default rate". (integer value)
+#shelved_poll_interval=3600
+
+# Time in seconds before a shelved instance is eligible for
+# removal from a host. -1 never offload, 0 offload when
+# shelved (integer value)
+#shelved_offload_time=0
+
+# Interval in seconds for retrying failed instance file
+# deletes (integer value)
+#instance_delete_interval=300
+
+# Waiting time interval (seconds) between block device
+# allocation retries on failures (integer value)
+#block_device_allocate_retries_interval=3
+
+# Action to take if a running deleted instance is
+# detected. Valid options are 'noop', 'log', 'shutdown', or
+# 'reap'. Set to 'noop' to take no action. (string value)
+#running_deleted_instance_action=reap
+
+# Number of seconds to wait between runs of the cleanup task.
+# (integer value)
+#running_deleted_instance_poll_interval=1800
+
+# Number of seconds after being deleted when a running
+# instance should be considered eligible for cleanup. (integer
+# value)
+#running_deleted_instance_timeout=0
+
+# Automatically hard reboot an instance if it has been stuck
+# in a rebooting state longer than N seconds. Set to 0 to
+# disable. (integer value)
+#reboot_timeout=0
+
+# Amount of time in seconds an instance can be in BUILD before
+# going into ERROR status. Set to 0 to disable. (integer value)
+#instance_build_timeout=0
+
+# Automatically unrescue an instance after N seconds. Set to 0
+# to disable. (integer value)
+#rescue_timeout=0
+
+# Automatically confirm resizes after N seconds. Set to 0 to
+# disable. (integer value)
+#resize_confirm_window=0
+
+# Total amount of time to wait in seconds for an instance to
+# perform a clean shutdown. (integer value)
+#shutdown_timeout=60
+
+
+#
+# Options defined in nova.compute.monitors
+#
+
+# Monitor classes available to the compute which may be
+# specified more than once. (multi valued)
+#compute_available_monitors=nova.compute.monitors.all_monitors
+
+# A list of monitors that can be used for getting compute
+# metrics. (list value)
+#compute_monitors=
+
+
+#
+# Options defined in nova.compute.resource_tracker
+#
+
+# Amount of disk in MB to reserve for the host (integer value)
+#reserved_host_disk_mb=0
+
+# Amount of memory in MB to reserve for the host (integer
+# value)
+reserved_host_memory_mb={{ RESERVED_HOST_MEMORY_MB }}
+
+# Class that will manage stats for the local compute host
+# (string value)
+#compute_stats_class=nova.compute.stats.Stats
+
+# The names of the extra resources to track. (list value)
+#compute_resources=vcpu
+
+
+#
+# Options defined in nova.compute.rpcapi
+#
+
+# The topic compute nodes listen on (string value)
+#compute_topic=compute
+
+
+#
+# Options defined in nova.conductor.tasks.live_migrate
+#
+
+# Number of times to retry live-migration before failing. If
+# == -1, try until out of hosts. If == 0, only try once, no
+# retries. (integer value)
+#migrate_max_retries=-1
+
+
+#
+# Options defined in nova.console.manager
+#
+
+# Driver to use for the console proxy (string value)
+#console_driver=nova.console.xvp.XVPConsoleProxy
+
+# Stub calls to compute worker for tests (boolean value)
+#stub_compute=false
+
+# Publicly visible name for this console host (string value)
+#console_public_hostname=nova
+
+
+#
+# Options defined in nova.console.rpcapi
+#
+
+# The topic console proxy nodes listen on (string value)
+#console_topic=console
+
+
+#
+# Options defined in nova.console.vmrc
+#
+
+# DEPRECATED.
Port for VMware VMRC connections (integer value) +#console_vmrc_port=443 + +# DEPRECATED. Number of retries for retrieving VMRC +# information (integer value) +#console_vmrc_error_retries=10 + + +# +# Options defined in nova.console.xvp +# + +# XVP conf template (string value) +#console_xvp_conf_template=$pybasedir/nova/console/xvp.conf.template + +# Generated XVP conf file (string value) +#console_xvp_conf=/etc/xvp.conf + +# XVP master process pid file (string value) +#console_xvp_pid=/var/run/xvp.pid + +# XVP log file (string value) +#console_xvp_log=/var/log/xvp.log + +# Port for XVP to multiplex VNC connections on (integer value) +#console_xvp_multiplex_port=5900 + + +# +# Options defined in nova.consoleauth +# + +# The topic console auth proxy nodes listen on (string value) +#consoleauth_topic=consoleauth + + +# +# Options defined in nova.consoleauth.manager +# + +# How many seconds before deleting tokens (integer value) +#console_token_ttl=600 + + +# +# Options defined in nova.db.api +# + +# Services to be added to the available pool on create +# (boolean value) +#enable_new_services=true + +# Template string to be used to generate instance names +# (string value) +instance_name_template=instance-%08x + +# Template string to be used to generate snapshot names +# (string value) +snapshot_name_template=snapshot-%s + + +# +# Options defined in nova.db.base +# + +# The driver to use for database access (string value) +#db_driver=nova.db + + +# +# Options defined in nova.db.sqlalchemy.api +# + +# When set, compute API will consider duplicate hostnames +# invalid within the specified scope, regardless of case. +# Should be empty, "project" or "global". (string value) +#osapi_compute_unique_server_name_scope= + + +# +# Options defined in nova.image.s3 +# + +# Parent directory for tempdir used for image decryption +# (string value) +#image_decryption_dir=/tmp + +# Hostname or IP for OpenStack to use when accessing the S3 +# api (string value) +#s3_host=$my_ip + +# Port used when accessing the S3 api (integer value) +#s3_port=3333 + +# Access key to use for S3 server for images (string value) +#s3_access_key=notchecked + +# Secret key to use for S3 server for images (string value) +#s3_secret_key=notchecked + +# Whether to use SSL when talking to S3 (boolean value) +#s3_use_ssl=false + +# Whether to affix the tenant id to the access key when +# downloading from S3 (boolean value) +#s3_affix_tenant=false + + +# +# Options defined in nova.ipv6.api +# + +# Backend to use for IPv6 generation (string value) +#ipv6_backend=rfc2462 + + +# +# Options defined in nova.network +# + +# The full class name of the network API class to use (string +# value) +network_api_class=nova.network.neutronv2.api.API + + +# +# Options defined in nova.network.driver +# + +# Driver to use for network creation (string value) +#network_driver=nova.network.linux_net + + +# +# Options defined in nova.network.floating_ips +# + +# Default pool for floating IPs (string value) +#default_floating_pool=nova + +# Autoassigning floating IP to VM (boolean value) +#auto_assign_floating_ip=false + +# Full class name for the DNS Manager for floating IPs (string +# value) +#floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver + +# Full class name for the DNS Manager for instance IPs (string +# value) +#instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver + +# Full class name for the DNS Zone for instance IPs (string +# value) +#instance_dns_domain= + + +# +# Options defined in nova.network.ldapdns +# + +# URL 
for LDAP server which will store DNS entries (string
+# value)
+#ldap_dns_url=ldap://ldap.example.com:389
+
+# User for LDAP DNS (string value)
+#ldap_dns_user=uid=admin,ou=people,dc=example,dc=org
+
+# Password for LDAP DNS (string value)
+#ldap_dns_password=password
+
+# Hostmaster for LDAP DNS driver Statement of Authority
+# (string value)
+#ldap_dns_soa_hostmaster=hostmaster@example.org
+
+# DNS Servers for LDAP DNS driver (multi valued)
+#ldap_dns_servers=dns.example.org
+
+# Base DN for DNS entries in LDAP (string value)
+#ldap_dns_base_dn=ou=hosts,dc=example,dc=org
+
+# Refresh interval (in seconds) for LDAP DNS driver Statement
+# of Authority (string value)
+#ldap_dns_soa_refresh=1800
+
+# Retry interval (in seconds) for LDAP DNS driver Statement of
+# Authority (string value)
+#ldap_dns_soa_retry=3600
+
+# Expiry interval (in seconds) for LDAP DNS driver Statement
+# of Authority (string value)
+#ldap_dns_soa_expiry=86400
+
+# Minimum interval (in seconds) for LDAP DNS driver Statement
+# of Authority (string value)
+#ldap_dns_soa_minimum=7200
+
+
+#
+# Options defined in nova.network.linux_net
+#
+
+# Location of flagfiles for dhcpbridge (multi valued)
+#dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf
+
+# Location to keep network config files (string value)
+#networks_path=$state_path/networks
+
+# Interface for public IP addresses (string value)
+#public_interface=eth0
+
+# Location of nova-dhcpbridge (string value)
+#dhcpbridge=$bindir/nova-dhcpbridge
+
+# Public IP of network host (string value)
+#routing_source_ip=$my_ip
+
+# Lifetime of a DHCP lease in seconds (integer value)
+#dhcp_lease_time=86400
+
+# If set, uses specific DNS server for dnsmasq. Can be
+# specified multiple times. (multi valued)
+#dns_server=
+
+# If set, uses the dns1 and dns2 from the network ref. as dns
+# servers. (boolean value)
+#use_network_dns_servers=false
+
+# A list of dmz ranges that should be accepted (list value)
+#dmz_cidr=
+
+# Traffic to this range will always be snatted to the fallback
+# ip, even if it would normally be bridged out of the node.
+# Can be specified multiple times. (multi valued)
+#force_snat_range=
+
+# Override the default dnsmasq settings with this file (string
+# value)
+#dnsmasq_config_file=
+
+# Driver used to create ethernet devices. (string value)
+linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
+
+# Name of Open vSwitch bridge used with linuxnet (string
+# value)
+#linuxnet_ovs_integration_bridge=br-int
+
+# Send gratuitous ARPs for HA setup (boolean value)
+#send_arp_for_ha=false
+
+# Send this many gratuitous ARPs for HA setup (integer value)
+#send_arp_for_ha_count=3
+
+# Use single default gateway. Only the first nic of the vm
+# will get the default gateway from the dhcp server (boolean
+# value)
+#use_single_default_gateway=false
+
+# An interface that bridges can forward to. If this is set to
+# all then all traffic will be forwarded. Can be specified
+# multiple times. (multi valued)
+#forward_bridge_interface=all
+
+# The IP address for the metadata API server (string value)
+#metadata_host=$my_ip
+
+# The port for the metadata API (integer value)
+#metadata_port=8775
+
+# Regular expression to match iptables rule that should always
+# be on the top. (string value)
+#iptables_top_regex=
+
+# Regular expression to match iptables rule that should always
+# be on the bottom. (string value)
+#iptables_bottom_regex=
+
+# The table that iptables jumps to when a packet is to be
+# dropped.
(string value) +#iptables_drop_action=DROP + +# Amount of time, in seconds, that ovs_vsctl should wait for a +# response from the database. 0 is to wait forever. (integer +# value) +#ovs_vsctl_timeout=120 + +# If passed, use fake network devices and addresses (boolean +# value) +#fake_network=false + + +# +# Options defined in nova.network.manager +# + +# Bridge for simple network instances (string value) +#flat_network_bridge= + +# DNS server for simple network (string value) +#flat_network_dns=8.8.4.4 + +# Whether to attempt to inject network setup into guest +# (boolean value) +#flat_injected=false + +# FlatDhcp will bridge into this interface if set (string +# value) +#flat_interface= + +# First VLAN for private networks (integer value) +#vlan_start=100 + +# VLANs will bridge into this interface if set (string value) +#vlan_interface= + +# Number of networks to support (integer value) +#num_networks=1 + +# Public IP for the cloudpipe VPN servers (string value) +#vpn_ip=$my_ip + +# First Vpn port for private networks (integer value) +#vpn_start=1000 + +# Number of addresses in each private subnet (integer value) +#network_size=256 + +# Fixed IPv6 address block (string value) +#fixed_range_v6=fd00::/48 + +# Default IPv4 gateway (string value) +#gateway= + +# Default IPv6 gateway (string value) +#gateway_v6= + +# Number of addresses reserved for vpn clients (integer value) +#cnt_vpn_clients=0 + +# Seconds after which a deallocated IP is disassociated +# (integer value) +#fixed_ip_disassociate_timeout=600 + +# Number of attempts to create unique mac address (integer +# value) +#create_unique_mac_address_attempts=5 + +# If True, skip using the queue and make local calls (boolean +# value) +#fake_call=false + +# If True, unused gateway devices (VLAN and bridge) are +# deleted in VLAN network mode with multi hosted networks +# (boolean value) +#teardown_unused_network_gateway=false + +# If True, send a dhcp release on instance termination +# (boolean value) +#force_dhcp_release=true + +# If True, when a DNS entry must be updated, it sends a fanout +# cast to all network hosts to update their DNS entries in +# multi host mode (boolean value) +#update_dns_entries=false + +# Number of seconds to wait between runs of updates to DNS +# entries. (integer value) +#dns_update_periodic_interval=-1 + +# Domain to use for building the hostnames (string value) +#dhcp_domain=novalocal + +# Indicates underlying L3 management library (string value) +#l3_lib=nova.network.l3.LinuxNetL3 + + +# +# Options defined in nova.network.rpcapi +# + +# The topic network nodes listen on (string value) +#network_topic=network + +# Default value for multi_host in networks. Also, if set, some +# rpc network calls will be sent directly to host. (boolean +# value) +#multi_host=false + + +# +# Options defined in nova.network.security_group.openstack_driver +# + +# The full class name of the security API class (string value) +security_group_api=neutron + + +# +# Options defined in nova.objects.network +# + +# DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE +# NETWORK. If True in multi_host mode, all compute hosts share +# the same dhcp address. The same IP address used for DHCP +# will be added on each nova-network node which is only +# visible to the vms on the same host. (boolean value) +#share_dhcp_address=false + +# DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE +# NETWORK. MTU setting for network interface. 
(integer value)
+#network_device_mtu=
+
+
+#
+# Options defined in nova.objectstore.s3server
+#
+
+# Path to S3 buckets (string value)
+#buckets_path=$state_path/buckets
+
+# IP address for S3 API to listen (string value)
+#s3_listen=0.0.0.0
+
+# Port for S3 API to listen (integer value)
+#s3_listen_port=3333
+
+
+#
+# Options defined in nova.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=
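+
+# Editorial note: the examples below are illustrative additions,
+# not part of the upstream generated sample; they restate the
+# three accepted forms documented above:
+#   backdoor_port=0            (listen on a random free TCP port)
+#   backdoor_port=4444         (listen on port 4444 exactly)
+#   backdoor_port=4444:4504    (first unused port in the range)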
+
+
+#
+# Options defined in nova.openstack.common.lockutils
+#
+
+# Enables or disables inter-process locks. (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. (string value)
+lock_path=/var/lock/nova
+
+
+#
+# Options defined in nova.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error. (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context. (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context.
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format.
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
+
+# Enables or disables publication of error events. (boolean
+# value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log
+# message. (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log
+# message. (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of a logging configuration file. This file is
+# appended to any existing logging configuration files. For
+# details about logging configuration files, see the Python
+# logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s. (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=
+
+# (Optional) The base directory used for relative --log-file
+# paths. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and will change in J to honor RFC5424. (boolean
+# value)
+use_syslog=True
+
+# (Optional) Enables or disables syslog rfc5424 format for
+# logging. If enabled, prefixes the MSG part of the syslog
+# message with APP-NAME (RFC5424). The format without the APP-
+# NAME is deprecated in I, and will be removed in J. (boolean
+# value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in nova.openstack.common.memorycache
+#
+
+# Memcached servers or None for in process cache. (list value)
+#memcached_servers=
+
+
+#
+# Options defined in nova.openstack.common.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+
+#
+# Options defined in nova.openstack.common.policy
+#
+
+# The JSON file that defines policies. (string value)
+#policy_file=policy.json
+
+# Default rule. Enforced when a requested rule is not found.
+# (string value)
+#policy_default_rule=default
+
+
+#
+# Options defined in nova.pci.pci_request
+#
+
+# An alias for a PCI passthrough device requirement. This
+# allows users to specify the alias in the extra_spec for a
+# flavor, without needing to repeat all the PCI property
+# requirements. For example: pci_alias = { "name":
+# "QuicAssist", "product_id": "0443", "vendor_id": "8086",
+# "device_type": "ACCEL" } defines an alias for the Intel
+# QuickAssist card. (multi valued)
+#pci_alias=
+
+
+#
+# Options defined in nova.pci.pci_whitelist
+#
+
+# White list of PCI devices available to VMs. For example:
+# pci_passthrough_whitelist = [{"vendor_id": "8086",
+# "product_id": "0443"}] (multi valued)
+#pci_passthrough_whitelist=
+
+
+#
+# Options defined in nova.scheduler.driver
+#
+
+# The scheduler host manager class to use (string value)
+scheduler_host_manager={{ SCHEDULER_HOST_MANAGER }}
+
+
+#
+# Options defined in nova.scheduler.filter_scheduler
+#
+
+# New instances will be scheduled on a host chosen randomly
+# from a subset of the N best hosts. This property defines the
+# subset size that a host is chosen from. A value of 1 chooses
+# the first host returned by the weighing functions. This
+# value must be at least 1. Any value less than 1 will be
+# ignored, and 1 will be used instead (integer value)
+#scheduler_host_subset_size=1
+
+
+#
+# Options defined in nova.scheduler.filters.aggregate_image_properties_isolation
+#
+
+# Force the filter to consider only keys matching the given
+# namespace. (string value)
+#aggregate_image_properties_isolation_namespace=
+
+# The separator used between the namespace and keys (string
+# value)
+#aggregate_image_properties_isolation_separator=.
+
+
+#
+# Options defined in nova.scheduler.filters.core_filter
+#
+
+# Virtual CPU to physical CPU allocation ratio which affects
+# all CPU filters. This configuration specifies a global ratio
+# for CoreFilter. For AggregateCoreFilter, it will fall back
+# to this configuration value if no per-aggregate setting is
+# found. (floating point value)
+#cpu_allocation_ratio=16.0
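+
+# Editorial worked example (not upstream text): with the default
+# cpu_allocation_ratio=16.0, a host with 8 physical cores can have
+# up to 8 * 16 = 128 vCPUs scheduled onto it in total.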
+
+
+#
+# Options defined in nova.scheduler.filters.disk_filter
+#
+
+# Virtual disk to physical disk allocation ratio (floating
+# point value)
+#disk_allocation_ratio=1.0
+
+
+#
+# Options defined in nova.scheduler.filters.io_ops_filter
+#
+
+# Tells filters to ignore hosts that have this many or more
+# instances currently in build, resize, snapshot, migrate,
+# rescue or unshelve task states (integer value)
+#max_io_ops_per_host=8
+
+
+#
+# Options defined in nova.scheduler.filters.isolated_hosts_filter
+#
+
+# Images to run on isolated host (list value)
+#isolated_images=
+
+# Host reserved for specific images (list value)
+#isolated_hosts=
+
+# Whether to force isolated hosts to run only isolated images
+# (boolean value)
+#restrict_isolated_hosts_to_isolated_images=true
+
+
+#
+# Options defined in nova.scheduler.filters.num_instances_filter
+#
+
+# Ignore hosts that have too many instances (integer value)
+#max_instances_per_host=50
+
+
+#
+# Options defined in nova.scheduler.filters.ram_filter
+#
+
+# Virtual ram to physical ram allocation ratio which affects
+# all ram filters. This configuration specifies a global ratio
+# for RamFilter. For AggregateRamFilter, it will fall back to
+# this configuration value if no per-aggregate setting is
+# found. (floating point value)
+ram_allocation_ratio={{ RAM_ALLOCATION_RATIO }}
+
+
+#
+# Options defined in nova.scheduler.host_manager
+#
+
+# Filter classes available to the scheduler which may be
+# specified more than once. An entry of
+# "nova.scheduler.filters.standard_filters" maps to all
+# filters included with nova. (multi valued)
+#scheduler_available_filters=nova.scheduler.filters.all_filters
+
+# Which filter class names to use for filtering hosts when not
+# specified in the request. (list value)
+scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
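+
+# Editorial note (not upstream text): further standard filters can
+# be appended to this list; for example, adding DiskFilter (see the
+# nova.scheduler.filters.disk_filter options above) also takes disk
+# allocation into account when selecting hosts.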
+
+# Which weight class names to use for weighing hosts (list
+# value)
+#scheduler_weight_classes=nova.scheduler.weights.all_weighers
+
+
+#
+# Options defined in nova.scheduler.ironic_host_manager
+#
+
+# Which filter class names to use for filtering baremetal
+# hosts when not specified in the request. (list value)
+#baremetal_scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter
+
+# Flag to decide whether to use
+# baremetal_scheduler_default_filters or not. (boolean value)
+#scheduler_use_baremetal_filters=false
+
+
+#
+# Options defined in nova.scheduler.manager
+#
+
+# Default driver to use for the scheduler (string value)
+scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
+
+# How often (in seconds) to run periodic tasks in the
+# scheduler driver of your choice. Please note this is likely
+# to interact with the value of service_down_time, but exactly
+# how they interact will depend on your choice of scheduler
+# driver. (integer value)
+#scheduler_driver_task_period=60
+
+
+#
+# Options defined in nova.scheduler.rpcapi
+#
+
+# The topic scheduler nodes listen on (string value)
+#scheduler_topic=scheduler
+
+
+#
+# Options defined in nova.scheduler.scheduler_options
+#
+
+# Absolute path to scheduler configuration JSON file. (string
+# value)
+#scheduler_json_config_location=
+
+
+#
+# Options defined in nova.scheduler.utils
+#
+
+# Maximum number of attempts to schedule an instance (integer
+# value)
+#scheduler_max_attempts=3
+
+
+#
+# Options defined in nova.scheduler.weights.ram
+#
+
+# Multiplier used for weighing ram. Negative numbers mean to
+# stack vs spread. (floating point value)
+#ram_weight_multiplier=1.0
+
+
+#
+# Options defined in nova.servicegroup.api
+#
+
+# The driver for servicegroup service (valid options are: db,
+# zk, mc) (string value)
+#servicegroup_driver=db
+
+
+#
+# Options defined in nova.virt.configdrive
+#
+
+# Config drive format. One of iso9660 (default) or vfat
+# (string value)
+#config_drive_format=iso9660
+
+# DEPRECATED (not needed any more): Where to put temporary
+# files associated with config drive creation (string value)
+#config_drive_tempdir=
+
+# Set to force injection to take place on a config drive (if
+# set, valid options are: always) (string value)
+#force_config_drive=
+
+# Name and optionally path of the tool used for ISO image
+# creation (string value)
+#mkisofs_cmd=genisoimage
+
+
+#
+# Options defined in nova.virt.disk.api
+#
+
+# Name of the mkfs commands for ephemeral device. The format
+# is <os_type>=<mkfs command> (multi valued)
+#virt_mkfs=
+
+# Attempt to resize the filesystem by accessing the image over
+# a block device. This is done by the host and may not be
+# necessary if the image contains a recent version of cloud-
+# init. Possible mechanisms require the nbd driver (for qcow
+# and raw), or loop (for raw). (boolean value)
+#resize_fs_using_block_device=false
+
+
+#
+# Options defined in nova.virt.disk.mount.nbd
+#
+
+# Amount of time, in seconds, to wait for NBD device start up.
+# (integer value)
+#timeout_nbd=10
+
+
+#
+# Options defined in nova.virt.driver
+#
+
+# Driver to use for controlling virtualization. Options
+# include: libvirt.LibvirtDriver, xenapi.XenAPIDriver,
+# fake.FakeDriver, baremetal.BareMetalDriver,
+# vmwareapi.VMwareVCDriver, hyperv.HyperVDriver (string value)
+compute_driver={{ COMPUTE_DRIVER }}
+
+# The default format an ephemeral_volume will be formatted
+# with on creation. (string value)
+#default_ephemeral_format=
+
+# VM image preallocation mode: "none" => no storage
+# provisioning is done up front, "space" => storage is fully
+# allocated at instance start (string value)
+#preallocate_images=none
+
+# Whether to use cow images (boolean value)
+#use_cow_images=true
+
+# Fail instance boot if vif plugging fails (boolean value)
+#vif_plugging_is_fatal=true
+
+# Number of seconds to wait for neutron vif plugging events to
+# arrive before continuing or failing (see
+# vif_plugging_is_fatal). If this is set to zero and
+# vif_plugging_is_fatal is False, events should not be
+# expected to arrive at all. (integer value)
+#vif_plugging_timeout=300
+
+
+#
+# Options defined in nova.virt.firewall
+#
+
+# Firewall driver (defaults to hypervisor specific iptables
+# driver) (string value)
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+
+# Whether to allow network traffic from same network (boolean
+# value)
+#allow_same_net_traffic=true
+
+
+#
+# Options defined in nova.virt.hardware
+#
+
+# Defines which pcpus instance vcpus can use. For example,
+# "4-12,^8,15" (string value)
+#vcpu_pin_set=
+
+
+#
+# Options defined in nova.virt.imagecache
+#
+
+# Number of seconds to wait between runs of the image cache
+# manager. Set to -1 to disable.
Setting this to 0 will +# disable, but this will change in the K release to mean "run +# at the default rate". (integer value) +#image_cache_manager_interval=2400 + +# Where cached images are stored under $instances_path. This +# is NOT the full path - just a folder name. For per-compute- +# host cached images, set to _base_$my_ip (string value) +#image_cache_subdirectory_name=_base + +# Should unused base images be removed? (boolean value) +#remove_unused_base_images=true + +# Unused unresized base images younger than this will not be +# removed (integer value) +#remove_unused_original_minimum_age_seconds=86400 + + +# +# Options defined in nova.virt.images +# + +# Force backing images to raw format (boolean value) +#force_raw_images=true + + +# +# Options defined in nova.virt.netutils +# + +# Template file for injected network (string value) +#injected_network_template=$pybasedir/nova/virt/interfaces.template + + +# +# Options defined in nova.vnc +# + +# Location of VNC console proxy, in the form +# "http://127.0.0.1:6080/vnc_auto.html" (string value) +novncproxy_base_url=http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:6080/vnc_auto.html + +# Location of nova xvp VNC console proxy, in the form +# "http://127.0.0.1:6081/console" (string value) +#xvpvncproxy_base_url=http://127.0.0.1:6081/console + +# IP address on which instance vncservers should listen +# (string value) +vncserver_listen=0.0.0.0 + +# The address to which proxy clients (like nova-xvpvncproxy) +# should connect (string value) +vncserver_proxyclient_address={{ MANAGEMENT_INTERFACE_IP_ADDRESS }} + +# Enable VNC related features (boolean value) +vnc_enabled=true + +# Keymap for VNC (string value) +vnc_keymap=en-us + + +# +# Options defined in nova.vnc.xvp_proxy +# + +# Port that the XCP VNC proxy should bind to (integer value) +#xvpvncproxy_port=6081 + +# Address that the XCP VNC proxy should bind to (string value) +#xvpvncproxy_host=0.0.0.0 + + +# +# Options defined in nova.volume +# + +# The full class name of the volume API class to use (string +# value) +#volume_api_class=nova.volume.cinder.API + + +[baremetal] + +# +# Options defined in nova.virt.baremetal.db.api +# + +# The backend to use for bare-metal database (string value) +#db_backend=sqlalchemy + + +# +# Options defined in nova.virt.baremetal.db.sqlalchemy.session +# + +# The SQLAlchemy connection string used to connect to the +# bare-metal database (string value) +#sql_connection=sqlite:///$state_path/baremetal_nova.sqlite + + +# +# Options defined in nova.virt.baremetal.driver +# + +# Baremetal VIF driver. (string value) +#vif_driver=nova.virt.baremetal.vif_driver.BareMetalVIFDriver + +# Baremetal volume driver. (string value) +#volume_driver=nova.virt.baremetal.volume_driver.LibvirtVolumeDriver + +# A list of additional capabilities corresponding to +# flavor_extra_specs for this compute host to advertise. 
Valid
+# entries are name=value pairs. For example: "key1:val1,
+# key2:val2" (list value)
+#flavor_extra_specs=
+
+# Baremetal driver back-end (pxe or tilera) (string value)
+#driver=nova.virt.baremetal.pxe.PXE
+
+# Baremetal power management method (string value)
+#power_manager=nova.virt.baremetal.ipmi.IPMI
+
+# Baremetal compute node's tftp root path (string value)
+#tftp_root=/tftpboot
+
+
+#
+# Options defined in nova.virt.baremetal.ipmi
+#
+
+# Path to baremetal terminal program (string value)
+#terminal=shellinaboxd
+
+# Path to baremetal terminal SSL cert (PEM) (string value)
+#terminal_cert_dir=
+
+# Path to the directory that stores pidfiles of
+# baremetal_terminal (string value)
+#terminal_pid_dir=$state_path/baremetal/console
+
+# Maximum number of retries for IPMI operations (integer
+# value)
+#ipmi_power_retry=10
+
+
+#
+# Options defined in nova.virt.baremetal.pxe
+#
+
+# Default kernel image ID used in deployment phase (string
+# value)
+#deploy_kernel=
+
+# Default ramdisk image ID used in deployment phase (string
+# value)
+#deploy_ramdisk=
+
+# Template file for injected network config (string value)
+#net_config_template=$pybasedir/nova/virt/baremetal/net-dhcp.ubuntu.template
+
+# Additional append parameters for baremetal PXE boot (string
+# value)
+#pxe_append_params=nofb nomodeset vga=normal
+
+# Template file for PXE configuration (string value)
+#pxe_config_template=$pybasedir/nova/virt/baremetal/pxe_config.template
+
+# If True, enable file injection for network info, files and
+# admin password (boolean value)
+#use_file_injection=false
+
+# Timeout for PXE deployments. Default: 0 (unlimited) (integer
+# value)
+#pxe_deploy_timeout=0
+
+# If set, pass the network configuration details to the
+# initramfs via cmdline. (boolean value)
+#pxe_network_config=false
+
+# This gets passed to Neutron as the bootfile dhcp parameter.
+# (string value)
+#pxe_bootfile_name=pxelinux.0
+
+
+#
+# Options defined in nova.virt.baremetal.tilera_pdu
+#
+
+# IP address of tilera pdu (string value)
+#tile_pdu_ip=10.0.100.1
+
+# Management script for tilera pdu (string value)
+#tile_pdu_mgr=/tftpboot/pdu_mgr
+
+# Power status of tilera PDU is OFF (integer value)
+#tile_pdu_off=2
+
+# Power status of tilera PDU is ON (integer value)
+#tile_pdu_on=1
+
+# Power status of tilera PDU (integer value)
+#tile_pdu_status=9
+
+# Wait time in seconds before checking the result after tilera
+# power operations (integer value)
+#tile_power_wait=9
+
+
+#
+# Options defined in nova.virt.baremetal.virtual_power_driver
+#
+
+# IP or name of the virtual power host (string value)
+#virtual_power_ssh_host=
+
+# Port to use for ssh to virtual power host (integer value)
+#virtual_power_ssh_port=22
+
+# Base command to use for virtual power (vbox, virsh) (string
+# value)
+#virtual_power_type=virsh
+
+# User to execute virtual power commands as (string value)
+#virtual_power_host_user=
+
+# Password for virtual power host_user (string value)
+#virtual_power_host_pass=
+
+# The ssh key for virtual power host_user (string value)
+#virtual_power_host_key=
+
+
+#
+# Options defined in nova.virt.baremetal.volume_driver
+#
+
+# Do not set this out of dev/test environments. If a node does
+# not have a fixed PXE IP address, volumes are exported with
+# globally opened ACL (boolean value)
+#use_unsafe_iscsi=false
+
+# The iSCSI IQN prefix used in baremetal volume connections.
+# (string value) +#iscsi_iqn_prefix=iqn.2010-10.org.openstack.baremetal + + +[cells] + +# +# Options defined in nova.cells.manager +# + +# Cells communication driver to use (string value) +#driver=nova.cells.rpc_driver.CellsRPCDriver + +# Number of seconds after an instance was updated or deleted +# to continue to update cells (integer value) +#instance_updated_at_threshold=3600 + +# Number of instances to update per periodic task run (integer +# value) +#instance_update_num_instances=1 + + +# +# Options defined in nova.cells.messaging +# + +# Maximum number of hops for cells routing. (integer value) +#max_hop_count=10 + +# Cells scheduler to use (string value) +#scheduler=nova.cells.scheduler.CellsScheduler + + +# +# Options defined in nova.cells.opts +# + +# Enable cell functionality (boolean value) +#enable=false + +# The topic cells nodes listen on (string value) +#topic=cells + +# Manager for cells (string value) +#manager=nova.cells.manager.CellsManager + +# Name of this cell (string value) +#name=nova + +# Key/Multi-value list with the capabilities of the cell (list +# value) +#capabilities=hypervisor=xenserver;kvm,os=linux;windows + +# Seconds to wait for response from a call to a cell. (integer +# value) +#call_timeout=60 + +# Percentage of cell capacity to hold in reserve. Affects both +# memory and disk utilization (floating point value) +#reserve_percent=10.0 + +# Type of cell: api or compute (string value) +#cell_type=compute + +# Number of seconds after which a lack of capability and +# capacity updates signals the child cell is to be treated as +# a mute. (integer value) +#mute_child_interval=300 + +# Seconds between bandwidth updates for cells. (integer value) +#bandwidth_update_interval=600 + + +# +# Options defined in nova.cells.rpc_driver +# + +# Base queue name to use when communicating between cells. +# Various topics by message type will be appended to this. +# (string value) +#rpc_driver_queue_base=cells.intercell + + +# +# Options defined in nova.cells.scheduler +# + +# Filter classes the cells scheduler should use. An entry of +# "nova.cells.filters.all_filters" maps to all cells filters +# included with nova. (list value) +#scheduler_filter_classes=nova.cells.filters.all_filters + +# Weigher classes the cells scheduler should use. An entry of +# "nova.cells.weights.all_weighers" maps to all cell weighers +# included with nova. (list value) +#scheduler_weight_classes=nova.cells.weights.all_weighers + +# How many retries when no cells are available. (integer +# value) +#scheduler_retries=10 + +# How often to retry in seconds when no cells are available. +# (integer value) +#scheduler_retry_delay=2 + + +# +# Options defined in nova.cells.state +# + +# Interval, in seconds, for getting fresh cell information +# from the database. (integer value) +#db_check_interval=60 + +# Configuration file from which to read cells configuration. +# If given, overrides reading cells from the database. (string +# value) +#cells_config= + + +# +# Options defined in nova.cells.weights.mute_child +# + +# Multiplier used to weigh mute children. (The value should be +# negative.) (floating point value) +#mute_weight_multiplier=-10.0 + +# Weight value assigned to mute children. (The value should be +# positive.) (floating point value) +#mute_weight_value=1000.0 + + +# +# Options defined in nova.cells.weights.ram_by_instance_type +# + +# Multiplier used for weighing ram. Negative numbers mean to +# stack vs spread. 
(floating point value)
+#ram_weight_multiplier=10.0
+
+
+#
+# Options defined in nova.cells.weights.weight_offset
+#
+
+# Multiplier used to weigh offset weigher. (floating point
+# value)
+#offset_weight_multiplier=1.0
+
+
+[cinder]
+
+#
+# Options defined in nova.volume.cinder
+#
+
+# Info to match when looking for cinder in the service
+# catalog. Format is: separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+# Deprecated group/name - [DEFAULT]/cinder_catalog_info
+#catalog_info=volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder
+# endpoint e.g. http://localhost:8776/v1/%(project_id)s
+# (string value)
+# Deprecated group/name - [DEFAULT]/cinder_endpoint_template
+#endpoint_template=
+
+# Region name of this node (string value)
+# Deprecated group/name - [DEFAULT]/os_region_name
+#os_region_name=
+
+# Location of ca certificates file to use for cinder client
+# requests. (string value)
+# Deprecated group/name - [DEFAULT]/cinder_ca_certificates_file
+#ca_certificates_file=
+
+# Number of cinderclient retries on failed http calls (integer
+# value)
+# Deprecated group/name - [DEFAULT]/cinder_http_retries
+#http_retries=3
+
+# HTTP inactivity timeout (in seconds) (integer value)
+# Deprecated group/name - [DEFAULT]/cinder_http_timeout
+#http_timeout=
+
+# Allow to perform insecure SSL requests to cinder (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/cinder_api_insecure
+#api_insecure=false
+
+# Allow attach between instance and volume in different
+# availability zones. (boolean value)
+# Deprecated group/name - [DEFAULT]/cinder_cross_az_attach
+#cross_az_attach=true
+
+
+[conductor]
+
+#
+# Options defined in nova.conductor.api
+#
+
+# Perform nova-conductor operations locally (boolean value)
+use_local=true
+
+# The topic on which conductor nodes listen (string value)
+#topic=conductor
+
+# Full class name for the Manager for conductor (string value)
+#manager=nova.conductor.manager.ConductorManager
+
+# Number of workers for OpenStack Conductor service. The
+# default will be the number of CPUs available. (integer
+# value)
+#workers=
+
+
+[ephemeral_storage_encryption]
+
+#
+# Options defined in nova.compute.api
+#
+
+# Whether to encrypt ephemeral storage (boolean value)
+#enabled=false
+
+# The cipher and mode to be used to encrypt ephemeral storage.
+# Which ciphers are available depends on kernel support. See
+# /proc/crypto for the list of available options. (string
+# value)
+#cipher=aes-xts-plain64
+
+# The bit length of the encryption key to be used to encrypt
+# ephemeral storage (in XTS mode only half of the bits are
+# used for the encryption key) (integer value)
+#key_size=512
+
+
+[glance]
+
+#
+# Options defined in nova.image.glance
+#
+
+# Default glance hostname or IP address (string value)
+# Deprecated group/name - [DEFAULT]/glance_host
+host={{ CONTROLLER_HOST_ADDRESS }}
+
+# Default glance port (integer value)
+# Deprecated group/name - [DEFAULT]/glance_port
+port=9292
+
+# Default protocol to use when connecting to glance. Set to
+# https for SSL. (string value)
+# Deprecated group/name - [DEFAULT]/glance_protocol
+protocol=http
+
+# A list of the glance api servers available to nova. Prefix
+# with https:// for ssl-based glance api servers.
+# ([hostname|ip]:port) (list value)
+# Deprecated group/name - [DEFAULT]/glance_api_servers
+api_servers=$host:$port
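+
+# Editorial example (not upstream text): the list form documented
+# above, with two hypothetical glance servers on the management
+# network, would look like:
+#   api_servers=10.0.0.1:9292,10.0.0.2:9292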
+
+# Allow to perform insecure SSL (https) requests to glance
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/glance_api_insecure
+#api_insecure=false
+
+# Number of retries when downloading an image from glance
+# (integer value)
+# Deprecated group/name - [DEFAULT]/glance_num_retries
+#num_retries=0
+
+# A list of url schemes that can be downloaded directly via
+# the direct_url. Currently supported schemes: [file]. (list
+# value)
+#allowed_direct_url_schemes=
+
+
+[hyperv]
+
+#
+# Options defined in nova.virt.hyperv.pathutils
+#
+
+# The name of a Windows share mapped to the
+# "instances_path" dir and used by the resize feature to copy
+# files to the target host. If left blank, an administrative
+# share will be used, looking for the same "instances_path"
+# used locally (string value)
+#instances_path_share=
+
+
+#
+# Options defined in nova.virt.hyperv.utilsfactory
+#
+
+# Force V1 WMI utility classes (boolean value)
+#force_hyperv_utils_v1=false
+
+# Force V1 volume utility class (boolean value)
+#force_volumeutils_v1=false
+
+
+#
+# Options defined in nova.virt.hyperv.vif
+#
+
+# External virtual switch name. If not provided, the first
+# external virtual switch is used (string value)
+#vswitch_name=
+
+
+#
+# Options defined in nova.virt.hyperv.vmops
+#
+
+# Required for live migration among hosts with different CPU
+# features (boolean value)
+#limit_cpu_features=false
+
+# Sets the admin password in the config drive image (boolean
+# value)
+#config_drive_inject_password=false
+
+# Path of qemu-img command which is used to convert between
+# different image types (string value)
+#qemu_img_cmd=qemu-img.exe
+
+# Attaches the Config Drive image as a cdrom drive instead of
+# a disk drive (boolean value)
+#config_drive_cdrom=false
+
+# Enables metrics collections for an instance by using
+# Hyper-V's metric APIs. Collected data can be retrieved by
+# other apps and services, e.g.: Ceilometer. Requires Hyper-V
+# / Windows Server 2012 and above (boolean value)
+#enable_instance_metrics_collection=false
+
+# Enables dynamic memory allocation (ballooning) when set to a
+# value greater than 1. The value expresses the ratio between
+# the total RAM assigned to an instance and its startup RAM
+# amount. For example a ratio of 2.0 for an instance with
+# 1024MB of RAM implies 512MB of RAM allocated at startup
+# (floating point value)
+#dynamic_memory_ratio=1.0
+
+# Number of seconds to wait for instance to shut down after
+# soft reboot request is made. We fall back to hard reboot if
+# instance does not shut down within this window. (integer
+# value)
+#wait_soft_reboot_seconds=60
+
+
+#
+# Options defined in nova.virt.hyperv.volumeops
+#
+
+# The number of times to retry to attach a volume (integer
+# value)
+#volume_attach_retry_count=10
+
+# Interval between volume attachment attempts, in seconds
+# (integer value)
+#volume_attach_retry_interval=5
+
+# The number of times to retry checking for a disk mounted via
+# iSCSI. (integer value)
+#mounted_disk_query_retry_count=10
+
+# Interval between checks for a mounted iSCSI disk, in
+# seconds.
(integer value) +#mounted_disk_query_retry_interval=5 + + +[image_file_url] + +# +# Options defined in nova.image.download.file +# + +# List of file systems that are configured in this file in the +# image_file_url: sections (list value) +#filesystems= + + +[ironic] + +# +# Options defined in nova.virt.ironic.driver +# + +# Version of Ironic API service endpoint. (integer value) +#api_version=1 + +# URL for Ironic API endpoint. (string value) +api_endpoint=http://{{ CONTROLLER_HOST_ADDRESS }}:6385/v1 + +# Ironic keystone admin name (string value) +admin_username={{ IRONIC_SERVICE_USER }} + +# Ironic keystone admin password. (string value) +admin_password={{ IRONIC_SERVICE_PASSWORD }} + +# Ironic keystone auth token. (string value) +#admin_auth_token= + +# Keystone public API endpoint. (string value) +admin_url=http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0 + +# Log level override for ironicclient. Set this in order to +# override the global "default_log_levels", "verbose", and +# "debug" settings. (string value) +#client_log_level= + +# Ironic keystone tenant name. (string value) +admin_tenant_name=service + +# How many retries when a request does conflict. (integer +# value) +#api_max_retries=60 + +# How often to retry in seconds when a request does conflict +# (integer value) +#api_retry_interval=2 + + +[keymgr] + +# +# Options defined in nova.keymgr +# + +# The full class name of the key manager API class (string +# value) +#api_class=nova.keymgr.conf_key_mgr.ConfKeyManager + + +# +# Options defined in nova.keymgr.conf_key_mgr +# + +# Fixed key returned by key manager, specified in hex (string +# value) +#fixed_key= + + +[keystone_authtoken] + +# +# Options defined in keystonemiddleware.auth_token +# + +# Prefix to prepend at the beginning of the path. Deprecated, +# use identity_uri. (string value) +#auth_admin_prefix= + +# Host providing the admin Identity API endpoint. Deprecated, +# use identity_uri. (string value) +#auth_host=127.0.0.1 + +# Port of the admin Identity API endpoint. Deprecated, use +# identity_uri. (integer value) +auth_port=35357 + +# Protocol of the admin Identity API endpoint (http or https). +# Deprecated, use identity_uri. (string value) +auth_protocol=http + +# Complete public Identity API endpoint (string value) +auth_uri=http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 + +# Complete admin Identity API endpoint. This should specify +# the unversioned root endpoint e.g. https://localhost:35357/ +# (string value) +identity_uri=http://{{ CONTROLLER_HOST_ADDRESS }}:35357 + +# API version of the admin Identity API endpoint (string +# value) +auth_version=v2.0 + +# Do not handle authorization requests within the middleware, +# but delegate the authorization decision to downstream WSGI +# components (boolean value) +#delay_auth_decision=false + +# Request timeout value for communicating with Identity API +# server. (boolean value) +#http_connect_timeout= + +# How many times are we trying to reconnect when communicating +# with Identity API Server. (integer value) +#http_request_max_retries=3 + +# This option is deprecated and may be removed in a future +# release. Single shared secret with the Keystone +# configuration used for bootstrapping a Keystone +# installation, or otherwise bypassing the normal +# authentication process. This option should not be used, use +# `admin_user` and `admin_password` instead. 
(string value) +#admin_token= + +# Keystone account username (string value) +admin_user={{ NOVA_SERVICE_USER }} + +# Keystone account password (string value) +admin_password={{ NOVA_SERVICE_PASSWORD }} + +# Keystone service account tenant name to validate user tokens +# (string value) +admin_tenant_name=service + +# Env key for the swift cache (string value) +#cache= + +# Required if Keystone server requires client certificate +# (string value) +#certfile= + +# Required if Keystone server requires client certificate +# (string value) +#keyfile= + +# A PEM encoded Certificate Authority to use when verifying +# HTTPs connections. Defaults to system CAs. (string value) +#cafile= + +# Verify HTTPS connections. (boolean value) +#insecure=false + +# Directory used to cache files related to PKI tokens (string +# value) +#signing_dir= + +# Optionally specify a list of memcached server(s) to use for +# caching. If left undefined, tokens will instead be cached +# in-process. (list value) +# Deprecated group/name - [DEFAULT]/memcache_servers +#memcached_servers= + +# In order to prevent excessive effort spent validating +# tokens, the middleware caches previously-seen tokens for a +# configurable duration (in seconds). Set to -1 to disable +# caching completely. (integer value) +#token_cache_time=300 + +# Determines the frequency at which the list of revoked tokens +# is retrieved from the Identity service (in seconds). A high +# number of revocation events combined with a low cache +# duration may significantly reduce performance. (integer +# value) +#revocation_cache_time=10 + +# (optional) if defined, indicate whether token data should be +# authenticated or authenticated and encrypted. Acceptable +# values are MAC or ENCRYPT. If MAC, token data is +# authenticated (with HMAC) in the cache. If ENCRYPT, token +# data is encrypted and authenticated in the cache. If the +# value is not one of these options or empty, auth_token will +# raise an exception on initialization. (string value) +#memcache_security_strategy= + +# (optional, mandatory if memcache_security_strategy is +# defined) this string is used for key derivation. (string +# value) +#memcache_secret_key= + +# (optional) number of seconds memcached server is considered +# dead before it is tried again. (integer value) +#memcache_pool_dead_retry=300 + +# (optional) max total number of open connections to every +# memcached server. (integer value) +#memcache_pool_maxsize=10 + +# (optional) socket timeout in seconds for communicating with +# a memcache server. (integer value) +#memcache_pool_socket_timeout=3 + +# (optional) number of seconds a connection to memcached is +# held unused in the pool before it is closed. (integer value) +#memcache_pool_unused_timeout=60 + +# (optional) number of seconds that an operation will wait to +# get a memcache client connection from the pool. (integer +# value) +#memcache_pool_conn_get_timeout=10 + +# (optional) use the advanced (eventlet safe) memcache client +# pool. The advanced pool will only work under python 2.x. +# (boolean value) +#memcache_use_advanced_pool=false + +# (optional) indicate whether to set the X-Service-Catalog +# header. If False, middleware will not ask for service +# catalog on token validation and will not set the X-Service- +# Catalog header. (boolean value) +#include_service_catalog=true + +# Used to control the use and type of token binding. Can be +# set to: "disabled" to not check token binding. 
"permissive" +# (default) to validate binding information if the bind type +# is of a form known to the server and ignore it if not. +# "strict" like "permissive" but if the bind type is unknown +# the token will be rejected. "required" any form of token +# binding is needed to be allowed. Finally the name of a +# binding method that must be present in tokens. (string +# value) +#enforce_token_bind=permissive + +# If true, the revocation list will be checked for cached +# tokens. This requires that PKI tokens are configured on the +# Keystone server. (boolean value) +#check_revocations_for_cached=false + +# Hash algorithms to use for hashing PKI tokens. This may be a +# single algorithm or multiple. The algorithms are those +# supported by Python standard hashlib.new(). The hashes will +# be tried in the order given, so put the preferred one first +# for performance. The result of the first hash will be stored +# in the cache. This will typically be set to multiple values +# only while migrating from a less secure algorithm to a more +# secure one. Once all the old tokens are expired this option +# should be set to a single value for better performance. +# (list value) +#hash_algorithms=md5 + + +[libvirt] + +# +# Options defined in nova.virt.libvirt.driver +# + +# Rescue ami image. This will not be used if an image id is +# provided by the user. (string value) +#rescue_image_id= + +# Rescue aki image (string value) +#rescue_kernel_id= + +# Rescue ari image (string value) +#rescue_ramdisk_id= + +# Libvirt domain type (valid options are: kvm, lxc, qemu, uml, +# xen) (string value) +virt_type={{ NOVA_VIRT_TYPE }} + +# Override the default libvirt URI (which is dependent on +# virt_type) (string value) +#connection_uri= + +# Inject the admin password at boot time, without an agent. +# (boolean value) +#inject_password=false + +# Inject the ssh public key at boot time (boolean value) +#inject_key=false + +# The partition to inject to : -2 => disable, -1 => inspect +# (libguestfs only), 0 => not partitioned, >0 => partition +# number (integer value) +#inject_partition=-2 + +# Sync virtual and real mouse cursors in Windows VMs (boolean +# value) +#use_usb_tablet=true + +# Migration target URI (any included "%s" is replaced with the +# migration target hostname) (string value) +#live_migration_uri=qemu+tcp://%s/system + +# Migration flags to be set for live migration (string value) +#live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED + +# Migration flags to be set for block migration (string value) +#block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, VIR_MIGRATE_NON_SHARED_INC + +# Maximum bandwidth to be used during migration, in Mbps +# (integer value) +#live_migration_bandwidth=0 + +# Snapshot image format (valid options are : raw, qcow2, vmdk, +# vdi). Defaults to same as source image (string value) +#snapshot_image_format= + +# DEPRECATED. Libvirt handlers for remote volumes. This option +# is deprecated and will be removed in the Kilo release. 
(list
+# value)
+#volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver,aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver,glusterfs=nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver,fibre_channel=nova.virt.libvirt.volume.LibvirtFibreChannelVolumeDriver,scality=nova.virt.libvirt.volume.LibvirtScalityVolumeDriver
+
+# Override the default disk prefix for the devices attached to
+# a server, which is dependent on virt_type. (valid options
+# are: sd, xvd, uvd, vd) (string value)
+#disk_prefix=
+
+# Number of seconds to wait for instance to shut down after
+# soft reboot request is made. We fall back to hard reboot if
+# instance does not shutdown within this window. (integer
+# value)
+#wait_soft_reboot_seconds=120
+
+# Set to "host-model" to clone the host CPU feature flags; to
+# "host-passthrough" to use the host CPU model exactly; to
+# "custom" to use a named CPU model; to "none" to not set any
+# CPU model. If virt_type="kvm|qemu", it will default to
+# "host-model", otherwise it will default to "none" (string
+# value)
+#cpu_mode=
+
+# Set to a named libvirt CPU model (see names listed in
+# /usr/share/libvirt/cpu_map.xml). Only has effect if
+# cpu_mode="custom" and virt_type="kvm|qemu" (string value)
+#cpu_model=
+
+# Location where libvirt driver will store snapshots before
+# uploading them to image service (string value)
+#snapshots_directory=$instances_path/snapshots
+
+# Location where the Xen hvmloader is kept (string value)
+#xen_hvmloader_path=/usr/lib/xen/boot/hvmloader
+
+# Specific cachemodes to use for different disk types e.g.:
+# file=directsync,block=none (list value)
+#disk_cachemodes=
+
+# A path to a device that will be used as source of entropy on
+# the host. Permitted options are: /dev/random or /dev/hwrng
+# (string value)
+#rng_dev_path=
+
+# For qemu or KVM guests, set this option to specify a default
+# machine type per host architecture. You can find a list of
+# supported machine types in your environment by checking the
+# output of the "virsh capabilities" command. The format of the
+# value for this config option is host-arch=machine-type. For
+# example: x86_64=machinetype1,armv7l=machinetype2 (list
+# value)
+#hw_machine_type=
+
+# The data source used to populate the host "serial" UUID
+# exposed to guest in the virtual BIOS. Permitted options are
+# "hardware", "os", "none" or "auto" (default). (string value)
+#sysinfo_serial=auto
+
+# Period in seconds for memory usage statistics. A zero or
+# negative value disables memory usage statistics.
+# (integer value)
+#mem_stats_period_seconds=10
+
+# List of uid targets and ranges. Syntax is
+# guest-uid:host-uid:count. Maximum of 5 allowed. (list value)
+#uid_maps=
+
+# List of guid targets and ranges. Syntax is
+# guest-gid:host-gid:count. Maximum of 5 allowed. (list value)
+#gid_maps=
+
+
+#
+# Options defined in nova.virt.libvirt.imagebackend
+#
+
+# VM Images format. Acceptable values are: raw, qcow2, lvm,
+# rbd, default. If default is specified, then use_cow_images
+# flag is used instead of this one. (string value)
+#images_type=default
+
+# LVM Volume Group that is used for VM images, when you
+# specify images_type=lvm. (string value)
+#images_volume_group=
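The hw_machine_type option above flattens a mapping into host-arch=machine-type pairs. A minimal sketch of how such a value could be parsed (parse_machine_types is a hypothetical helper for illustration, not nova's own code):

    def parse_machine_types(value):
        """Parse 'x86_64=machinetype1,armv7l=machinetype2' into a dict."""
        mapping = {}
        for pair in value.split(','):
            arch, _, machine_type = pair.strip().partition('=')
            mapping[arch] = machine_type
        return mapping

    print(parse_machine_types('x86_64=machinetype1,armv7l=machinetype2'))
    # {'x86_64': 'machinetype1', 'armv7l': 'machinetype2'}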
+
+# Create sparse logical volumes (with virtualsize) if this
+# flag is set to True. (boolean value)
+#sparse_logical_volumes=false
+
+# Method used to wipe old volumes (valid options are: none,
+# zero, shred) (string value)
+#volume_clear=zero
+
+# Size in MiB to wipe at start of old volumes. 0 => all
+# (integer value)
+#volume_clear_size=0
+
+# The RADOS pool in which rbd volumes are stored (string
+# value)
+#images_rbd_pool=rbd
+
+# Path to the ceph configuration file to use (string value)
+#images_rbd_ceph_conf=
+
+# Discard option for nova managed disks (valid options are:
+# ignore, unmap). Requires libvirt 1.0.6, qemu 1.5 (raw
+# format) and qemu 1.6 (qcow2 format) (string value)
+#hw_disk_discard=
+
+
+#
+# Options defined in nova.virt.libvirt.imagecache
+#
+
+# Allows image information files to be stored in non-standard
+# locations (string value)
+#image_info_filename_pattern=$instances_path/$image_cache_subdirectory_name/%(image)s.info
+
+# Should unused kernel images be removed? This is only safe to
+# enable if all compute nodes have been updated to support
+# this option. This will be enabled by default in future.
+# (boolean value)
+#remove_unused_kernels=false
+
+# Unused resized base images younger than this will not be
+# removed (integer value)
+#remove_unused_resized_minimum_age_seconds=3600
+
+# Write a checksum for files in _base to disk (boolean value)
+#checksum_base_images=false
+
+# How frequently to checksum base images (integer value)
+#checksum_interval_seconds=3600
+
+
+#
+# Options defined in nova.virt.libvirt.utils
+#
+
+# Compress snapshot images when possible. This currently
+# applies exclusively to qcow2 images (boolean value)
+#snapshot_compression=false
+
+
+#
+# Options defined in nova.virt.libvirt.vif
+#
+
+# Use virtio for bridge interfaces with KVM/QEMU (boolean
+# value)
+#use_virtio_for_bridges=true
+
+
+#
+# Options defined in nova.virt.libvirt.volume
+#
+
+# Number of times to rescan iSCSI target to find volume
+# (integer value)
+#num_iscsi_scan_tries=5
+
+# Number of times to rescan iSER target to find volume
+# (integer value)
+#num_iser_scan_tries=5
+
+# The RADOS client name for accessing rbd volumes (string
+# value)
+#rbd_user=
+
+# The libvirt UUID of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=
+
+# Directory where the NFS volume is mounted on the compute
+# node (string value)
+#nfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the NFS client. See the nfs man
+# page for details (string value)
+#nfs_mount_options=
+
+# Number of times to rediscover AoE target to find volume
+# (integer value)
+#num_aoe_discover_tries=3
+
+# Directory where the glusterfs volume is mounted on the
+# compute node (string value)
+#glusterfs_mount_point_base=$state_path/mnt
+
+# Use multipath connection of the iSCSI volume (boolean value)
+#iscsi_use_multipath=false
+
+# Use multipath connection of the iSER volume (boolean value)
+#iser_use_multipath=false
+
+# Path or URL to Scality SOFS configuration file (string
+# value)
+#scality_sofs_config=
+
+# Base dir where Scality SOFS shall be mounted (string value)
+#scality_sofs_mount_point=$state_path/scality
+
+# Protocols listed here will be accessed directly from QEMU.
+# Currently supported protocols: [gluster] (list value)
+#qemu_allowed_storage_drivers=
+
+
+[matchmaker_redis]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=
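The [matchmaker_redis] settings above only take effect when oslo.messaging's redis matchmaker is in use; they map one-to-one onto a redis client connection. A sketch using the redis-py package (an assumption, it is not shipped by this patch):

    import redis

    # Mirror the defaults above; password is None when the option is unset.
    client = redis.StrictRedis(host='127.0.0.1', port=6379, password=None)
    client.ping()   # raises redis.exceptions.ConnectionError if unreachable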
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[metrics]
+
+#
+# Options defined in nova.scheduler.weights.metrics
+#
+
+# Multiplier used for weighing metrics. (floating point value)
+#weight_multiplier=1.0
+
+# How the metrics are going to be weighed. This should be in
+# the form of "<name1>=<ratio1>, <name2>=<ratio2>, ...", where
+# <nameX> is one of the metrics to be weighed, and <ratioX> is
+# the corresponding ratio. So for "name1=1.0, name2=-1.0" the
+# final weight would be name1.value * 1.0 + name2.value *
+# -1.0. (list value)
+#weight_setting=
+
+# How to treat unavailable metrics. If this is set to True, a
+# metric that is NOT available for a host raises an exception,
+# so it is recommended to use the scheduler filter
+# MetricFilter to filter out those hosts. If it is set to
+# False, the unavailable metric is treated as a negative
+# factor in the weighing process, and the returned value is
+# set by the option weight_of_unavailable. (boolean value)
+#required=true
+
+# The final weight value to be returned if required is set to
+# False and any one of the metrics set by weight_setting is
+# unavailable. (floating point value)
+#weight_of_unavailable=-10000.0
+
+
+[neutron]
+
+#
+# Options defined in nova.api.metadata.handler
+#
+
+# Set this flag to indicate that Neutron will proxy metadata
+# requests and resolve instance ids. (boolean value)
+# Deprecated group/name - [DEFAULT]/service_neutron_metadata_proxy
+service_metadata_proxy=True
+
+# Shared secret to validate proxied Neutron metadata requests
+# (string value)
+# Deprecated group/name - [DEFAULT]/neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret={{ METADATA_PROXY_SHARED_SECRET }}
+
+
+#
+# Options defined in nova.network.neutronv2.api
+#
+
+# URL for connecting to neutron (string value)
+# Deprecated group/name - [DEFAULT]/neutron_url
+url=http://{{ CONTROLLER_HOST_ADDRESS }}:9696
+
+# Timeout value for connecting to neutron in seconds (integer
+# value)
+# Deprecated group/name - [DEFAULT]/neutron_url_timeout
+#url_timeout=30
+
+# User id for connecting to neutron in admin context (string
+# value)
+#admin_user_id=
+
+# Username for connecting to neutron in admin context (string
+# value)
+# Deprecated group/name - [DEFAULT]/neutron_admin_username
+admin_username={{ NEUTRON_SERVICE_USER }}
+
+# Password for connecting to neutron in admin context (string
+# value)
+# Deprecated group/name - [DEFAULT]/neutron_admin_password
+admin_password={{ NEUTRON_SERVICE_PASSWORD }}
+
+# Tenant id for connecting to neutron in admin context (string
+# value)
+# Deprecated group/name - [DEFAULT]/neutron_admin_tenant_id
+#admin_tenant_id=
+
+# Tenant name for connecting to neutron in admin context. This
+# option will be ignored if neutron_admin_tenant_id is set.
+# Note that with Keystone V3 tenant names are only unique
+# within a domain. 
(string value) +# Deprecated group/name - [DEFAULT]/neutron_admin_tenant_name +admin_tenant_name=service + +# Region name for connecting to neutron in admin context +# (string value) +# Deprecated group/name - [DEFAULT]/neutron_region_name +#region_name= + +# Authorization URL for connecting to neutron in admin context +# (string value) +# Deprecated group/name - [DEFAULT]/neutron_admin_auth_url +admin_auth_url=http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0 + +# If set, ignore any SSL validation issues (boolean value) +# Deprecated group/name - [DEFAULT]/neutron_api_insecure +#api_insecure=false + +# Authorization strategy for connecting to neutron in admin +# context (string value) +# Deprecated group/name - [DEFAULT]/neutron_auth_strategy +auth_strategy=keystone + +# Name of Integration Bridge used by Open vSwitch (string +# value) +# Deprecated group/name - [DEFAULT]/neutron_ovs_bridge +#ovs_bridge=br-int + +# Number of seconds before querying neutron for extensions +# (integer value) +# Deprecated group/name - [DEFAULT]/neutron_extension_sync_interval +#extension_sync_interval=600 + +# Location of CA certificates file to use for neutron client +# requests. (string value) +# Deprecated group/name - [DEFAULT]/neutron_ca_certificates_file +#ca_certificates_file= + +# Allow an instance to have multiple vNICs attached to the +# same Neutron network. (boolean value) +#allow_duplicate_networks=false + + +[osapi_v3] + +# +# Options defined in nova.api.openstack +# + +# Whether the V3 API is enabled or not (boolean value) +#enabled=false + +# A list of v3 API extensions to never load. Specify the +# extension aliases here. (list value) +#extensions_blacklist= + +# If the list is not empty then a v3 API extension will only +# be loaded if it exists in this list. Specify the extension +# aliases here. (list value) +#extensions_whitelist= + + +[rdp] + +# +# Options defined in nova.rdp +# + +# Location of RDP html5 console proxy, in the form +# "http://127.0.0.1:6083/" (string value) +#html5_proxy_base_url=http://127.0.0.1:6083/ + +# Enable RDP related features (boolean value) +#enabled=false + + +[serial_console] + +# +# Options defined in nova.cmd.serialproxy +# + +# Host on which to listen for incoming requests (string value) +serialproxy_host=127.0.0.1 + +# Port on which to listen for incoming requests (integer +# value) +#serialproxy_port=6083 + + +# +# Options defined in nova.console.serial +# + +# Enable serial console related features (boolean value) +enabled=false + +# Range of TCP ports to use for serial ports on compute hosts +# (string value) +#port_range=10000:20000 + +# Location of serial console proxy. 
(string value) +#base_url=ws://127.0.0.1:6083/ + +# IP address on which instance serial console should listen +# (string value) +#listen=127.0.0.1 + +# The address to which proxy clients (like nova-serialproxy) +# should connect (string value) +#proxyclient_address=127.0.0.1 + + +[spice] + +# +# Options defined in nova.cmd.spicehtml5proxy +# + +# Host on which to listen for incoming requests (string value) +# Deprecated group/name - [DEFAULT]/spicehtml5proxy_host +#html5proxy_host=0.0.0.0 + +# Port on which to listen for incoming requests (integer +# value) +# Deprecated group/name - [DEFAULT]/spicehtml5proxy_port +#html5proxy_port=6082 + + +# +# Options defined in nova.spice +# + +# Location of spice HTML5 console proxy, in the form +# "http://127.0.0.1:6082/spice_auto.html" (string value) +#html5proxy_base_url=http://127.0.0.1:6082/spice_auto.html + +# IP address on which instance spice server should listen +# (string value) +#server_listen=127.0.0.1 + +# The address to which proxy clients (like nova- +# spicehtml5proxy) should connect (string value) +#server_proxyclient_address=127.0.0.1 + +# Enable spice related features (boolean value) +enabled=false + +# Enable spice guest agent support (boolean value) +#agent_enabled=true + +# Keymap for spice (string value) +#keymap=en-us + + +[ssl] + +# +# Options defined in nova.openstack.common.sslutils +# + +# CA certificate file to use to verify connecting clients. +# (string value) +#ca_file= + +# Certificate file to use when starting the server securely. +# (string value) +#cert_file= + +# Private key file to use when starting the server securely. +# (string value) +#key_file= + + +[trusted_computing] + +# +# Options defined in nova.scheduler.filters.trusted_filter +# + +# Attestation server HTTP (string value) +#attestation_server= + +# Attestation server Cert file for Identity verification +# (string value) +#attestation_server_ca_file= + +# Attestation server port (string value) +#attestation_port=8443 + +# Attestation web API URL (string value) +#attestation_api_url=/OpenAttestationWebServices/V1.0 + +# Attestation authorization blob - must change (string value) +#attestation_auth_blob= + +# Attestation status cache valid period length (integer value) +#attestation_auth_timeout=60 + +# Disable SSL cert verification for Attestation service +# (boolean value) +#attestation_insecure_ssl=false + + +[upgrade_levels] + +# +# Options defined in nova.baserpc +# + +# Set a version cap for messages sent to the base api in any +# service (string value) +#baseapi= + + +# +# Options defined in nova.cells.rpc_driver +# + +# Set a version cap for messages sent between cells services +# (string value) +#intercell= + + +# +# Options defined in nova.cells.rpcapi +# + +# Set a version cap for messages sent to local cells services +# (string value) +#cells= + + +# +# Options defined in nova.cert.rpcapi +# + +# Set a version cap for messages sent to cert services (string +# value) +#cert= + + +# +# Options defined in nova.compute.rpcapi +# + +# Set a version cap for messages sent to compute services. If +# you plan to do a live upgrade from havana to icehouse, you +# should set this option to "icehouse-compat" before beginning +# the live upgrade procedure. 
(string value)
+#compute=
+
+
+#
+# Options defined in nova.conductor.rpcapi
+#
+
+# Set a version cap for messages sent to conductor services
+# (string value)
+#conductor=
+
+
+#
+# Options defined in nova.console.rpcapi
+#
+
+# Set a version cap for messages sent to console services
+# (string value)
+#console=
+
+
+#
+# Options defined in nova.consoleauth.rpcapi
+#
+
+# Set a version cap for messages sent to consoleauth services
+# (string value)
+#consoleauth=
+
+
+#
+# Options defined in nova.network.rpcapi
+#
+
+# Set a version cap for messages sent to network services
+# (string value)
+#network=
+
+
+#
+# Options defined in nova.scheduler.rpcapi
+#
+
+# Set a version cap for messages sent to scheduler services
+# (string value)
+#scheduler=
+
+
+[vmware]
+
+#
+# Options defined in nova.virt.vmwareapi.driver
+#
+
+# Hostname or IP address for connection to VMware VC host.
+# (string value)
+#host_ip=
+
+# Port for connection to VMware VC host. (integer value)
+#host_port=443
+
+# Username for connection to VMware VC host. (string value)
+#host_username=
+
+# Password for connection to VMware VC host. (string value)
+#host_password=
+
+# Name of a VMware Cluster ComputeResource. (multi valued)
+#cluster_name=
+
+# Regex to match the name of a datastore. (string value)
+#datastore_regex=
+
+# The interval used for polling of remote tasks. (floating
+# point value)
+#task_poll_interval=0.5
+
+# The number of times we retry on failures, e.g., socket
+# error, etc. (integer value)
+#api_retry_count=10
+
+# VNC starting port (integer value)
+#vnc_port=5900
+
+# Total number of VNC ports (integer value)
+#vnc_port_total=10000
+
+# Whether to use linked clone (boolean value)
+#use_linked_clone=true
+
+# Optional VIM Service WSDL Location e.g.
+# http://<server>/vimService.wsdl. Optional override to the
+# default location for bug workarounds (string value)
+#wsdl_location=
+
+
+#
+# Options defined in nova.virt.vmwareapi.vif
+#
+
+# Physical ethernet adapter name for vlan networking (string
+# value)
+#vlan_interface=vmnic0
+
+# Name of Integration Bridge (string value)
+#integration_bridge=br-int
+
+
+#
+# Options defined in nova.virt.vmwareapi.vim_util
+#
+
+# The maximum number of ObjectContent data objects that should
+# be returned in a single result. A positive value will cause
+# the operation to suspend the retrieval when the count of
+# objects reaches the specified maximum. The server may still
+# limit the count to something less than the configured value.
+# Any remaining objects may be retrieved with additional
+# requests. (integer value)
+#maximum_objects=100
+
+
+[xenserver]
+
+#
+# Options defined in nova.virt.xenapi.agent
+#
+
+# Number of seconds to wait for agent reply (integer value)
+#agent_timeout=30
+
+# Number of seconds to wait for agent to be fully operational
+# (integer value)
+#agent_version_timeout=300
+
+# Number of seconds to wait for agent reply to resetnetwork
+# request (integer value)
+#agent_resetnetwork_timeout=60
+
+# Specifies the path in which the XenAPI guest agent should be
+# located. If the agent is present, network configuration is
+# not injected into the image. Used if
+# compute_driver=xenapi.XenAPIDriver and flat_injected=True
+# (string value)
+#agent_path=usr/sbin/xe-update-networking
+
+# Disables the use of the XenAPI agent in any image regardless
+# of what image properties are present. 
(boolean value) +#disable_agent=false + +# Determines if the XenAPI agent should be used when the image +# used does not contain a hint to declare if the agent is +# present or not. The hint is a glance property +# "xenapi_use_agent" that has the value "True" or "False". +# Note that waiting for the agent when it is not present will +# significantly increase server boot times. (boolean value) +#use_agent_default=false + + +# +# Options defined in nova.virt.xenapi.client.session +# + +# Timeout in seconds for XenAPI login. (integer value) +#login_timeout=10 + +# Maximum number of concurrent XenAPI connections. Used only +# if compute_driver=xenapi.XenAPIDriver (integer value) +#connection_concurrent=5 + + +# +# Options defined in nova.virt.xenapi.driver +# + +# URL for connection to XenServer/Xen Cloud Platform. A +# special value of unix://local can be used to connect to the +# local unix socket. Required if +# compute_driver=xenapi.XenAPIDriver (string value) +#connection_url= + +# Username for connection to XenServer/Xen Cloud Platform. +# Used only if compute_driver=xenapi.XenAPIDriver (string +# value) +#connection_username=root + +# Password for connection to XenServer/Xen Cloud Platform. +# Used only if compute_driver=xenapi.XenAPIDriver (string +# value) +#connection_password= + +# The interval used for polling of coalescing vhds. Used only +# if compute_driver=xenapi.XenAPIDriver (floating point value) +#vhd_coalesce_poll_interval=5.0 + +# Ensure compute service is running on host XenAPI connects +# to. (boolean value) +#check_host=true + +# Max number of times to poll for VHD to coalesce. Used only +# if compute_driver=xenapi.XenAPIDriver (integer value) +#vhd_coalesce_max_attempts=20 + +# Base path to the storage repository (string value) +#sr_base_path=/var/run/sr-mount + +# The iSCSI Target Host (string value) +#target_host= + +# The iSCSI Target Port, default is port 3260 (string value) +#target_port=3260 + +# IQN Prefix (string value) +#iqn_prefix=iqn.2010-10.org.openstack + +# Used to enable the remapping of VBD dev (Works around an +# issue in Ubuntu Maverick) (boolean value) +#remap_vbd_dev=false + +# Specify prefix to remap VBD dev to (ex. /dev/xvdb -> +# /dev/sdb) (string value) +#remap_vbd_dev_prefix=sd + + +# +# Options defined in nova.virt.xenapi.image.bittorrent +# + +# Base URL for torrent files. (string value) +#torrent_base_url= + +# Probability that peer will become a seeder. (1.0 = 100%) +# (floating point value) +#torrent_seed_chance=1.0 + +# Number of seconds after downloading an image via BitTorrent +# that it should be seeded for other peers. (integer value) +#torrent_seed_duration=3600 + +# Cached torrent files not accessed within this number of +# seconds can be reaped (integer value) +#torrent_max_last_accessed=86400 + +# Beginning of port range to listen on (integer value) +#torrent_listen_port_start=6881 + +# End of port range to listen on (integer value) +#torrent_listen_port_end=6891 + +# Number of seconds a download can remain at the same progress +# percentage w/o being considered a stall (integer value) +#torrent_download_stall_cutoff=600 + +# Maximum number of seeder processes to run concurrently +# within a given dom0. 
(-1 = no limit) (integer value) +#torrent_max_seeder_processes_per_host=1 + + +# +# Options defined in nova.virt.xenapi.pool +# + +# To use for hosts with different CPUs (boolean value) +#use_join_force=true + + +# +# Options defined in nova.virt.xenapi.vif +# + +# Name of Integration Bridge used by Open vSwitch (string +# value) +#ovs_integration_bridge=xapi1 + + +# +# Options defined in nova.virt.xenapi.vm_utils +# + +# Cache glance images locally. `all` will cache all images, +# `some` will only cache images that have the image_property +# `cache_in_nova=True`, and `none` turns off caching entirely +# (string value) +#cache_images=all + +# Compression level for images, e.g., 9 for gzip -9. Range is +# 1-9, 9 being most compressed but most CPU intensive on dom0. +# (integer value) +#image_compression_level= + +# Default OS type (string value) +#default_os_type=linux + +# Time to wait for a block device to be created (integer +# value) +#block_device_creation_timeout=10 + +# Maximum size in bytes of kernel or ramdisk images (integer +# value) +#max_kernel_ramdisk_size=16777216 + +# Filter for finding the SR to be used to install guest +# instances on. To use the Local Storage in default +# XenServer/XCP installations set this flag to other-config +# :i18n-key=local-storage. To select an SR with a different +# matching criteria, you could set it to other- +# config:my_favorite_sr=true. On the other hand, to fall back +# on the Default SR, as displayed by XenCenter, set this flag +# to: default-sr:true (string value) +#sr_matching_filter=default-sr:true + +# Whether to use sparse_copy for copying data on a resize down +# (False will use standard dd). This speeds up resizes down +# considerably since large runs of zeros won't have to be +# rsynced (boolean value) +#sparse_copy=true + +# Maximum number of retries to unplug VBD (integer value) +#num_vbd_unplug_retries=10 + +# Whether or not to download images via Bit Torrent +# (all|some|none). (string value) +#torrent_images=none + +# Name of network to use for booting iPXE ISOs (string value) +#ipxe_network_name= + +# URL to the iPXE boot menu (string value) +#ipxe_boot_menu_url= + +# Name and optionally path of the tool used for ISO image +# creation (string value) +#ipxe_mkisofs_cmd=mkisofs + + +# +# Options defined in nova.virt.xenapi.vmops +# + +# Number of seconds to wait for instance to go to running +# state (integer value) +#running_timeout=60 + +# The XenAPI VIF driver using XenServer Network APIs. (string +# value) +#vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver + +# Dom0 plugin driver used to handle image uploads. (string +# value) +#image_upload_handler=nova.virt.xenapi.image.glance.GlanceStore + + +# +# Options defined in nova.virt.xenapi.volume_utils +# + +# Number of seconds to wait for an SR to settle if the VDI +# does not exist when first introduced (integer value) +#introduce_vdi_retry_wait=20 + + +[zookeeper] + +# +# Options defined in nova.servicegroup.drivers.zk +# + +# The ZooKeeper addresses for servicegroup service in the +# format of host1:port,host2:port,host3:port (string value) +#address= + +# The recv_timeout parameter for the zk session (integer +# value) +#recv_timeout=4000 + +# The prefix used in ZooKeeper to store ephemeral nodes +# (string value) +#sg_prefix=/servicegroups + +# Number of seconds to wait until retrying to join the session +# (integer value) +#sg_retry_interval=5 + +[database] + +# The SQLAlchemy connection string to use to connect to the +# database. 
(string value) +connection=postgresql://{{ NOVA_DB_USER }}:{{ NOVA_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/nova diff --git a/install-files/openstack/usr/share/openstack/nova/policy.json b/install-files/openstack/usr/share/openstack/nova/policy.json new file mode 100644 index 00000000..cc5b8ea4 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/nova/policy.json @@ -0,0 +1,324 @@ +{ + "context_is_admin": "role:admin", + "admin_or_owner": "is_admin:True or project_id:%(project_id)s", + "default": "rule:admin_or_owner", + + "cells_scheduler_filter:TargetCellFilter": "is_admin:True", + + "compute:create": "", + "compute:create:attach_network": "", + "compute:create:attach_volume": "", + "compute:create:forced_host": "is_admin:True", + "compute:get_all": "", + "compute:get_all_tenants": "", + "compute:start": "rule:admin_or_owner", + "compute:stop": "rule:admin_or_owner", + "compute:unlock_override": "rule:admin_api", + + "compute:shelve": "", + "compute:shelve_offload": "", + "compute:unshelve": "", + + "compute:volume_snapshot_create": "", + "compute:volume_snapshot_delete": "", + + "admin_api": "is_admin:True", + "compute:v3:servers:start": "rule:admin_or_owner", + "compute:v3:servers:stop": "rule:admin_or_owner", + "compute_extension:v3:os-access-ips:discoverable": "", + "compute_extension:v3:os-access-ips": "", + "compute_extension:accounts": "rule:admin_api", + "compute_extension:admin_actions": "rule:admin_api", + "compute_extension:admin_actions:pause": "rule:admin_or_owner", + "compute_extension:admin_actions:unpause": "rule:admin_or_owner", + "compute_extension:admin_actions:suspend": "rule:admin_or_owner", + "compute_extension:admin_actions:resume": "rule:admin_or_owner", + "compute_extension:admin_actions:lock": "rule:admin_or_owner", + "compute_extension:admin_actions:unlock": "rule:admin_or_owner", + "compute_extension:admin_actions:resetNetwork": "rule:admin_api", + "compute_extension:admin_actions:injectNetworkInfo": "rule:admin_api", + "compute_extension:admin_actions:createBackup": "rule:admin_or_owner", + "compute_extension:admin_actions:migrateLive": "rule:admin_api", + "compute_extension:admin_actions:resetState": "rule:admin_api", + "compute_extension:admin_actions:migrate": "rule:admin_api", + "compute_extension:v3:os-admin-actions": "rule:admin_api", + "compute_extension:v3:os-admin-actions:discoverable": "", + "compute_extension:v3:os-admin-actions:reset_network": "rule:admin_api", + "compute_extension:v3:os-admin-actions:inject_network_info": "rule:admin_api", + "compute_extension:v3:os-admin-actions:reset_state": "rule:admin_api", + "compute_extension:v3:os-admin-password": "", + "compute_extension:v3:os-admin-password:discoverable": "", + "compute_extension:aggregates": "rule:admin_api", + "compute_extension:v3:os-aggregates:discoverable": "", + "compute_extension:v3:os-aggregates:index": "rule:admin_api", + "compute_extension:v3:os-aggregates:create": "rule:admin_api", + "compute_extension:v3:os-aggregates:show": "rule:admin_api", + "compute_extension:v3:os-aggregates:update": "rule:admin_api", + "compute_extension:v3:os-aggregates:delete": "rule:admin_api", + "compute_extension:v3:os-aggregates:add_host": "rule:admin_api", + "compute_extension:v3:os-aggregates:remove_host": "rule:admin_api", + "compute_extension:v3:os-aggregates:set_metadata": "rule:admin_api", + "compute_extension:agents": "rule:admin_api", + "compute_extension:v3:os-agents": "rule:admin_api", + "compute_extension:v3:os-agents:discoverable": "", + 
"compute_extension:attach_interfaces": "", + "compute_extension:v3:os-attach-interfaces": "", + "compute_extension:v3:os-attach-interfaces:discoverable": "", + "compute_extension:baremetal_nodes": "rule:admin_api", + "compute_extension:cells": "rule:admin_api", + "compute_extension:v3:os-cells": "rule:admin_api", + "compute_extension:v3:os-cells:discoverable": "", + "compute_extension:certificates": "", + "compute_extension:v3:os-certificates:create": "", + "compute_extension:v3:os-certificates:show": "", + "compute_extension:v3:os-certificates:discoverable": "", + "compute_extension:cloudpipe": "rule:admin_api", + "compute_extension:cloudpipe_update": "rule:admin_api", + "compute_extension:console_output": "", + "compute_extension:v3:consoles:discoverable": "", + "compute_extension:v3:os-console-output:discoverable": "", + "compute_extension:v3:os-console-output": "", + "compute_extension:consoles": "", + "compute_extension:v3:os-remote-consoles": "", + "compute_extension:v3:os-remote-consoles:discoverable": "", + "compute_extension:createserverext": "", + "compute_extension:v3:os-create-backup:discoverable": "", + "compute_extension:v3:os-create-backup": "rule:admin_or_owner", + "compute_extension:deferred_delete": "", + "compute_extension:v3:os-deferred-delete": "", + "compute_extension:v3:os-deferred-delete:discoverable": "", + "compute_extension:disk_config": "", + "compute_extension:evacuate": "rule:admin_api", + "compute_extension:v3:os-evacuate": "rule:admin_api", + "compute_extension:v3:os-evacuate:discoverable": "", + "compute_extension:extended_server_attributes": "rule:admin_api", + "compute_extension:v3:os-extended-server-attributes": "rule:admin_api", + "compute_extension:v3:os-extended-server-attributes:discoverable": "", + "compute_extension:extended_status": "", + "compute_extension:v3:os-extended-status": "", + "compute_extension:v3:os-extended-status:discoverable": "", + "compute_extension:extended_availability_zone": "", + "compute_extension:v3:os-extended-availability-zone": "", + "compute_extension:v3:os-extended-availability-zone:discoverable": "", + "compute_extension:extended_ips": "", + "compute_extension:extended_ips_mac": "", + "compute_extension:extended_vif_net": "", + "compute_extension:v3:extension_info:discoverable": "", + "compute_extension:extended_volumes": "", + "compute_extension:v3:os-extended-volumes": "", + "compute_extension:v3:os-extended-volumes:swap": "", + "compute_extension:v3:os-extended-volumes:discoverable": "", + "compute_extension:v3:os-extended-volumes:attach": "", + "compute_extension:v3:os-extended-volumes:detach": "", + "compute_extension:fixed_ips": "rule:admin_api", + "compute_extension:flavor_access": "", + "compute_extension:flavor_access:addTenantAccess": "rule:admin_api", + "compute_extension:flavor_access:removeTenantAccess": "rule:admin_api", + "compute_extension:v3:flavor-access": "", + "compute_extension:v3:flavor-access:discoverable": "", + "compute_extension:v3:flavor-access:remove_tenant_access": "rule:admin_api", + "compute_extension:v3:flavor-access:add_tenant_access": "rule:admin_api", + "compute_extension:flavor_disabled": "", + "compute_extension:flavor_rxtx": "", + "compute_extension:v3:os-flavor-rxtx": "", + "compute_extension:v3:os-flavor-rxtx:discoverable": "", + "compute_extension:flavor_swap": "", + "compute_extension:flavorextradata": "", + "compute_extension:flavorextraspecs:index": "", + "compute_extension:flavorextraspecs:show": "", + "compute_extension:flavorextraspecs:create": "rule:admin_api", + 
"compute_extension:flavorextraspecs:update": "rule:admin_api", + "compute_extension:flavorextraspecs:delete": "rule:admin_api", + "compute_extension:v3:flavors:discoverable": "", + "compute_extension:v3:flavor-extra-specs:discoverable": "", + "compute_extension:v3:flavor-extra-specs:index": "", + "compute_extension:v3:flavor-extra-specs:show": "", + "compute_extension:v3:flavor-extra-specs:create": "rule:admin_api", + "compute_extension:v3:flavor-extra-specs:update": "rule:admin_api", + "compute_extension:v3:flavor-extra-specs:delete": "rule:admin_api", + "compute_extension:flavormanage": "rule:admin_api", + "compute_extension:v3:flavor-manage": "rule:admin_api", + "compute_extension:floating_ip_dns": "", + "compute_extension:floating_ip_pools": "", + "compute_extension:floating_ips": "", + "compute_extension:floating_ips_bulk": "rule:admin_api", + "compute_extension:fping": "", + "compute_extension:fping:all_tenants": "rule:admin_api", + "compute_extension:hide_server_addresses": "is_admin:False", + "compute_extension:v3:os-hide-server-addresses": "is_admin:False", + "compute_extension:v3:os-hide-server-addresses:discoverable": "", + "compute_extension:hosts": "rule:admin_api", + "compute_extension:v3:os-hosts": "rule:admin_api", + "compute_extension:v3:os-hosts:discoverable": "", + "compute_extension:hypervisors": "rule:admin_api", + "compute_extension:v3:os-hypervisors": "rule:admin_api", + "compute_extension:v3:os-hypervisors:discoverable": "", + "compute_extension:image_size": "", + "compute_extension:instance_actions": "", + "compute_extension:v3:os-server-actions": "", + "compute_extension:v3:os-server-actions:discoverable": "", + "compute_extension:instance_actions:events": "rule:admin_api", + "compute_extension:v3:os-server-actions:events": "rule:admin_api", + "compute_extension:instance_usage_audit_log": "rule:admin_api", + "compute_extension:v3:ips:discoverable": "", + "compute_extension:keypairs": "", + "compute_extension:keypairs:index": "", + "compute_extension:keypairs:show": "", + "compute_extension:keypairs:create": "", + "compute_extension:keypairs:delete": "", + "compute_extension:v3:keypairs:discoverable": "", + "compute_extension:v3:keypairs": "", + "compute_extension:v3:keypairs:index": "", + "compute_extension:v3:keypairs:show": "", + "compute_extension:v3:keypairs:create": "", + "compute_extension:v3:keypairs:delete": "", + "compute_extension:v3:os-lock-server:discoverable": "", + "compute_extension:v3:os-lock-server:lock": "rule:admin_or_owner", + "compute_extension:v3:os-lock-server:unlock": "rule:admin_or_owner", + "compute_extension:v3:os-migrate-server:discoverable": "", + "compute_extension:v3:os-migrate-server:migrate": "rule:admin_api", + "compute_extension:v3:os-migrate-server:migrate_live": "rule:admin_api", + "compute_extension:multinic": "", + "compute_extension:v3:os-multinic": "", + "compute_extension:v3:os-multinic:discoverable": "", + "compute_extension:networks": "rule:admin_api", + "compute_extension:networks:view": "", + "compute_extension:networks_associate": "rule:admin_api", + "compute_extension:v3:os-pause-server:discoverable": "", + "compute_extension:v3:os-pause-server:pause": "rule:admin_or_owner", + "compute_extension:v3:os-pause-server:unpause": "rule:admin_or_owner", + "compute_extension:v3:os-pci:pci_servers": "", + "compute_extension:v3:os-pci:discoverable": "", + "compute_extension:v3:os-pci:index": "rule:admin_api", + "compute_extension:v3:os-pci:detail": "rule:admin_api", + "compute_extension:v3:os-pci:show": "rule:admin_api", + 
"compute_extension:quotas:show": "", + "compute_extension:quotas:update": "rule:admin_api", + "compute_extension:quotas:delete": "rule:admin_api", + "compute_extension:v3:os-quota-sets:discoverable": "", + "compute_extension:v3:os-quota-sets:show": "", + "compute_extension:v3:os-quota-sets:update": "rule:admin_api", + "compute_extension:v3:os-quota-sets:delete": "rule:admin_api", + "compute_extension:v3:os-quota-sets:detail": "rule:admin_api", + "compute_extension:quota_classes": "", + "compute_extension:rescue": "", + "compute_extension:v3:os-rescue": "", + "compute_extension:v3:os-rescue:discoverable": "", + "compute_extension:v3:os-scheduler-hints:discoverable": "", + "compute_extension:security_group_default_rules": "rule:admin_api", + "compute_extension:security_groups": "", + "compute_extension:v3:os-security-groups": "", + "compute_extension:v3:os-security-groups:discoverable": "", + "compute_extension:server_diagnostics": "rule:admin_api", + "compute_extension:v3:os-server-diagnostics": "rule:admin_api", + "compute_extension:v3:os-server-diagnostics:discoverable": "", + "compute_extension:server_groups": "", + "compute_extension:server_password": "", + "compute_extension:v3:os-server-password": "", + "compute_extension:v3:os-server-password:discoverable": "", + "compute_extension:server_usage": "", + "compute_extension:v3:os-server-usage": "", + "compute_extension:v3:os-server-usage:discoverable": "", + "compute_extension:services": "rule:admin_api", + "compute_extension:v3:os-services": "rule:admin_api", + "compute_extension:v3:os-services:discoverable": "", + "compute_extension:v3:server-metadata:discoverable": "", + "compute_extension:v3:servers:discoverable": "", + "compute_extension:shelve": "", + "compute_extension:shelveOffload": "rule:admin_api", + "compute_extension:v3:os-shelve:shelve": "", + "compute_extension:v3:os-shelve:shelve:discoverable": "", + "compute_extension:v3:os-shelve:shelve_offload": "rule:admin_api", + "compute_extension:simple_tenant_usage:show": "rule:admin_or_owner", + "compute_extension:v3:os-suspend-server:discoverable": "", + "compute_extension:v3:os-suspend-server:suspend": "rule:admin_or_owner", + "compute_extension:v3:os-suspend-server:resume": "rule:admin_or_owner", + "compute_extension:simple_tenant_usage:list": "rule:admin_api", + "compute_extension:unshelve": "", + "compute_extension:v3:os-shelve:unshelve": "", + "compute_extension:users": "rule:admin_api", + "compute_extension:v3:os-user-data:discoverable": "", + "compute_extension:virtual_interfaces": "", + "compute_extension:virtual_storage_arrays": "", + "compute_extension:volumes": "", + "compute_extension:volume_attachments:index": "", + "compute_extension:volume_attachments:show": "", + "compute_extension:volume_attachments:create": "", + "compute_extension:volume_attachments:update": "", + "compute_extension:volume_attachments:delete": "", + "compute_extension:volumetypes": "", + "compute_extension:availability_zone:list": "", + "compute_extension:v3:os-availability-zone:list": "", + "compute_extension:v3:os-availability-zone:discoverable": "", + "compute_extension:availability_zone:detail": "rule:admin_api", + "compute_extension:v3:os-availability-zone:detail": "rule:admin_api", + "compute_extension:used_limits_for_admin": "rule:admin_api", + "compute_extension:migrations:index": "rule:admin_api", + "compute_extension:v3:os-migrations:index": "rule:admin_api", + "compute_extension:v3:os-migrations:discoverable": "", + "compute_extension:os-assisted-volume-snapshots:create": 
"rule:admin_api", + "compute_extension:os-assisted-volume-snapshots:delete": "rule:admin_api", + "compute_extension:console_auth_tokens": "rule:admin_api", + "compute_extension:v3:os-console-auth-tokens": "rule:admin_api", + "compute_extension:os-server-external-events:create": "rule:admin_api", + "compute_extension:v3:os-server-external-events:create": "rule:admin_api", + + "volume:create": "", + "volume:get_all": "", + "volume:get_volume_metadata": "", + "volume:get_snapshot": "", + "volume:get_all_snapshots": "", + + + "volume_extension:types_manage": "rule:admin_api", + "volume_extension:types_extra_specs": "rule:admin_api", + "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", + "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", + "volume_extension:volume_admin_actions:force_delete": "rule:admin_api", + + + "network:get_all": "", + "network:get": "", + "network:create": "", + "network:delete": "", + "network:associate": "", + "network:disassociate": "", + "network:get_vifs_by_instance": "", + "network:allocate_for_instance": "", + "network:deallocate_for_instance": "", + "network:validate_networks": "", + "network:get_instance_uuids_by_ip_filter": "", + "network:get_instance_id_by_floating_address": "", + "network:setup_networks_on_host": "", + "network:get_backdoor_port": "", + + "network:get_floating_ip": "", + "network:get_floating_ip_pools": "", + "network:get_floating_ip_by_address": "", + "network:get_floating_ips_by_project": "", + "network:get_floating_ips_by_fixed_address": "", + "network:allocate_floating_ip": "", + "network:deallocate_floating_ip": "", + "network:associate_floating_ip": "", + "network:disassociate_floating_ip": "", + "network:release_floating_ip": "", + "network:migrate_instance_start": "", + "network:migrate_instance_finish": "", + + "network:get_fixed_ip": "", + "network:get_fixed_ip_by_address": "", + "network:add_fixed_ip_to_instance": "", + "network:remove_fixed_ip_from_instance": "", + "network:add_network_to_project": "", + "network:get_instance_nw_info": "", + + "network:get_dns_domains": "", + "network:add_dns_entry": "", + "network:modify_dns_entry": "", + "network:delete_dns_entry": "", + "network:get_dns_entries_by_address": "", + "network:get_dns_entries_by_name": "", + "network:create_private_dns_domain": "", + "network:create_public_dns_domain": "", + "network:delete_dns_domain": "" +} diff --git a/install-files/openstack/usr/share/openstack/openvswitch.yml b/install-files/openstack/usr/share/openstack/openvswitch.yml new file mode 100644 index 00000000..47257f7f --- /dev/null +++ b/install-files/openstack/usr/share/openstack/openvswitch.yml @@ -0,0 +1,38 @@ +--- +- hosts: localhost + tasks: + + - name: Create openvswitch directories + file: path={{ item }} state=directory + with_items: + - /etc/openvswitch + - /var/run/openvswitch + + - shell: > + ovsdb-tool create /etc/openvswitch/conf.db /usr/share/openvswitch/vswitch.ovsschema + creates=/etc/openvswitch/conf.db + + # We enable the openvswitch-db-server in a different task to identify + # the first time we run this script by identifying when we enable the + # unit. + # + # We need to identify this to initialise the database. 
+ - name: Enable openvswitch database service + service: name={{ item }} enabled=yes + with_items: + - openvswitch-db-server.service + register: openvswitch_db_enable + + - name: Start openvswitch database service + service: name={{ item }} state=started + with_items: + - openvswitch-db-server.service + + - name: initialise openvswitch-db + shell: ovs-vsctl --no-wait init + when: openvswitch_db_enable|changed + + - name: Enable and start Open vSwitch service + service: name={{ item }} enabled=yes state=started + with_items: + - openvswitch.service diff --git a/install-files/openstack/usr/share/openstack/postgres.yml b/install-files/openstack/usr/share/openstack/postgres.yml new file mode 100644 index 00000000..5ff9355e --- /dev/null +++ b/install-files/openstack/usr/share/openstack/postgres.yml @@ -0,0 +1,48 @@ +--- +- hosts: localhost + vars_files: + - "/etc/openstack/postgres.conf" + tasks: + + - name: Create postgres user + user: + name: postgres + comment: PostgreSQL Server + shell: /sbin/nologin + home: /var/lib/pgsql + + - name: Create the postgres directories + file: + path: "{{ item }}" + state: directory + owner: postgres + group: postgres + with_items: + - /var/run/postgresql + - /var/lib/pgsql/data + + - name: Initialise postgres database + command: pg_ctl -D /var/lib/pgsql/data initdb + args: + creates: /var/lib/pgsql/data/base + sudo: yes + sudo_user: postgres + + - name: Add the configuration needed for postgres for Openstack + template: + src: /usr/share/openstack/postgres/{{ item }} + dest: /var/lib/pgsql/data/{{ item }} + owner: postgres + group: postgres + mode: 0600 + with_items: + - postgresql.conf + - pg_hba.conf + + - name: Enable and start postgres services + service: + name: "{{ item }}" + enabled: yes + state: started + with_items: + - postgres-server diff --git a/install-files/openstack/usr/share/openstack/postgres/pg_hba.conf b/install-files/openstack/usr/share/openstack/postgres/pg_hba.conf new file mode 100644 index 00000000..78186924 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/postgres/pg_hba.conf @@ -0,0 +1,5 @@ +local all all trust +host all all 127.0.0.0/8 trust +host all all ::1/128 trust +host all all {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}/32 trust +host all all 0.0.0.0/0 md5 diff --git a/install-files/openstack/usr/share/openstack/postgres/postgresql.conf b/install-files/openstack/usr/share/openstack/postgres/postgresql.conf new file mode 100644 index 00000000..74153385 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/postgres/postgresql.conf @@ -0,0 +1,11 @@ +listen_addresses = '{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}' +max_connections = 100 +shared_buffers = 128MB +log_timezone = 'UTC' +datestyle = 'iso, mdy' +timezone = 'UTC' +lc_messages = 'C' +lc_monetary = 'C' +lc_numeric = 'C' +lc_time = 'C' +default_text_search_config = 'pg_catalog.english' diff --git a/install-files/openstack/usr/share/openstack/rabbitmq/rabbitmq-env.conf b/install-files/openstack/usr/share/openstack/rabbitmq/rabbitmq-env.conf new file mode 100644 index 00000000..d4c58dae --- /dev/null +++ b/install-files/openstack/usr/share/openstack/rabbitmq/rabbitmq-env.conf @@ -0,0 +1,3 @@ +# Configure port node where rabbitmq-server will listen from. 
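This file and the rabbitmq.config that follows configure the broker's listener and default credentials. For reference, a connection made with the pika client library (an assumption, not part of this patch; the literals stand in for the RABBITMQ_* template values) exercises exactly the listener configured here:

    import pika

    # Stand-ins for RABBITMQ_USER / RABBITMQ_PASSWORD / RABBITMQ_PORT.
    credentials = pika.PlainCredentials('guest', 'guest')
    params = pika.ConnectionParameters(host='localhost', port=5672,
                                       credentials=credentials)

    # Opening a connection confirms the broker listens on NODE_PORT.
    connection = pika.BlockingConnection(params)
    connection.close()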
+NODE_PORT={{ RABBITMQ_PORT }} +CONFIG_FILE=/etc/rabbitmq/rabbitmq diff --git a/install-files/openstack/usr/share/openstack/rabbitmq/rabbitmq.config b/install-files/openstack/usr/share/openstack/rabbitmq/rabbitmq.config new file mode 100644 index 00000000..9b93881e --- /dev/null +++ b/install-files/openstack/usr/share/openstack/rabbitmq/rabbitmq.config @@ -0,0 +1,9 @@ +%% -*- Rabbit configuration for Openstack in Baserock +[ + {rabbit, + [ + {default_user, <<"{{ RABBITMQ_USER }}">>}, + {default_pass, <<"{{ RABBITMQ_PASSWORD }}">>}, + {tcp_listeners, [{{ RABBITMQ_PORT }}]} + ]} +]. diff --git a/install-files/openstack/usr/share/openstack/swift-controller.yml b/install-files/openstack/usr/share/openstack/swift-controller.yml new file mode 100644 index 00000000..690de087 --- /dev/null +++ b/install-files/openstack/usr/share/openstack/swift-controller.yml @@ -0,0 +1,52 @@ +--- +- hosts: localhost + vars_files: + - swift-controller-vars.yml + vars: + - ring_name_port_map: + account: + port: 6002 + container: + port: 6001 + object: + port: 6000 + remote_user: root + tasks: + - user: + name: swift + comment: Swift user + + - file: + path: /etc/swift + owner: swift + group: swift + state: directory + + - template: + src: /usr/share/swift/etc/swift/proxy-server.j2 + dest: /etc/swift/proxy-server.conf + mode: 0644 + owner: swift + group: swift + + - keystone_user: + user: swift + password: "{{ SWIFT_ADMIN_PASSWORD }}" + tenant: service + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - keystone_user: + role: admin + user: swift + tenant: service + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" + + - keystone_service: + name: swift + type: object-store + description: OpenStack Object Storage + publicurl: http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:8080/v1/AUTH_%(tenant_id)s + internalurl: http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:8080/v1/AUTH_%(tenant_id)s + adminurl: http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:8080 + region: regionOne + token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" diff --git a/install-files/openstack/usr/share/swift/etc/rsyncd.j2 b/install-files/openstack/usr/share/swift/etc/rsyncd.j2 new file mode 100644 index 00000000..c0657665 --- /dev/null +++ b/install-files/openstack/usr/share/swift/etc/rsyncd.j2 @@ -0,0 +1,23 @@ +uid = swift +gid = swift +log file = /var/log/rsyncd.log +pid file = /var/run/rsyncd.pid +address = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} + +[account] +max connections = 2 +path = /srv/node/ +read only = false +lock file = /var/lock/account.lock + +[container] +max connections = 2 +path = /srv/node/ +read only = false +lock file = /var/lock/container.lock + +[object] +max connections = 2 +path = /srv/node/ +read only = false +lock file = /var/lock/object.lock diff --git a/install-files/openstack/usr/share/swift/etc/swift/proxy-server.j2 b/install-files/openstack/usr/share/swift/etc/swift/proxy-server.j2 new file mode 100644 index 00000000..dda82d5a --- /dev/null +++ b/install-files/openstack/usr/share/swift/etc/swift/proxy-server.j2 @@ -0,0 +1,630 @@ +[DEFAULT] +# bind_ip = 0.0.0.0 +bind_port = 8080 +# bind_timeout = 30 +# backlog = 4096 +swift_dir = /etc/swift +user = swift + +# Enables exposing configuration settings via HTTP GET /info. +# expose_info = true + +# Key to use for admin calls that are HMAC signed. Default is empty, +# which will disable admin calls to /info. +# admin_key = secret_admin_key +# +# Allows the ability to withhold sections from showing up in the public calls +# to /info. 
+# You can withhold subsections by separating the dict level with a
+# ".". The following would cause the sections 'container_quotas' and 'tempurl'
+# to not be listed, and the key max_failed_deletes would be removed from
+# bulk_delete. Default is empty, allowing all registered features to be listed
+# via HTTP GET /info.
+# disallowed_sections = container_quotas, tempurl, bulk_delete.max_failed_deletes
+
+# Use an integer to override the number of pre-forked processes that will
+# accept connections. Should default to the number of effective cpu
+# cores in the system. It's worth noting that individual workers will
+# use many eventlet co-routines to service multiple concurrent requests.
+# workers = auto
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
+# Set the following two lines to enable SSL. This is for testing only.
+# cert_file = /etc/swift/proxy.crt
+# key_file = /etc/swift/proxy.key
+#
+# expiring_objects_container_divisor = 86400
+# expiring_objects_account_name = expiring_objects
+#
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_headers = false
+# log_address = /dev/log
+# The following caps the length of log lines to the value given; no limit if
+# set to 0, the default.
+# log_max_line_length = 0
+#
+# This optional suffix (default is empty) is appended to the swift transaction
+# id, allowing one to easily figure out which cluster an X-Trans-Id belongs to.
+# This is very useful when one is managing more than one swift cluster.
+# trans_id_suffix =
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
+# adapted_logger
+# log_custom_handlers =
+#
+# If set, log_udp_host will override log_address
+# log_udp_host =
+# log_udp_port = 514
+#
+# You can enable StatsD logging here:
+# log_statsd_host = localhost
+# log_statsd_port = 8125
+# log_statsd_default_sample_rate = 1.0
+# log_statsd_sample_rate_factor = 1.0
+# log_statsd_metric_prefix =
+#
+# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)
+# cors_allow_origin =
+# strict_cors_mode = True
+#
+# client_timeout = 60
+# eventlet_debug = false
+
+[pipeline:main]
+#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo proxy-logging proxy-server
+pipeline = authtoken cache healthcheck keystoneauth proxy-logging proxy-server
+
+[app:proxy-server]
+use = egg:swift#proxy
+# You can override the default log routing for this app here:
+# set log_name = proxy-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_address = /dev/log
+#
+# log_handoffs = true
+# recheck_account_existence = 60
+# recheck_container_existence = 60
+# object_chunk_size = 65536
+# client_chunk_size = 65536
+#
+# How long the proxy server will wait on responses from the a/c/o servers.
+# node_timeout = 10
+#
+# How long the proxy server will wait for an initial response and to read a
+# chunk of data from the object servers while serving GET / HEAD requests.
+# Timeouts from these requests can be recovered from so setting this to
+# something lower than node_timeout would provide quicker error recovery
+# while allowing for a longer timeout for non-recoverable requests (PUTs).
+# Defaults to node_timeout, should be overridden if node_timeout is set to a
+# high number to prevent client timeouts from firing before the proxy server
+# has a chance to retry.
+# recoverable_node_timeout = node_timeout
+#
+# conn_timeout = 0.5
+#
+# How long to wait for requests to finish after a quorum has been established.
+# post_quorum_timeout = 0.5
+#
+# How long without an error before a node's error count is reset. This will
+# also be how long before a node is reenabled after suppression is triggered.
+# error_suppression_interval = 60
+#
+# How many errors can accumulate before a node is temporarily ignored.
+# error_suppression_limit = 10
+#
+# If set to 'true' any authorized user may create and delete accounts; if
+# 'false' no one, even authorized, can.
+allow_account_management = true
+#
+# Set object_post_as_copy = false to turn on fast posts where only the metadata
+# changes are stored anew and the original data file is kept in place. This
+# makes for quicker posts; but since the container metadata isn't updated in
+# this mode, features like container sync won't be able to sync posts.
+# object_post_as_copy = true
+#
+# If set to 'true' authorized accounts that do not yet exist within the Swift
+# cluster will be automatically created.
+account_autocreate = true
+#
+# If set to a positive value, trying to create a container when the account
+# already has at least this maximum containers will result in a 403 Forbidden.
+# Note: This is a soft limit, meaning a user might exceed the cap for
+# recheck_account_existence before the 403s kick in.
+# max_containers_per_account = 0
+#
+# This is a comma separated list of account hashes that ignore the
+# max_containers_per_account cap.
+# max_containers_whitelist =
+#
+# Comma separated list of Host headers to which the proxy will deny requests.
+# deny_host_headers =
+#
+# Prefix used when automatically creating accounts.
+# auto_create_account_prefix = .
+#
+# Depth of the proxy put queue.
+# put_queue_depth = 10
+#
+# Storage nodes can be chosen at random (shuffle), by using timing
+# measurements (timing), or by using an explicit match (affinity).
+# Using timing measurements may allow for lower overall latency, while
+# using affinity allows for finer control. In both the timing and
+# affinity cases, equally-sorting nodes are still randomly chosen to
+# spread load.
+# The valid values for sorting_method are "affinity", "shuffle", and "timing".
+# sorting_method = shuffle
+#
+# If the "timing" sorting_method is used, the timings will only be valid for
+# the number of seconds configured by timing_expiry.
+# timing_expiry = 300
+#
+# The maximum time (seconds) that a large object connection is allowed to last.
+# max_large_object_get_time = 86400
+#
+# Set to the number of nodes to contact for a normal request. You can use
+# '* replicas' at the end to have it use the number given times the number of
+# replicas for the ring being used for the request.
+# request_node_count = 2 * replicas
+#
+# Which backend servers to prefer on reads. Format is r<N> for region
+# N or r<N>z<M> for region N, zone M. The value after the equals is
+# the priority; lower numbers are higher priority.
+#
+# Example: first read from region 1 zone 1, then region 1 zone 2, then
+# anything in region 2, then everything else:
+# read_affinity = r1z1=100, r1z2=200, r2=300
+# Default is empty, meaning no preference.
+# read_affinity =
+#
+# Which backend servers to prefer on writes. Format is r<N> for region
+# N or r<N>z<M> for region N, zone M. If this is set, then when
+# handling an object PUT request, some number (see setting
+# write_affinity_node_count) of local backend servers will be tried
+# before any nonlocal ones.
+#
+# Example: try to write to regions 1 and 2 before writing to any other
+# nodes:
+# write_affinity = r1, r2
+# Default is empty, meaning no preference.
+# write_affinity =
+#
+# The number of local (as governed by the write_affinity setting)
+# nodes to attempt to contact first, before any non-local ones. You
+# can use '* replicas' at the end to have it use the number given
+# times the number of replicas for the ring being used for the
+# request.
+# write_affinity_node_count = 2 * replicas
+#
+# These are the headers whose values will only be shown to swift_owners. The
+# exact definition of a swift_owner is up to the auth system in use, but
+# usually indicates administrative responsibilities.
+# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-account-access-control
+
+[filter:tempauth]
+use = egg:swift#tempauth
+# You can override the default log routing for this filter here:
+# set log_name = tempauth
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# The reseller prefix will verify a token begins with this prefix before even
+# attempting to validate it. Also, with authorization, only Swift storage
+# accounts with this prefix will be authorized by this middleware. Useful if
+# multiple auth systems are in use for one Swift cluster.
+# reseller_prefix = AUTH
+#
+# The auth prefix will cause requests beginning with this prefix to be routed
+# to the auth subsystem, for granting tokens, etc.
+# auth_prefix = /auth/
+# token_life = 86400
+#
+# This allows middleware higher in the WSGI pipeline to override auth
+# processing, useful for middleware such as tempurl and formpost. If you know
+# you're not going to use such middleware and you want a bit of extra security,
+# you can set this to false.
+# allow_overrides = true
+#
+# This specifies what scheme to return with storage urls:
+# http, https, or default (chooses based on what the server is running as)
+# This can be useful with an SSL load balancer in front of a non-SSL server.
+# storage_url_scheme = default
+#
+# Lastly, you need to list all the accounts/users you want here. The format is:
+# user_<account>_<user> = <key> [group] [group] [...] [storage_url]
+# or if you want underscores in <account> or <user>, you can base64 encode them
+# (with no equal signs) and use this format:
+# user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
+# There are special groups of:
+# .reseller_admin = can do anything to any account for this auth
+# .admin = can do anything within the account
+# If neither of these groups are specified, the user can only access containers
+# that have been explicitly allowed for them by a .admin or .reseller_admin.
+# The trailing optional storage_url allows you to specify an alternate url to
+# hand back to the user upon authentication. If not specified, this defaults to
+# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
+# to what the requester would need to use to reach this host.
+# Here are example entries, required for running the tests: +user_admin_admin = admin .admin .reseller_admin +user_test_tester = testing .admin +user_test2_tester2 = testing2 .admin +user_test_tester3 = testing3 + +# To enable Keystone authentication you need to have the auth token +# middleware first to be configured. Here is an example below, please +# refer to the keystone's documentation for details about the +# different settings. +# +# You'll need to have as well the keystoneauth middleware enabled +# and have it in your main pipeline so instead of having tempauth in +# there you can change it to: authtoken keystoneauth +# +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory +# auth_host = keystonehost +# auth_port = 35357 +# auth_protocol = http +# auth_uri = http://keystonehost:5000/ +#auth_uri = http://controller:5000/v2.0 +auth_uri = http://127.0.0.1:5000/v2.0 +identity_uri = http://127.0.0.1:35357 +admin_tenant_name = service +admin_user = swift +admin_password = {{ SWIFT_ADMIN_PASSWORD }} +delay_auth_decision = 1 +# cache = swift.cache +# include_service_catalog = False +# +[filter:keystoneauth] +use = egg:swift#keystoneauth +# Operator roles is the role which user would be allowed to manage a +# tenant and be able to create container or give ACL to others. +# operator_roles = admin, swiftoperator +operator_roles = admin, _member_ +# The reseller admin role has the ability to create and delete accounts +# reseller_admin_role = ResellerAdmin +# For backwards compatibility, keystoneauth will match names in cross-tenant +# access control lists (ACLs) when both the requesting user and the tenant +# are in the default domain i.e the domain to which existing tenants are +# migrated. The default_domain_id value configured here should be the same as +# the value used during migration of tenants to keystone domains. +# default_domain_id = default +# For a new installation, or an installation in which keystone projects may +# move between domains, you should disable backwards compatible name matching +# in ACLs by setting allow_names_in_acls to false: +# allow_names_in_acls = true + +[filter:healthcheck] +use = egg:swift#healthcheck +# An optional filesystem path, which if present, will cause the healthcheck +# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE". +# This facility may be used to temporarily remove a Swift node from a load +# balancer pool during maintenance or upgrade (remove the file to allow the +# node back into the load balancer pool). +# disable_path = + +[filter:cache] +use = egg:swift#memcache +# You can override the default log routing for this filter here: +# set log_name = cache +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# If not set here, the value for memcache_servers will be read from +# memcache.conf (see memcache.conf-sample) or lacking that file, it will +# default to the value below. You can specify multiple servers separated with +# commas, as in: 10.1.2.3:11211,10.1.2.4:11211 +memcache_servers = 127.0.0.1:11211 +# +# Sets how memcache values are serialized and deserialized: +# 0 = older, insecure pickle serialization +# 1 = json serialization but pickles can still be read (still insecure) +# 2 = json serialization only (secure and the default) +# If not set here, the value for memcache_serialization_support will be read +# from /etc/swift/memcache.conf (see memcache.conf-sample). 
+# To avoid an instant full cache flush, existing installations should +# upgrade with 0, then set to 1 and reload, then after some time (24 hours) +# set to 2 and reload. +# In the future, the ability to use pickle serialization will be removed. +# memcache_serialization_support = 2 +# +# Sets the maximum number of connections to each memcached server per worker +# memcache_max_connections = 2 + +[filter:ratelimit] +use = egg:swift#ratelimit +# You can override the default log routing for this filter here: +# set log_name = ratelimit +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# clock_accuracy should represent how accurate the proxy servers' system clocks +# are with each other. 1000 means that all the proxies' clock are accurate to +# each other within 1 millisecond. No ratelimit should be higher than the +# clock accuracy. +# clock_accuracy = 1000 +# +# max_sleep_time_seconds = 60 +# +# log_sleep_time_seconds of 0 means disabled +# log_sleep_time_seconds = 0 +# +# allows for slow rates (e.g. running up to 5 sec's behind) to catch up. +# rate_buffer_seconds = 5 +# +# account_ratelimit of 0 means disabled +# account_ratelimit = 0 + +# these are comma separated lists of account names +# account_whitelist = a,b +# account_blacklist = c,d + +# with container_limit_x = r +# for containers of size x limit write requests per second to r. The container +# rate will be linearly interpolated from the values given. With the values +# below, a container of size 5 will get a rate of 75. +# container_ratelimit_0 = 100 +# container_ratelimit_10 = 50 +# container_ratelimit_50 = 20 + +# Similarly to the above container-level write limits, the following will limit +# container GET (listing) requests. +# container_listing_ratelimit_0 = 100 +# container_listing_ratelimit_10 = 50 +# container_listing_ratelimit_50 = 20 + +[filter:domain_remap] +use = egg:swift#domain_remap +# You can override the default log routing for this filter here: +# set log_name = domain_remap +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# storage_domain = example.com +# path_root = v1 +# reseller_prefixes = AUTH + +[filter:catch_errors] +use = egg:swift#catch_errors +# You can override the default log routing for this filter here: +# set log_name = catch_errors +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log + +[filter:cname_lookup] +# Note: this middleware requires python-dnspython +use = egg:swift#cname_lookup +# You can override the default log routing for this filter here: +# set log_name = cname_lookup +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# Specify the storage_domain that match your cloud, multiple domains +# can be specified separated by a comma +# storage_domain = example.com +# +# lookup_depth = 1 + +# Note: Put staticweb just after your auth filter(s) in the pipeline +[filter:staticweb] +use = egg:swift#staticweb + +# Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline +[filter:tempurl] +use = egg:swift#tempurl +# The methods allowed with Temp URLs. +# methods = GET HEAD PUT POST DELETE +# +# The headers to remove from incoming requests. Simply a whitespace delimited +# list of header names and names can optionally end with '*' to indicate a +# prefix match. 
incoming_allow_headers is a list of exceptions to these +# removals. +# incoming_remove_headers = x-timestamp +# +# The headers allowed as exceptions to incoming_remove_headers. Simply a +# whitespace delimited list of header names and names can optionally end with +# '*' to indicate a prefix match. +# incoming_allow_headers = +# +# The headers to remove from outgoing responses. Simply a whitespace delimited +# list of header names and names can optionally end with '*' to indicate a +# prefix match. outgoing_allow_headers is a list of exceptions to these +# removals. +# outgoing_remove_headers = x-object-meta-* +# +# The headers allowed as exceptions to outgoing_remove_headers. Simply a +# whitespace delimited list of header names and names can optionally end with +# '*' to indicate a prefix match. +# outgoing_allow_headers = x-object-meta-public-* + +# Note: Put formpost just before your auth filter(s) in the pipeline +[filter:formpost] +use = egg:swift#formpost + +# Note: Just needs to be placed before the proxy-server in the pipeline. +[filter:name_check] +use = egg:swift#name_check +# forbidden_chars = '"`<> +# maximum_length = 255 +# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$ + +[filter:list-endpoints] +use = egg:swift#list_endpoints +# list_endpoints_path = /endpoints/ + +[filter:proxy-logging] +use = egg:swift#proxy_logging +# If not set, logging directives from [DEFAULT] without "access_" will be used +# access_log_name = swift +# access_log_facility = LOG_LOCAL0 +# access_log_level = INFO +# access_log_address = /dev/log +# +# If set, access_log_udp_host will override access_log_address +# access_log_udp_host = +# access_log_udp_port = 514 +# +# You can use log_statsd_* from [DEFAULT] or override them here: +# access_log_statsd_host = localhost +# access_log_statsd_port = 8125 +# access_log_statsd_default_sample_rate = 1.0 +# access_log_statsd_sample_rate_factor = 1.0 +# access_log_statsd_metric_prefix = +# access_log_headers = false +# +# If access_log_headers is True and access_log_headers_only is set only +# these headers are logged. Multiple headers can be defined as comma separated +# list like this: access_log_headers_only = Host, X-Object-Meta-Mtime +# access_log_headers_only = +# +# By default, the X-Auth-Token is logged. To obscure the value, +# set reveal_sensitive_prefix to the number of characters to log. +# For example, if set to 12, only the first 12 characters of the +# token appear in the log. An unauthorized access of the log file +# won't allow unauthorized usage of the token. However, the first +# 12 or so characters is unique enough that you can trace/debug +# token usage. Set to 0 to suppress the token completely (replaced +# by '...' in the log). +# Note: reveal_sensitive_prefix will not affect the value +# logged with access_log_headers=True. +# reveal_sensitive_prefix = 16 +# +# What HTTP methods are allowed for StatsD logging (comma-sep); request methods +# not in this list will have "BAD_METHOD" for the portion of the metric. +# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS +# +# Note: The double proxy-logging in the pipeline is not a mistake. The +# left-most proxy-logging is there to log requests that were handled in +# middleware and never made it through to the right-most middleware (and +# proxy server). Double logging is prevented for normal requests. See +# proxy-logging docs. + +# Note: Put before both ratelimit and auth in the pipeline. 
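+# As a hedged illustration of what the bulk middleware below enables (the
+# token and container names here are invented): a client can delete several
+# objects in one request by POSTing a newline-separated list of paths:
+#
+#   curl -X POST "http://127.0.0.1:8080/v1/AUTH_test?bulk-delete" \
+#        -H "X-Auth-Token: <token>" -H "Content-Type: text/plain" \
+#        --data-binary $'/mycontainer/obj1\n/mycontainer/obj2'
+#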
+[filter:bulk]
+use = egg:swift#bulk
+# max_containers_per_extraction = 10000
+# max_failed_extractions = 1000
+# max_deletes_per_request = 10000
+# max_failed_deletes = 1000
+
+# In order to keep a connection active during a potentially long bulk request,
+# Swift may return whitespace prepended to the actual response body. This
+# whitespace will be yielded no more than every yield_frequency seconds.
+# yield_frequency = 10
+
+# Note: The following parameter is used during a bulk delete of objects and
+# their container. Such a delete would frequently fail because it is very
+# likely that not all replicated objects have been deleted by the time the
+# middleware gets a successful response. The number of retries can be
+# configured here; the wait between each retry will be 1.5**retry seconds.
+
+# delete_container_retry_count = 0
+
+# Note: Put after auth in the pipeline.
+[filter:container-quotas]
+use = egg:swift#container_quotas
+
+# Note: Put after auth and staticweb in the pipeline.
+[filter:slo]
+use = egg:swift#slo
+# max_manifest_segments = 1000
+# max_manifest_size = 2097152
+# min_segment_size = 1048576
+# Start rate-limiting SLO segment serving after the Nth segment of a
+# segmented object.
+# rate_limit_after_segment = 10
+#
+# Once segment rate-limiting kicks in for an object, limit segments served
+# to N per second. 0 means no rate-limiting.
+# rate_limit_segments_per_sec = 0
+#
+# Time limit on GET requests (seconds)
+# max_get_time = 86400
+
+# Note: Put after auth and staticweb in the pipeline.
+# If you don't put it in the pipeline, it will be inserted for you.
+[filter:dlo]
+use = egg:swift#dlo
+# Start rate-limiting DLO segment serving after the Nth segment of a
+# segmented object.
+# rate_limit_after_segment = 10
+#
+# Once segment rate-limiting kicks in for an object, limit segments served
+# to N per second. 0 means no rate-limiting.
+# rate_limit_segments_per_sec = 1
+#
+# Time limit on GET requests (seconds)
+# max_get_time = 86400
+
+[filter:account-quotas]
+use = egg:swift#account_quotas
+
+[filter:gatekeeper]
+use = egg:swift#gatekeeper
+# You can override the default log routing for this filter here:
+# set log_name = gatekeeper
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+
+[filter:container_sync]
+use = egg:swift#container_sync
+# Set this to false if you want to disallow any full url values to be set for
+# any new X-Container-Sync-To headers. This will keep any new full urls from
+# coming in, but won't change any existing values already in the cluster.
+# Updating those will have to be done manually, as knowing what the true realm
+# endpoint should be cannot always be guessed.
+# allow_full_urls = true
+# Set this to specify this cluster's //realm/cluster as "current" in /info
+# current = //REALM/CLUSTER
+
+# Note: Put it at the beginning of the pipeline to profile all middleware. But
+# it is safer to put this after catch_errors, gatekeeper and healthcheck.
+[filter:xprofile]
+use = egg:swift#xprofile
+# This option enables you to switch profilers; the chosen one should inherit
+# from the python standard profiler. Currently the supported values are
+# 'cProfile', 'eventlet.green.profile', etc.
+# profile_module = eventlet.green.profile
+#
+# This prefix will be used to combine process ID and timestamp to name the
+# profile data file. Make sure the executing user has permission to write
+# into this path (missing path segments will be created, if necessary).
+# If you enable profiling in more than one type of daemon, you must override +# it with an unique value like: /var/log/swift/profile/proxy.profile +# log_filename_prefix = /tmp/log/swift/profile/default.profile +# +# the profile data will be dumped to local disk based on above naming rule +# in this interval. +# dump_interval = 5.0 +# +# Be careful, this option will enable profiler to dump data into the file with +# time stamp which means there will be lots of files piled up in the directory. +# dump_timestamp = false +# +# This is the path of the URL to access the mini web UI. +# path = /__profile__ +# +# Clear the data when the wsgi server shutdown. +# flush_at_shutdown = false +# +# unwind the iterator of applications +# unwind = false diff --git a/install-files/swift/etc/ntp.conf b/install-files/swift/etc/ntp.conf new file mode 100644 index 00000000..54522871 --- /dev/null +++ b/install-files/swift/etc/ntp.conf @@ -0,0 +1,25 @@ +{% if SWIFT_CONTROLLER is undefined or SWIFT_CONTROLLER == "False" %} +server {{ CONTROLLER_HOST_ADDRESS }} iburst +{% else %} +# We use iburst here to reduce the potential initial delay to set the clock +server 0.pool.ntp.org iburst +server 1.pool.ntp.org iburst +server 2.pool.ntp.org iburst +server 3.pool.ntp.org iburst + +# kod - notify client when packets are denied service, +# rather than just dropping the packets +# +# nomodify - deny queries which attempt to modify the state of the server +# +# notrap - decline to provide mode 6 control message trap service to +# matching hosts +# +# see ntp.conf(5) for more details +restrict -4 default kod notrap nomodify +restrict -6 default kod notrap nomodify +{% endif %} + +# The default rlimit isn't enough in some cases +# so we set a higher limit here +rlimit memlock 256 diff --git a/install-files/swift/manifest b/install-files/swift/manifest new file mode 100644 index 00000000..7fd76206 --- /dev/null +++ b/install-files/swift/manifest @@ -0,0 +1,15 @@ +0040755 0 0 /usr/share +0040755 0 0 /usr/share/swift +0100644 0 0 /usr/share/swift/hosts +0100644 0 0 /usr/share/swift/swift-storage.yml +0040755 0 0 /usr/share/swift/etc +0040755 0 0 /usr/share/swift/etc/swift +0100644 0 0 /usr/share/swift/etc/swift/account-server.j2 +0100644 0 0 /usr/share/swift/etc/swift/swift.j2 +0100644 0 0 /usr/share/swift/etc/swift/object-server.j2 +0100644 0 0 /usr/share/swift/etc/swift/container-server.j2 +0100644 0 0 /usr/share/swift/etc/rsyncd.j2 +0100644 0 0 /usr/lib/systemd/system/swift-storage-setup.service +0100644 0 0 /usr/lib/systemd/system/swift-storage.service +template overwrite 0100644 0 0 /etc/ntp.conf +overwrite 0100644 0 0 /usr/lib/systemd/system/rsync.service diff --git a/install-files/swift/usr/lib/systemd/system/rsync.service b/install-files/swift/usr/lib/systemd/system/rsync.service new file mode 100644 index 00000000..babcfb46 --- /dev/null +++ b/install-files/swift/usr/lib/systemd/system/rsync.service @@ -0,0 +1,11 @@ +[Unit] +Description=fast remote file copy program daemon +After=swift-storage-setup.service +ConditionPathExists=/etc/rsyncd.conf + +[Service] +ExecStart=/usr/bin/rsync --daemon --no-detach +Restart=on-failure + +[Install] +WantedBy=multi-user.target diff --git a/install-files/swift/usr/lib/systemd/system/swift-storage-setup.service b/install-files/swift/usr/lib/systemd/system/swift-storage-setup.service new file mode 100644 index 00000000..3df31163 --- /dev/null +++ b/install-files/swift/usr/lib/systemd/system/swift-storage-setup.service @@ -0,0 +1,12 @@ +[Unit] +Description=Run 
openstack-swift-storage-setup (once) +After=local-fs.target postgres-server-setup.service + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/ansible-playbook -v -i /usr/share/swift/hosts /usr/share/swift/swift-storage.yml +Restart=no + +[Install] +WantedBy=multi-user.target diff --git a/install-files/swift/usr/lib/systemd/system/swift-storage.service b/install-files/swift/usr/lib/systemd/system/swift-storage.service new file mode 100644 index 00000000..dc41d3bc --- /dev/null +++ b/install-files/swift/usr/lib/systemd/system/swift-storage.service @@ -0,0 +1,12 @@ +[Unit] +Description=OpenStack Swift Storage +After=syslog.target network.target swift-storage-setup.service + +[Service] +Type=forking +Restart=on-failure +ExecStart=/usr/bin/swift-init all start +ExecStop=/usr/bin/swift-init all stop + +[Install] +WantedBy=multi-user.target diff --git a/install-files/swift/usr/share/swift/etc/rsyncd.j2 b/install-files/swift/usr/share/swift/etc/rsyncd.j2 new file mode 100644 index 00000000..c0657665 --- /dev/null +++ b/install-files/swift/usr/share/swift/etc/rsyncd.j2 @@ -0,0 +1,23 @@ +uid = swift +gid = swift +log file = /var/log/rsyncd.log +pid file = /var/run/rsyncd.pid +address = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} + +[account] +max connections = 2 +path = /srv/node/ +read only = false +lock file = /var/lock/account.lock + +[container] +max connections = 2 +path = /srv/node/ +read only = false +lock file = /var/lock/container.lock + +[object] +max connections = 2 +path = /srv/node/ +read only = false +lock file = /var/lock/object.lock diff --git a/install-files/swift/usr/share/swift/etc/swift/account-server.j2 b/install-files/swift/usr/share/swift/etc/swift/account-server.j2 new file mode 100644 index 00000000..d977e295 --- /dev/null +++ b/install-files/swift/usr/share/swift/etc/swift/account-server.j2 @@ -0,0 +1,192 @@ +[DEFAULT] +# bind_ip = 0.0.0.0 +bind_ip = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} +bind_port = 6002 +# bind_timeout = 30 +# backlog = 4096 +user = swift +swift_dir = /etc/swift +devices = /srv/node +# mount_check = true +# disable_fallocate = false +# +# Use an integer to override the number of pre-forked processes that will +# accept connections. +# workers = auto +# +# Maximum concurrent requests per worker +# max_clients = 1024 +# +# You can specify default log routing here if you want: +# log_name = swift +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# The following caps the length of log lines to the value given; no limit if +# set to 0, the default. +# log_max_line_length = 0 +# +# comma separated list of functions to call to setup custom log handlers. +# functions get passed: conf, name, log_to_console, log_route, fmt, logger, +# adapted_logger +# log_custom_handlers = +# +# If set, log_udp_host will override log_address +# log_udp_host = +# log_udp_port = 514 +# +# You can enable StatsD logging here: +# log_statsd_host = localhost +# log_statsd_port = 8125 +# log_statsd_default_sample_rate = 1.0 +# log_statsd_sample_rate_factor = 1.0 +# log_statsd_metric_prefix = +# +# If you don't mind the extra disk space usage in overhead, you can turn this +# on to preallocate disk space with SQLite databases to decrease fragmentation. +# db_preallocation = off +# +# eventlet_debug = false +# +# You can set fallocate_reserve to the number of bytes you'd like fallocate to +# reserve, whether there is space for the given file size or not. 
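+# (Illustrative, assumed figure: setting fallocate_reserve = 10737418240
+# would stop new writes once a device has less than ~10 GiB free.)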
+# fallocate_reserve = 0
+
+[pipeline:main]
+pipeline = healthcheck recon account-server
+
+[app:account-server]
+use = egg:swift#account
+# You can override the default log routing for this app here:
+# set log_name = account-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_requests = true
+# set log_address = /dev/log
+#
+# auto_create_account_prefix = .
+#
+# Configure parameter for creating specific server
+# To handle all verbs, including replication verbs, do not specify
+# "replication_server" (this is the default). To only handle replication,
+# set to a True value (e.g. "True" or "1"). To handle only non-replication
+# verbs, set to "False". Unless you have a separate replication network, you
+# should not specify any value for "replication_server".
+# replication_server = false
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+# An optional filesystem path, which if present, will cause the healthcheck
+# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
+# disable_path =
+
+[filter:recon]
+use = egg:swift#recon
+recon_cache_path = /var/cache/swift
+
+[account-replicator]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = account-replicator
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# vm_test_mode = no
+# per_diff = 1000
+# max_diffs = 100
+# concurrency = 8
+# interval = 30
+#
+# How long without an error before a node's error count is reset. This will
+# also be how long before a node is reenabled after suppression is triggered.
+# error_suppression_interval = 60
+#
+# How many errors can accumulate before a node is temporarily ignored.
+# error_suppression_limit = 10
+#
+# node_timeout = 10
+# conn_timeout = 0.5
+#
+# The replicator also performs reclamation
+# reclaim_age = 604800
+#
+# Time in seconds to wait between replication passes
+# Note: if the parameter 'interval' is defined then it will be used in place
+# of run_pause.
+# run_pause = 30
+#
+# recon_cache_path = /var/cache/swift
+
+[account-auditor]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = account-auditor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# Will audit each account at most once per interval
+# interval = 1800
+#
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# accounts_per_second = 200
+# recon_cache_path = /var/cache/swift
+
+[account-reaper]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = account-reaper
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# concurrency = 25
+# interval = 3600
+# node_timeout = 10
+# conn_timeout = 0.5
+#
+# Normally, the reaper begins deleting account information for deleted accounts
+# immediately; you can set this to delay its work, however. The value is in
+# seconds; 2592000 = 30 days for example.
+# delay_reaping = 0
+#
+# If the account fails to be reaped due to a persistent error, the
+# account reaper will log a message such as:
+# Account <name> has not been reaped since <time>
+# You can search logs for this message if space is not being reclaimed
+# after you delete account(s).
+# Default is 2592000 seconds (30 days). This is in addition to any time
+# requested by delay_reaping.
+# reap_warn_after = 2592000
+
+# Note: Put it at the beginning of the pipeline to profile all middleware. But
+# it is safer to put this after healthcheck.
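+#
+# For example (an assumed layout, not part of the shipped config), enabling
+# the profiler behind healthcheck would look like:
+#
+#   pipeline = healthcheck xprofile recon account-server
+#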
+[filter:xprofile] +use = egg:swift#xprofile +# This option enable you to switch profilers which should inherit from python +# standard profiler. Currently the supported value can be 'cProfile', +# 'eventlet.green.profile' etc. +# profile_module = eventlet.green.profile +# +# This prefix will be used to combine process ID and timestamp to name the +# profile data file. Make sure the executing user has permission to write +# into this path (missing path segments will be created, if necessary). +# If you enable profiling in more than one type of daemon, you must override +# it with an unique value like: /var/log/swift/profile/account.profile +# log_filename_prefix = /tmp/log/swift/profile/default.profile +# +# the profile data will be dumped to local disk based on above naming rule +# in this interval. +# dump_interval = 5.0 +# +# Be careful, this option will enable profiler to dump data into the file with +# time stamp which means there will be lots of files piled up in the directory. +# dump_timestamp = false +# +# This is the path of the URL to access the mini web UI. +# path = /__profile__ +# +# Clear the data when the wsgi server shutdown. +# flush_at_shutdown = false +# +# unwind the iterator of applications +# unwind = false diff --git a/install-files/swift/usr/share/swift/etc/swift/container-server.j2 b/install-files/swift/usr/share/swift/etc/swift/container-server.j2 new file mode 100644 index 00000000..d226d016 --- /dev/null +++ b/install-files/swift/usr/share/swift/etc/swift/container-server.j2 @@ -0,0 +1,203 @@ +[DEFAULT] +# bind_ip = 0.0.0.0 +bind_ip = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} +bind_port = 6001 +# bind_timeout = 30 +# backlog = 4096 +user = swift +swift_dir = /etc/swift +devices = /srv/node +# mount_check = true +# disable_fallocate = false +# +# Use an integer to override the number of pre-forked processes that will +# accept connections. +# workers = auto +# +# Maximum concurrent requests per worker +# max_clients = 1024 +# +# This is a comma separated list of hosts allowed in the X-Container-Sync-To +# field for containers. This is the old-style of using container sync. It is +# strongly recommended to use the new style of a separate +# container-sync-realms.conf -- see container-sync-realms.conf-sample +# allowed_sync_hosts = 127.0.0.1 +# +# You can specify default log routing here if you want: +# log_name = swift +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# The following caps the length of log lines to the value given; no limit if +# set to 0, the default. +# log_max_line_length = 0 +# +# comma separated list of functions to call to setup custom log handlers. +# functions get passed: conf, name, log_to_console, log_route, fmt, logger, +# adapted_logger +# log_custom_handlers = +# +# If set, log_udp_host will override log_address +# log_udp_host = +# log_udp_port = 514 +# +# You can enable StatsD logging here: +# log_statsd_host = localhost +# log_statsd_port = 8125 +# log_statsd_default_sample_rate = 1.0 +# log_statsd_sample_rate_factor = 1.0 +# log_statsd_metric_prefix = +# +# If you don't mind the extra disk space usage in overhead, you can turn this +# on to preallocate disk space with SQLite databases to decrease fragmentation. +# db_preallocation = off +# +# eventlet_debug = false +# +# You can set fallocate_reserve to the number of bytes you'd like fallocate to +# reserve, whether there is space for the given file size or not. 
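+# (Same caveat as for the account server; e.g. an assumed
+# fallocate_reserve = 1073741824 would keep roughly 1 GiB in reserve
+# per device.)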
+# fallocate_reserve = 0 + +[pipeline:main] +pipeline = healthcheck recon container-server + +[app:container-server] +use = egg:swift#container +# You can override the default log routing for this app here: +# set log_name = container-server +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_requests = true +# set log_address = /dev/log +# +# node_timeout = 3 +# conn_timeout = 0.5 +# allow_versions = false +# auto_create_account_prefix = . +# +# Configure parameter for creating specific server +# To handle all verbs, including replication verbs, do not specify +# "replication_server" (this is the default). To only handle replication, +# set to a True value (e.g. "True" or "1"). To handle only non-replication +# verbs, set to "False". Unless you have a separate replication network, you +# should not specify any value for "replication_server". +# replication_server = false + +[filter:healthcheck] +use = egg:swift#healthcheck +# An optional filesystem path, which if present, will cause the healthcheck +# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE" +# disable_path = + +[filter:recon] +use = egg:swift#recon +recon_cache_path = /var/cache/swift + +[container-replicator] +# You can override the default log routing for this app here (don't use set!): +# log_name = container-replicator +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# vm_test_mode = no +# per_diff = 1000 +# max_diffs = 100 +# concurrency = 8 +# interval = 30 +# node_timeout = 10 +# conn_timeout = 0.5 +# +# The replicator also performs reclamation +# reclaim_age = 604800 +# +# Time in seconds to wait between replication passes +# Note: if the parameter 'interval' is defined then it will be used in place +# of run_pause. +# run_pause = 30 +# +# recon_cache_path = /var/cache/swift + +[container-updater] +# You can override the default log routing for this app here (don't use set!): +# log_name = container-updater +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# interval = 300 +# concurrency = 4 +# node_timeout = 3 +# conn_timeout = 0.5 +# +# slowdown will sleep that amount between containers +# slowdown = 0.01 +# +# Seconds to suppress updating an account that has generated an error +# account_suppression_time = 60 +# +# recon_cache_path = /var/cache/swift + +[container-auditor] +# You can override the default log routing for this app here (don't use set!): +# log_name = container-auditor +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# Will audit each container at most once per interval +# interval = 1800 +# +# containers_per_second = 200 +# recon_cache_path = /var/cache/swift + +[container-sync] +# You can override the default log routing for this app here (don't use set!): +# log_name = container-sync +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# If you need to use an HTTP Proxy, set it here; defaults to no proxy. +# You can also set this to a comma separated list of HTTP Proxies and they will +# be randomly used (simple load balancing). +# sync_proxy = http://10.1.1.1:8888,http://10.1.1.2:8888 +# +# Will sync each container at most once per interval +# interval = 300 +# +# Maximum amount of time to spend syncing each container per pass +# container_time = 60 + +# Note: Put it at the beginning of the pipeline to profile all middleware. But +# it is safer to put this after healthcheck. 
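+#
+# As with the account server, an assumed example pipeline enabling the
+# profiler would be:
+#
+#   pipeline = healthcheck xprofile recon container-server
+#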
+[filter:xprofile] +use = egg:swift#xprofile +# This option enable you to switch profilers which should inherit from python +# standard profiler. Currently the supported value can be 'cProfile', +# 'eventlet.green.profile' etc. +# profile_module = eventlet.green.profile +# +# This prefix will be used to combine process ID and timestamp to name the +# profile data file. Make sure the executing user has permission to write +# into this path (missing path segments will be created, if necessary). +# If you enable profiling in more than one type of daemon, you must override +# it with an unique value like: /var/log/swift/profile/container.profile +# log_filename_prefix = /tmp/log/swift/profile/default.profile +# +# the profile data will be dumped to local disk based on above naming rule +# in this interval. +# dump_interval = 5.0 +# +# Be careful, this option will enable profiler to dump data into the file with +# time stamp which means there will be lots of files piled up in the directory. +# dump_timestamp = false +# +# This is the path of the URL to access the mini web UI. +# path = /__profile__ +# +# Clear the data when the wsgi server shutdown. +# flush_at_shutdown = false +# +# unwind the iterator of applications +# unwind = false diff --git a/install-files/swift/usr/share/swift/etc/swift/object-server.j2 b/install-files/swift/usr/share/swift/etc/swift/object-server.j2 new file mode 100644 index 00000000..66990be9 --- /dev/null +++ b/install-files/swift/usr/share/swift/etc/swift/object-server.j2 @@ -0,0 +1,283 @@ +[DEFAULT] +# bind_ip = 0.0.0.0 +bind_ip = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} +bind_port = 6000 +# bind_timeout = 30 +# backlog = 4096 +user = swift +swift_dir = /etc/swift +devices = /srv/node +# mount_check = true +# disable_fallocate = false +# expiring_objects_container_divisor = 86400 +# expiring_objects_account_name = expiring_objects +# +# Use an integer to override the number of pre-forked processes that will +# accept connections. +# workers = auto +# +# Maximum concurrent requests per worker +# max_clients = 1024 +# +# You can specify default log routing here if you want: +# log_name = swift +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# The following caps the length of log lines to the value given; no limit if +# set to 0, the default. +# log_max_line_length = 0 +# +# comma separated list of functions to call to setup custom log handlers. +# functions get passed: conf, name, log_to_console, log_route, fmt, logger, +# adapted_logger +# log_custom_handlers = +# +# If set, log_udp_host will override log_address +# log_udp_host = +# log_udp_port = 514 +# +# You can enable StatsD logging here: +# log_statsd_host = localhost +# log_statsd_port = 8125 +# log_statsd_default_sample_rate = 1.0 +# log_statsd_sample_rate_factor = 1.0 +# log_statsd_metric_prefix = +# +# eventlet_debug = false +# +# You can set fallocate_reserve to the number of bytes you'd like fallocate to +# reserve, whether there is space for the given file size or not. +# fallocate_reserve = 0 +# +# Time to wait while attempting to connect to another backend node. +# conn_timeout = 0.5 +# Time to wait while sending each chunk of data to another backend node. +# node_timeout = 3 +# Time to wait while receiving each chunk of data from a client or another +# backend node. 
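+# (An assumed tuning example: raising client_timeout to 120 can help with
+# slow clients on high-latency links, at the cost of holding connections
+# open longer.)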
+# client_timeout = 60
+#
+# network_chunk_size = 65536
+# disk_chunk_size = 65536
+
+[pipeline:main]
+pipeline = healthcheck recon object-server
+
+[app:object-server]
+use = egg:swift#object
+# You can override the default log routing for this app here:
+# set log_name = object-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_requests = true
+# set log_address = /dev/log
+#
+# max_upload_time = 86400
+# slow = 0
+#
+# Objects smaller than this are not evicted from the buffercache once read
+# keep_cache_size = 5424880
+#
+# If true, objects for authenticated GET requests may be kept in buffer cache
+# if small enough
+# keep_cache_private = false
+#
+# on PUTs, sync data every n MB
+# mb_per_sync = 512
+#
+# Comma separated list of headers that can be set in metadata on an object.
+# This list is in addition to X-Object-Meta-* headers and cannot include
+# Content-Type, etag, Content-Length, or deleted
+# allowed_headers = Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object
+#
+# auto_create_account_prefix = .
+#
+# A value of 0 means "don't use thread pools". A reasonable starting point is
+# 4.
+# threads_per_disk = 0
+#
+# Configure parameter for creating specific server
+# To handle all verbs, including replication verbs, do not specify
+# "replication_server" (this is the default). To only handle replication,
+# set to a True value (e.g. "True" or "1"). To handle only non-replication
+# verbs, set to "False". Unless you have a separate replication network, you
+# should not specify any value for "replication_server".
+# replication_server = false
+#
+# Set to restrict the number of concurrent incoming REPLICATION requests
+# Set to 0 for unlimited
+# Note that REPLICATION is currently an ssync only item
+# replication_concurrency = 4
+#
+# Restricts incoming REPLICATION requests to one per device,
+# replication_concurrency above allowing. This can help control I/O to each
+# device, but you may wish to set this to False to allow multiple REPLICATION
+# requests (up to the above replication_concurrency setting) per device.
+# replication_one_per_device = True
+#
+# Number of seconds to wait for an existing replication device lock before
+# giving up.
+# replication_lock_timeout = 15
+#
+# These next two settings control when the REPLICATION subrequest handler will
+# abort an incoming REPLICATION attempt. An abort will occur if there are at
+# least threshold number of failures and the value of failures / successes
+# exceeds the ratio. The defaults of 100 and 1.0 mean that at least 100
+# failures have to occur and there have to be more failures than successes for
+# an abort to occur.
+# replication_failure_threshold = 100
+# replication_failure_ratio = 1.0
+#
+# Use splice() for zero-copy object GETs. This requires Linux kernel
+# version 3.0 or greater. If you set "splice = yes" but the kernel
+# does not support it, error messages will appear in the object server
+# logs at startup, but your object servers should continue to function.
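+# (To experiment on a 3.0+ kernel you would simply set "splice = yes" below
+# and watch the object-server log at startup for splice-related errors.)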
+# +# splice = no + +[filter:healthcheck] +use = egg:swift#healthcheck +# An optional filesystem path, which if present, will cause the healthcheck +# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE" +# disable_path = + +[filter:recon] +use = egg:swift#recon +recon_cache_path = /var/cache/swift +#recon_lock_path = /var/lock + +[object-replicator] +# You can override the default log routing for this app here (don't use set!): +# log_name = object-replicator +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# vm_test_mode = no +# daemonize = on +# run_pause = 30 +# concurrency = 1 +# stats_interval = 300 +# +# The sync method to use; default is rsync but you can use ssync to try the +# EXPERIMENTAL all-swift-code-no-rsync-callouts method. Once ssync is verified +# as having performance comparable to, or better than, rsync, we plan to +# deprecate rsync so we can move on with more features for replication. +# sync_method = rsync +# +# max duration of a partition rsync +# rsync_timeout = 900 +# +# bandwidth limit for rsync in kB/s. 0 means unlimited +# rsync_bwlimit = 0 +# +# passed to rsync for io op timeout +# rsync_io_timeout = 30 +# +# node_timeout = +# max duration of an http request; this is for REPLICATE finalization calls and +# so should be longer than node_timeout +# http_timeout = 60 +# +# attempts to kill all workers if nothing replicates for lockup_timeout seconds +# lockup_timeout = 1800 +# +# The replicator also performs reclamation +# reclaim_age = 604800 +# +# ring_check_interval = 15 +# recon_cache_path = /var/cache/swift +# +# limits how long rsync error log lines are +# 0 means to log the entire line +# rsync_error_log_line_length = 0 +# +# handoffs_first and handoff_delete are options for a special case +# such as disk full in the cluster. These two options SHOULD NOT BE +# CHANGED, except for such an extreme situations. (e.g. disks filled up +# or are about to fill up. Anyway, DO NOT let your drives fill up) +# handoffs_first is the flag to replicate handoffs prior to canonical +# partitions. It allows to force syncing and deleting handoffs quickly. +# If set to a True value(e.g. "True" or "1"), partitions +# that are not supposed to be on the node will be replicated first. +# handoffs_first = False +# +# handoff_delete is the number of replicas which are ensured in swift. +# If the number less than the number of replicas is set, object-replicator +# could delete local handoffs even if all replicas are not ensured in the +# cluster. Object-replicator would remove local handoff partition directories +# after syncing partition when the number of successful responses is greater +# than or equal to this number. By default(auto), handoff partitions will be +# removed when it has successfully replicated to all the canonical nodes. 
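+# (Worked example under the stated semantics: with 3 replicas, setting
+# handoff_delete = 2 lets a local handoff be removed after two canonical
+# copies are confirmed, trading durability margin for faster recovery
+# from full disks.)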
+# handoff_delete = auto

[object-updater]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = object-updater
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# interval = 300
+# concurrency = 1
+# node_timeout =
+# slowdown will sleep that amount between objects
+# slowdown = 0.01
+#
+# recon_cache_path = /var/cache/swift
+
+[object-auditor]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = object-auditor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# You can set the disk chunk size that the auditor uses, making it larger if
+# you like for more efficient local auditing of larger objects
+# disk_chunk_size = 65536
+# files_per_second = 20
+# concurrency = 1
+# bytes_per_second = 10000000
+# log_time = 3600
+# zero_byte_files_per_second = 50
+# recon_cache_path = /var/cache/swift
+
+# Takes a comma separated list of ints. If set, the object auditor will
+# increment a counter for every object whose size is <= to the given break
+# points and report the result after a full scan.
+# object_size_stats =
+
+# Note: Put it at the beginning of the pipeline to profile all middleware. But
+# it is safer to put this after healthcheck.
+[filter:xprofile]
+use = egg:swift#xprofile
+# This option enables you to switch profilers; the chosen one should inherit
+# from the python standard profiler. Currently the supported values are
+# 'cProfile', 'eventlet.green.profile', etc.
+# profile_module = eventlet.green.profile
+#
+# This prefix will be used to combine process ID and timestamp to name the
+# profile data file. Make sure the executing user has permission to write
+# into this path (missing path segments will be created, if necessary).
+# If you enable profiling in more than one type of daemon, you must override
+# it with a unique value like: /var/log/swift/profile/object.profile
+# log_filename_prefix = /tmp/log/swift/profile/default.profile
+#
+# The profile data will be dumped to local disk based on the above naming
+# rule at this interval.
+# dump_interval = 5.0
+#
+# Be careful: this option makes the profiler dump data into timestamped
+# files, which means lots of files will pile up in the directory.
+# dump_timestamp = false
+#
+# This is the path of the URL to access the mini web UI.
+# path = /__profile__
+#
+# Clear the data when the wsgi server shuts down.
+# flush_at_shutdown = false
+#
+# Unwind the iterator of applications.
+# unwind = false
diff --git a/install-files/swift/usr/share/swift/etc/swift/swift.j2 b/install-files/swift/usr/share/swift/etc/swift/swift.j2
new file mode 100644
index 00000000..6d76215a
--- /dev/null
+++ b/install-files/swift/usr/share/swift/etc/swift/swift.j2
@@ -0,0 +1,118 @@
+[swift-hash]
+
+# swift_hash_path_suffix and swift_hash_path_prefix are used as part of the
+# hashing algorithm when determining data placement in the cluster.
+# These values should remain secret and MUST NOT change
+# once a cluster has been deployed.
+
+swift_hash_path_suffix = {{ SWIFT_HASH_PATH_SUFFIX }}
+swift_hash_path_prefix = {{ SWIFT_HASH_PATH_PREFIX }}
+
+# Storage policies are defined here and determine various characteristics
+# about how objects are stored and treated. Policies are specified by name on
+# a per container basis. Names are case-insensitive. The policy index is
+# specified in the section header and is used internally.
The policy with +# index 0 is always used for legacy containers and can be given a name for use +# in metadata however the ring file name will always be 'object.ring.gz' for +# backwards compatibility. If no policies are defined a policy with index 0 +# will be automatically created for backwards compatibility and given the name +# Policy-0. A default policy is used when creating new containers when no +# policy is specified in the request. If no other policies are defined the +# policy with index 0 will be declared the default. If multiple policies are +# defined you must define a policy with index 0 and you must specify a +# default. It is recommended you always define a section for +# storage-policy:0. +[storage-policy:0] +name = Policy-0 +default = yes + +# the following section would declare a policy called 'silver', the number of +# replicas will be determined by how the ring is built. In this example the +# 'silver' policy could have a lower or higher # of replicas than the +# 'Policy-0' policy above. The ring filename will be 'object-1.ring.gz'. You +# may only specify one storage policy section as the default. If you changed +# this section to specify 'silver' as the default, when a client created a new +# container w/o a policy specified, it will get the 'silver' policy because +# this config has specified it as the default. However if a legacy container +# (one created with a pre-policy version of swift) is accessed, it is known +# implicitly to be assigned to the policy with index 0 as opposed to the +# current default. +#[storage-policy:1] +#name = silver + +# The swift-constraints section sets the basic constraints on data +# saved in the swift cluster. These constraints are automatically +# published by the proxy server in responses to /info requests. + +[swift-constraints] + +# max_file_size is the largest "normal" object that can be saved in +# the cluster. This is also the limit on the size of each segment of +# a "large" object when using the large object manifest support. +# This value is set in bytes. Setting it to lower than 1MiB will cause +# some tests to fail. It is STRONGLY recommended to leave this value at +# the default (5 * 2**30 + 2). + +#max_file_size = 5368709122 + + +# max_meta_name_length is the max number of bytes in the utf8 encoding +# of the name portion of a metadata header. + +#max_meta_name_length = 128 + + +# max_meta_value_length is the max number of bytes in the utf8 encoding +# of a metadata value + +#max_meta_value_length = 256 + + +# max_meta_count is the max number of metadata keys that can be stored +# on a single account, container, or object + +#max_meta_count = 90 + + +# max_meta_overall_size is the max number of bytes in the utf8 encoding +# of the metadata (keys + values) + +#max_meta_overall_size = 4096 + +# max_header_size is the max number of bytes in the utf8 encoding of each +# header. Using 8192 as default because eventlet use 8192 as max size of +# header line. This value may need to be increased when using identity +# v3 API tokens including more than 7 catalog entries. 
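+# (For instance, an assumed override for very large Keystone v3 tokens
+# would be to uncomment and raise this, e.g. max_header_size = 16384.)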
+# See also include_service_catalog in proxy-server.conf-sample +# (documented in overview_auth.rst) + +#max_header_size = 8192 + + +# max_object_name_length is the max number of bytes in the utf8 encoding +# of an object name + +#max_object_name_length = 1024 + + +# container_listing_limit is the default (and max) number of items +# returned for a container listing request + +#container_listing_limit = 10000 + + +# account_listing_limit is the default (and max) number of items returned +# for an account listing request +#account_listing_limit = 10000 + + +# max_account_name_length is the max number of bytes in the utf8 encoding +# of an account name + +#max_account_name_length = 256 + + +# max_container_name_length is the max number of bytes in the utf8 encoding +# of a container name + +#max_container_name_length = 256 diff --git a/install-files/swift/usr/share/swift/hosts b/install-files/swift/usr/share/swift/hosts new file mode 100644 index 00000000..5b97818d --- /dev/null +++ b/install-files/swift/usr/share/swift/hosts @@ -0,0 +1 @@ +localhost ansible_connection=local diff --git a/install-files/swift/usr/share/swift/swift-storage.yml b/install-files/swift/usr/share/swift/swift-storage.yml new file mode 100644 index 00000000..62a335ed --- /dev/null +++ b/install-files/swift/usr/share/swift/swift-storage.yml @@ -0,0 +1,24 @@ +--- +- hosts: localhost + vars_files: + - swift-storage-vars.yml + vars: + remote_user: root + tasks: + - user: name=swift comment="Swift user" + - file: path=/etc/swift owner=swift group=swift state=directory recurse=yes + + - template: src=/usr/share/swift/etc/rsyncd.j2 dest=/etc/rsyncd.conf + mode=0644 owner=swift group=swift + + - template: src=/usr/share/swift/etc/swift/{{ item }}.j2 + dest=/etc/swift/{{ item }}.conf mode=0644 owner=swift group=swift + with_items: + - account-server + - container-server + - object-server + - swift + + - file: path=/srv/node owner=swift group=swift state=directory recurse=yes + - file: path=/var/cache/swift owner=swift group=swift state=directory + recurse=yes diff --git a/install-files/vagrant-files/home/vagrant/.ssh/authorized_keys b/install-files/vagrant-files/home/vagrant/.ssh/authorized_keys new file mode 100644 index 00000000..18a9c00f --- /dev/null +++ b/install-files/vagrant-files/home/vagrant/.ssh/authorized_keys @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key diff --git a/install-files/vagrant-files/manifest b/install-files/vagrant-files/manifest new file mode 100644 index 00000000..67168341 --- /dev/null +++ b/install-files/vagrant-files/manifest @@ -0,0 +1,4 @@ +0040755 0 0 /home +0040755 1000 0000 /home/vagrant +0040700 1000 1000 /home/vagrant/.ssh +0100600 1000 1000 /home/vagrant/.ssh/authorized_keys diff --git a/moonshot/boot/m400-1003.dtb b/moonshot/boot/m400-1003.dtb deleted file mode 100644 index d6fd83ee..00000000 Binary files a/moonshot/boot/m400-1003.dtb and /dev/null differ diff --git a/moonshot/manifest b/moonshot/manifest deleted file mode 100644 index dd80fe49..00000000 --- a/moonshot/manifest +++ /dev/null @@ -1,2 +0,0 @@ -0040755 0 0 /boot -0100744 0 0 /boot/m400-1003.dtb diff --git a/openstack/etc/horizon/apache-horizon.conf 
b/openstack/etc/horizon/apache-horizon.conf
deleted file mode 100644
index ea88897a..00000000
--- a/openstack/etc/horizon/apache-horizon.conf
+++ /dev/null
@@ -1,34 +0,0 @@
-<VirtualHost *:80>
- WSGIScriptAlias /horizon /var/lib/horizon/openstack_dashboard/django.wsgi
- WSGIDaemonProcess horizon user=horizon group=horizon processes=3 threads=10 home=/var/lib/horizon display-name=horizon
- WSGIApplicationGroup %{GLOBAL}
-
- RedirectMatch ^/$ /horizon/
-
- SetEnv APACHE_RUN_USER apache
- SetEnv APACHE_RUN_GROUP apache
- WSGIProcessGroup horizon
-
- DocumentRoot /var/lib/horizon/.blackhole
- Alias /static /var/lib/horizon/openstack_dashboard/static
-
- <Directory /var/lib/horizon/openstack_dashboard/static>
-  Options Indexes FollowSymLinks MultiViews
-  AllowOverride None
-  # Apache 2.4 uses mod_authz_host for access control now (instead of
-  # "Allow")
-  <IfVersion < 2.4>
-   Order allow,deny
-   Allow from all
-  </IfVersion>
-  <IfVersion >= 2.4>
-   Require all granted
-  </IfVersion>
- </Directory>
-
- ErrorLog /var/log/httpd/horizon_error.log
- LogLevel warn
- CustomLog /var/log/httpd/horizon_access.log combined
-</VirtualHost>
-
-WSGISocketPrefix /var/run/httpd
diff --git a/openstack/etc/horizon/openstack_dashboard/local_settings.py b/openstack/etc/horizon/openstack_dashboard/local_settings.py
deleted file mode 100644
index febc3e70..00000000
--- a/openstack/etc/horizon/openstack_dashboard/local_settings.py
+++ /dev/null
@@ -1,551 +0,0 @@
-import os
-
-from django.utils.translation import ugettext_lazy as _
-
-from openstack_dashboard import exceptions
-
-DEBUG = True
-TEMPLATE_DEBUG = DEBUG
-
-STATIC_ROOT = "/var/lib/horizon/openstack_dashboard/static"
-
-# Required for Django 1.5.
-# If horizon is running in production (DEBUG is False), set this
-# with the list of host/domain names that the application can serve.
-# For more information see:
-# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
-#ALLOWED_HOSTS = ['horizon.example.com', ]
-ALLOWED_HOSTS = ['*']
-
-# Set SSL proxy settings:
-# For Django 1.4+ pass this header from the proxy after terminating the SSL,
-# and don't forget to strip it from the client's request.
-# For more information see:
-# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
-# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
-
-# If Horizon is being served through SSL, then uncomment the following two
-# settings to better secure the cookies from security exploits
-#CSRF_COOKIE_SECURE = True
-#SESSION_COOKIE_SECURE = True
-
-# Overrides for OpenStack API versions. Use this setting to force the
-# OpenStack dashboard to use a specific API version for a given service API.
-# NOTE: The version should be formatted as it appears in the URL for the
-# service API. For example, The identity service APIs have inconsistent
-# use of the decimal point, so valid options would be "2.0" or "3".
-# OPENSTACK_API_VERSIONS = {
-#     "data_processing": 1.1,
-#     "identity": 3,
-#     "volume": 2
-# }
-
-# Set this to True if running on multi-domain model. When this is enabled, it
-# will require user to enter the Domain name in addition to username for login.
-# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
-
-# Overrides the default domain used when running on single-domain model
-# with Keystone V3. All entities will be created in the default domain.
-# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
-
-# Set Console type:
-# valid options would be "AUTO"(default), "VNC", "SPICE", "RDP" or None
-# Set to None explicitly if you want to deactivate the console.
-# CONSOLE_TYPE = "AUTO"
-
-# Default OpenStack Dashboard configuration.
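Both of the files deleted above feed Apache's mod_wsgi: the vhost points WSGIScriptAlias at django.wsgi, and local_settings.py is imported by the dashboard at startup, so a syntax error in it takes Horizon down with it. A quick pre-deployment sanity check is to byte-compile the settings file; a minimal sketch (the path matches this repo's layout and is an assumption, adjust as needed):

    import py_compile

    # Raises py_compile.PyCompileError on a syntax error, without importing
    # the module or needing Django on the Python path.
    py_compile.compile('/etc/horizon/openstack_dashboard/local_settings.py',
                       doraise=True)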
-HORIZON_CONFIG = { - 'user_home': 'openstack_dashboard.views.get_user_home', - 'ajax_queue_limit': 10, - 'auto_fade_alerts': { - 'delay': 3000, - 'fade_duration': 1500, - 'types': ['alert-success', 'alert-info'] - }, - 'help_url': "http://docs.openstack.org", - 'exceptions': {'recoverable': exceptions.RECOVERABLE, - 'not_found': exceptions.NOT_FOUND, - 'unauthorized': exceptions.UNAUTHORIZED}, - 'modal_backdrop': 'static', - 'angular_modules': [], - 'js_files': [], -} - -# Specify a regular expression to validate user passwords. -# HORIZON_CONFIG["password_validator"] = { -# "regex": '.*', -# "help_text": _("Your password does not meet the requirements.") -# } - -# Disable simplified floating IP address management for deployments with -# multiple floating IP pools or complex network requirements. -# HORIZON_CONFIG["simple_ip_management"] = False - -# Turn off browser autocompletion for forms including the login form and -# the database creation workflow if so desired. -# HORIZON_CONFIG["password_autocomplete"] = "off" - -# Setting this to True will disable the reveal button for password fields, -# including on the login form. -# HORIZON_CONFIG["disable_password_reveal"] = False - -#LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) - -LOCAL_PATH = "/var/lib/horizon" - -# Set custom secret key: -# You can either set it to a specific value or you can let horizon generate a -# default secret key that is unique on this machine, e.i. regardless of the -# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, there -# may be situations where you would want to set this explicitly, e.g. when -# multiple dashboard instances are distributed on different machines (usually -# behind a load-balancer). Either you have to make sure that a session gets all -# requests routed to the same dashboard instance or you set the same SECRET_KEY -# for all of them. -from horizon.utils import secret_key -SECRET_KEY = secret_key.generate_or_read_from_file( - os.path.join(LOCAL_PATH, '.secret_key_store')) - -# We recommend you use memcached for development; otherwise after every reload -# of the django development server, you will have to login again. To use -# memcached set CACHES to something like -CACHES = { - 'default': { - 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', - 'LOCATION': '127.0.0.1:11211', - } -} - -#CACHES = { -# 'default': { -# 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache' -# } -#} - -# Send email to the console by default -EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' -# Or send them to /dev/null -#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' - -# Configure these for your outgoing email host -# EMAIL_HOST = 'smtp.my-company.com' -# EMAIL_PORT = 25 -# EMAIL_HOST_USER = 'djangomail' -# EMAIL_HOST_PASSWORD = 'top-secret!' - -# For multiple regions uncomment this configuration, and add (endpoint, title). 
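A note on the SECRET_KEY block above: horizon.utils.secret_key.generate_or_read_from_file keeps one random key per machine, creating it on first use, so every WSGI worker on that machine agrees on it. Conceptually it behaves like the simplified sketch below (this is not Horizon's actual code, which also locks the file and uses its own character set):

    import binascii
    import os

    def generate_or_read_from_file(path, key_length=64):
        # Reuse the stored key if present; otherwise create one that only
        # the owning user can read (file mode 0600 via the umask).
        if os.path.exists(path):
            with open(path) as f:
                return f.read()
        key = binascii.hexlify(os.urandom(key_length)).decode('ascii')
        old_umask = os.umask(0o177)
        try:
            with open(path, 'w') as f:
                f.write(key)
        finally:
            os.umask(old_umask)
        return key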
-# AVAILABLE_REGIONS = [ -# ('http://cluster1.example.com:5000/v2.0', 'cluster1'), -# ('http://cluster2.example.com:5000/v2.0', 'cluster2'), -# ] - -OPENSTACK_HOST = "127.0.0.1" -OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST -OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_" - -# Disable SSL certificate checks (useful for self-signed certificates): -# OPENSTACK_SSL_NO_VERIFY = True - -# The CA certificate to use to verify SSL connections -# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem' - -# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the -# capabilities of the auth backend for Keystone. -# If Keystone has been configured to use LDAP as the auth backend then set -# can_edit_user to False and name to 'ldap'. -# -# TODO(tres): Remove these once Keystone has an API to identify auth backend. -OPENSTACK_KEYSTONE_BACKEND = { - 'name': 'native', - 'can_edit_user': True, - 'can_edit_group': True, - 'can_edit_project': True, - 'can_edit_domain': True, - 'can_edit_role': True -} - -#Setting this to True, will add a new "Retrieve Password" action on instance, -#allowing Admin session password retrieval/decryption. -#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False - -# The Xen Hypervisor has the ability to set the mount point for volumes -# attached to instances (other Hypervisors currently do not). Setting -# can_set_mount_point to True will add the option to set the mount point -# from the UI. -OPENSTACK_HYPERVISOR_FEATURES = { - 'can_set_mount_point': False, - 'can_set_password': False, -} - -# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional -# services provided by cinder that is not exposed by its extension API. -OPENSTACK_CINDER_FEATURES = { - 'enable_backup': False, -} - -# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional -# services provided by neutron. Options currently available are load -# balancer service, security groups, quotas, VPN service. -OPENSTACK_NEUTRON_NETWORK = { - 'enable_router': True, - 'enable_quotas': True, - 'enable_ipv6': True, - 'enable_distributed_router': False, - 'enable_ha_router': False, - 'enable_lb': True, - 'enable_firewall': True, - 'enable_vpn': True, - # The profile_support option is used to detect if an external router can be - # configured via the dashboard. When using specific plugins the - # profile_support can be turned on if needed. - 'profile_support': None, - #'profile_support': 'cisco', - # Set which provider network types are supported. Only the network types - # in this list will be available to choose from when creating a network. - # Network types include local, flat, vlan, gre, and vxlan. - 'supported_provider_types': ['*'], -} - -# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features -# in the OpenStack Dashboard related to the Image service, such as the list -# of supported image formats. -# OPENSTACK_IMAGE_BACKEND = { -# 'image_formats': [ -# ('', _('Select format')), -# ('aki', _('AKI - Amazon Kernel Image')), -# ('ami', _('AMI - Amazon Machine Image')), -# ('ari', _('ARI - Amazon Ramdisk Image')), -# ('iso', _('ISO - Optical Disk Image')), -# ('qcow2', _('QCOW2 - QEMU Emulator')), -# ('raw', _('Raw')), -# ('vdi', _('VDI')), -# ('vhd', _('VHD')), -# ('vmdk', _('VMDK')) -# ] -# } - -# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for -# image custom property attributes that appear on image detail pages. 
-IMAGE_CUSTOM_PROPERTY_TITLES = { - "architecture": _("Architecture"), - "kernel_id": _("Kernel ID"), - "ramdisk_id": _("Ramdisk ID"), - "image_state": _("Euca2ools state"), - "project_id": _("Project ID"), - "image_type": _("Image Type") -} - -# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image -# custom properties should not be displayed in the Image Custom Properties -# table. -IMAGE_RESERVED_CUSTOM_PROPERTIES = [] - -# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints -# in the Keystone service catalog. Use this setting when Horizon is running -# external to the OpenStack environment. The default is 'publicURL'. -#OPENSTACK_ENDPOINT_TYPE = "publicURL" - -# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the -# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints -# in the Keystone service catalog. Use this setting when Horizon is running -# external to the OpenStack environment. The default is None. This -# value should differ from OPENSTACK_ENDPOINT_TYPE if used. -#SECONDARY_ENDPOINT_TYPE = "publicURL" - -# The number of objects (Swift containers/objects or images) to display -# on a single page before providing a paging element (a "more" link) -# to paginate results. -API_RESULT_LIMIT = 1000 -API_RESULT_PAGE_SIZE = 20 - -# Specify a maximum number of items to display in a dropdown. -DROPDOWN_MAX_ITEMS = 30 - -# The timezone of the server. This should correspond with the timezone -# of your entire OpenStack installation, and hopefully be in UTC. -TIME_ZONE = "UTC" - -# When launching an instance, the menu of available flavors is -# sorted by RAM usage, ascending. If you would like a different sort order, -# you can provide another flavor attribute as sorting key. Alternatively, you -# can provide a custom callback method to use for sorting. You can also provide -# a flag for reverse sort. For more info, see -# http://docs.python.org/2/library/functions.html#sorted -# CREATE_INSTANCE_FLAVOR_SORT = { -# 'key': 'name', -# # or -# 'key': my_awesome_callback_method, -# 'reverse': False, -# } - -# The Horizon Policy Enforcement engine uses these values to load per service -# policy rule files. The content of these files should match the files the -# OpenStack services are using to determine role based access control in the -# target installation. - -# Path to directory containing policy.json files -#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf") -# Map of local copy of service policy files -#POLICY_FILES = { -# 'identity': 'keystone_policy.json', -# 'compute': 'nova_policy.json', -# 'volume': 'cinder_policy.json', -# 'image': 'glance_policy.json', -# 'orchestration': 'heat_policy.json', -# 'network': 'neutron_policy.json', -#} - -# Trove user and database extension support. By default support for -# creating users and databases on database instances is turned on. -# To disable these extensions set the permission here to something -# unusable such as ["!"]. -# TROVE_ADD_USER_PERMS = [] -# TROVE_ADD_DATABASE_PERMS = [] - -LOGGING = { - 'version': 1, - # When set to True this will disable all logging except - # for loggers specified in this configuration dictionary. Note that - # if nothing is specified here and disable_existing_loggers is True, - # django.db.backends will still log unless it is disabled explicitly. 
- 'disable_existing_loggers': False, - 'handlers': { - 'null': { - 'level': 'DEBUG', - 'class': 'django.utils.log.NullHandler', - }, - 'console': { - # Set the level to "DEBUG" for verbose output logging. - 'level': 'INFO', - 'class': 'logging.StreamHandler', - }, - }, - 'loggers': { - # Logging from django.db.backends is VERY verbose, send to null - # by default. - 'django.db.backends': { - 'handlers': ['null'], - 'propagate': False, - }, - 'requests': { - 'handlers': ['null'], - 'propagate': False, - }, - 'horizon': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'openstack_dashboard': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'novaclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'cinderclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'keystoneclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'glanceclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'neutronclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'heatclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'ceilometerclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'troveclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'swiftclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'openstack_auth': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'nose.plugins.manager': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'django': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'iso8601': { - 'handlers': ['null'], - 'propagate': False, - }, - 'scss': { - 'handlers': ['null'], - 'propagate': False, - }, - } -} - -# 'direction' should not be specified for all_tcp/udp/icmp. -# It is specified in the form. 
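The LOGGING tree above is ordinary logging.config.dictConfig input, so its routing can be sanity-checked outside Django entirely. A standalone sketch with just two of the loggers (using the stdlib NullHandler rather than the Django wrapper the deleted file names):

    import logging
    import logging.config

    LOGGING = {
        'version': 1,
        'disable_existing_loggers': False,
        'handlers': {
            'null': {'level': 'DEBUG', 'class': 'logging.NullHandler'},
            'console': {'level': 'INFO', 'class': 'logging.StreamHandler'},
        },
        'loggers': {
            # Very verbose; swallowed by the null handler.
            'django.db.backends': {'handlers': ['null'], 'propagate': False},
            'horizon': {'handlers': ['console'], 'level': 'DEBUG',
                        'propagate': False},
        },
    }

    logging.config.dictConfig(LOGGING)
    logging.getLogger('django.db.backends').error('never shown')
    logging.getLogger('horizon').info('printed to stderr')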
-SECURITY_GROUP_RULES = { - 'all_tcp': { - 'name': _('All TCP'), - 'ip_protocol': 'tcp', - 'from_port': '1', - 'to_port': '65535', - }, - 'all_udp': { - 'name': _('All UDP'), - 'ip_protocol': 'udp', - 'from_port': '1', - 'to_port': '65535', - }, - 'all_icmp': { - 'name': _('All ICMP'), - 'ip_protocol': 'icmp', - 'from_port': '-1', - 'to_port': '-1', - }, - 'ssh': { - 'name': 'SSH', - 'ip_protocol': 'tcp', - 'from_port': '22', - 'to_port': '22', - }, - 'smtp': { - 'name': 'SMTP', - 'ip_protocol': 'tcp', - 'from_port': '25', - 'to_port': '25', - }, - 'dns': { - 'name': 'DNS', - 'ip_protocol': 'tcp', - 'from_port': '53', - 'to_port': '53', - }, - 'http': { - 'name': 'HTTP', - 'ip_protocol': 'tcp', - 'from_port': '80', - 'to_port': '80', - }, - 'pop3': { - 'name': 'POP3', - 'ip_protocol': 'tcp', - 'from_port': '110', - 'to_port': '110', - }, - 'imap': { - 'name': 'IMAP', - 'ip_protocol': 'tcp', - 'from_port': '143', - 'to_port': '143', - }, - 'ldap': { - 'name': 'LDAP', - 'ip_protocol': 'tcp', - 'from_port': '389', - 'to_port': '389', - }, - 'https': { - 'name': 'HTTPS', - 'ip_protocol': 'tcp', - 'from_port': '443', - 'to_port': '443', - }, - 'smtps': { - 'name': 'SMTPS', - 'ip_protocol': 'tcp', - 'from_port': '465', - 'to_port': '465', - }, - 'imaps': { - 'name': 'IMAPS', - 'ip_protocol': 'tcp', - 'from_port': '993', - 'to_port': '993', - }, - 'pop3s': { - 'name': 'POP3S', - 'ip_protocol': 'tcp', - 'from_port': '995', - 'to_port': '995', - }, - 'ms_sql': { - 'name': 'MS SQL', - 'ip_protocol': 'tcp', - 'from_port': '1433', - 'to_port': '1433', - }, - 'mysql': { - 'name': 'MYSQL', - 'ip_protocol': 'tcp', - 'from_port': '3306', - 'to_port': '3306', - }, - 'rdp': { - 'name': 'RDP', - 'ip_protocol': 'tcp', - 'from_port': '3389', - 'to_port': '3389', - }, -} - -# Deprecation Notice: -# -# The setting FLAVOR_EXTRA_KEYS has been deprecated. -# Please load extra spec metadata into the Glance Metadata Definition Catalog. -# -# The sample quota definitions can be found in: -# /etc/metadefs/compute-quota.json -# -# The metadata definition catalog supports CLI and API: -# $glance --os-image-api-version 2 help md-namespace-import -# $glance-manage db_load_metadefs -# -# See Metadata Definitions on: http://docs.openstack.org/developer/glance/ - -# Indicate to the Sahara data processing service whether or not -# automatic floating IP allocation is in effect. If it is not -# in effect, the user will be prompted to choose a floating IP -# pool for use in their cluster. False by default. You would want -# to set this to True if you were running Nova Networking with -# auto_assign_floating_ip = True. -# SAHARA_AUTO_IP_ALLOCATION_ENABLED = False - -# The hash algorithm to use for authentication tokens. This must -# match the hash algorithm that the identity server and the -# auth_token middleware are using. Allowed values are the -# algorithms supported by Python's hashlib library. -# OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5' -LOGIN_URL='/horizon/auth/login/' -LOGOUT_URL='/horizon/auth/logout/' -LOGIN_REDIRECT_URL='/horizon/' diff --git a/openstack/etc/tempest/tempest.conf b/openstack/etc/tempest/tempest.conf deleted file mode 100644 index 05f0eca1..00000000 --- a/openstack/etc/tempest/tempest.conf +++ /dev/null @@ -1,1116 +0,0 @@ -[DEFAULT] - -# -# From tempest.config -# - -# Whether to disable inter-process locks (boolean value) -#disable_process_locking = false - -# Directory to use for lock files. 
(string value) -lock_path = /run/lock - -# -# From tempest.config -# - -# Print debugging output (set logging level to DEBUG instead of -# default WARNING level). (boolean value) -#debug = false - -# Print more verbose output (set logging level to INFO instead of -# default WARNING level). (boolean value) -#verbose = false - -# -# From tempest.config -# - -# The name of a logging configuration file. This file is appended to -# any existing logging configuration files. For details about logging -# configuration files, see the Python logging module documentation. -# (string value) -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append = - -# Format string for %%(asctime)s in log records. Default: %(default)s -# . (string value) -#log_date_format = %Y-%m-%d %H:%M:%S - -# (Optional) The base directory used for relative --log-file paths. -# (string value) -# Deprecated group/name - [DEFAULT]/logdir -#log_dir = - -# (Optional) Name of log file to output to. If no default is set, -# logging will go to stdout. (string value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file = - -# DEPRECATED. A logging.Formatter log message format string which may -# use any of the available logging.LogRecord attributes. This option -# is deprecated. Please use logging_context_format_string and -# logging_default_format_string instead. (string value) -#log_format = - -# Syslog facility to receive log lines. (string value) -#syslog_log_facility = LOG_USER - -# Use syslog for logging. Existing syslog format is DEPRECATED during -# I, and will change in J to honor RFC5424. (boolean value) -use_syslog = true - -# (Optional) Enables or disables syslog rfc5424 format for logging. If -# enabled, prefixes the MSG part of the syslog message with APP-NAME -# (RFC5424). The format without the APP-NAME is deprecated in I, and -# will be removed in J. (boolean value) -#use_syslog_rfc_format = false - -# -# From tempest.config -# - -# Log output to standard error. (boolean value) -#use_stderr = true - -# -# From tempest.config -# - -# List of logger=LEVEL pairs. (list value) -#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN - -# Enables or disables fatal status of deprecations. (boolean value) -#fatal_deprecations = false - -# The format for an instance that is passed with the log message. -# (string value) -#instance_format = "[instance: %(uuid)s] " - -# The format for an instance UUID that is passed with the log message. -# (string value) -#instance_uuid_format = "[instance: %(uuid)s] " - -# Format string to use for log messages with context. (string value) -#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Data to append to log format when level is DEBUG. (string value) -#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d - -# Format string to use for log messages without context. (string -# value) -#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Prefix each line of exception output with this format. (string -# value) -#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s - -# Enables or disables publication of error events. 
(boolean value) -#publish_errors = false - - -[auth] - -# -# From tempest.config -# - -# Allows test cases to create/destroy tenants and users. This option -# requires that OpenStack Identity API admin credentials are known. If -# false, isolated test cases and parallel execution, can still be -# achieved configuring a list of test accounts (boolean value) -# Deprecated group/name - [compute]/allow_tenant_isolation -# Deprecated group/name - [orchestration]/allow_tenant_isolation -allow_tenant_isolation = true - -# If set to True it enables the Accounts provider, which locks -# credentials to allow for parallel execution with pre-provisioned -# accounts. It can only be used to run tests that ensure credentials -# cleanup happens. It requires at least `2 * CONC` distinct accounts -# configured in `test_accounts_file`, with CONC == the number of -# concurrent test processes. (boolean value) -#locking_credentials_provider = false - -# Path to the yaml file that contains the list of credentials to use -# for running tests (string value) -#test_accounts_file = etc/accounts.yaml - - -[baremetal] - -# -# From tempest.config -# - -# Timeout for Ironic node to completely provision (integer value) -#active_timeout = 300 - -# Timeout for association of Nova instance and Ironic node (integer -# value) -#association_timeout = 30 - -# Catalog type of the baremetal provisioning service (string value) -#catalog_type = baremetal - -# Driver name which Ironic uses (string value) -#driver = fake - -# Whether the Ironic nova-compute driver is enabled (boolean value) -#driver_enabled = false - -# The endpoint type to use for the baremetal provisioning service -# (string value) -#endpoint_type = publicURL - -# Timeout for Ironic power transitions. (integer value) -#power_timeout = 60 - -# Timeout for unprovisioning an Ironic node. (integer value) -#unprovision_timeout = 60 - - -[boto] - -# -# From tempest.config -# - -# AKI Kernel Image manifest (string value) -#aki_manifest = cirros-0.3.0-x86_64-vmlinuz.manifest.xml - -# AMI Machine Image manifest (string value) -#ami_manifest = cirros-0.3.0-x86_64-blank.img.manifest.xml - -# ARI Ramdisk Image manifest (string value) -#ari_manifest = cirros-0.3.0-x86_64-initrd.manifest.xml - -# AWS Access Key (string value) -#aws_access = - -# AWS Secret Key (string value) -#aws_secret = - -# AWS Zone for EC2 tests (string value) -#aws_zone = nova - -# Status Change Test Interval (integer value) -#build_interval = 1 - -# Status Change Timeout (integer value) -#build_timeout = 60 - -# EC2 URL (string value) -#ec2_url = http://localhost:8773/services/Cloud - -# boto Http socket timeout (integer value) -#http_socket_timeout = 3 - -# Instance type (string value) -#instance_type = m1.tiny - -# boto num_retries on error (integer value) -#num_retries = 1 - -# S3 Materials Path (string value) -#s3_materials_path = /opt/stack/devstack/files/images/s3-materials/cirros-0.3.0 - -# S3 URL (string value) -#s3_url = http://localhost:8080 - - -[cli] - -# -# From tempest.config -# - -# directory where python client binaries are located (string value) -cli_dir = /usr/bin - -# enable cli tests (boolean value) -#enabled = true - -# Whether the tempest run location has access to the *-manage -# commands. In a pure blackbox environment it will not. (boolean -# value) -#has_manage = true - -# Number of seconds to wait on a CLI timeout (integer value) -#timeout = 15 - - -[compute] - -# -# From tempest.config -# - -# Time in seconds between build status checks. 
(integer value) -#build_interval = 1 - -# Timeout in seconds to wait for an instance to build. (integer value) -#build_timeout = 300 - -# Catalog type of the Compute service. (string value) -#catalog_type = compute - -# Catalog type of the Compute v3 service. (string value) -#catalog_v3_type = computev3 - -# The endpoint type to use for the compute service. (string value) -#endpoint_type = publicURL - -# Visible fixed network name (string value) -#fixed_network_name = private - -# Valid primary flavor to use in tests. (string value) -#flavor_ref = 1 - -# Valid secondary flavor to be used in tests. (string value) -#flavor_ref_alt = 2 - -# Unallocated floating IP range, which will be used to test the -# floating IP bulk feature for CRUD operation. (string value) -#floating_ip_range = 10.0.0.0/29 - -# Password used to authenticate to an instance using the alternate -# image. (string value) -#image_alt_ssh_password = password - -# User name used to authenticate to an instance using the alternate -# image. (string value) -#image_alt_ssh_user = root - -# Valid primary image reference to be used in tests. This is a -# required option (string value) -#image_ref = - -# Valid secondary image reference to be used in tests. This is a -# required option, but if only one image is available duplicate the -# value of image_ref above (string value) -#image_ref_alt = - -# Password used to authenticate to an instance. (string value) -#image_ssh_password = password - -# User name used to authenticate to an instance. (string value) -#image_ssh_user = root - -# IP version used for SSH connections. (integer value) -#ip_version_for_ssh = 4 - -# Network used for SSH connections. (string value) -#network_for_ssh = public - -# Path to a private key file for SSH access to remote hosts (string -# value) -#path_to_private_key = - -# Timeout in seconds to wait for ping to succeed. (integer value) -#ping_timeout = 120 - -# Additional wait time for clean state, when there is no OS-EXT-STS -# extension available (integer value) -#ready_wait = 0 - -# The compute region name to use. If empty, the value of -# identity.region is used instead. If no such region is found in the -# service catalog, the first found one is used. (string value) -#region = - -# Should the tests ssh to instances? (boolean value) -#run_ssh = false - -# Time in seconds before a shelved instance is eligible for removing -# from a host. -1 never offload, 0 offload when shelved. This time -# should be the same as the time of nova.conf, and some tests will run -# for as long as the time. (integer value) -#shelved_offload_time = 0 - -# Auth method used for authenticate to the instance. Valid choices -# are: keypair, configured, adminpass. keypair: start the servers with -# an ssh keypair. configured: use the configured user and password. -# adminpass: use the injected adminPass. disabled: avoid using ssh -# when it is an option. (string value) -#ssh_auth_method = keypair - -# Timeout in seconds to wait for output from ssh channel. (integer -# value) -#ssh_channel_timeout = 60 - -# How to connect to the instance? fixed: using the first ip belongs -# the fixed network floating: creating and using a floating ip (string -# value) -#ssh_connect_method = fixed - -# Timeout in seconds to wait for authentication to succeed. (integer -# value) -#ssh_timeout = 300 - -# User name used to authenticate to an instance. (string value) -#ssh_user = root - -# Does SSH use Floating IPs? 
(boolean value) -#use_floatingip_for_ssh = true - -# Expected device name when a volume is attached to an instance -# (string value) -#volume_device_name = vdb - - -[compute-admin] - -# -# From tempest.config -# - -# Domain name for authentication as admin (Keystone V3).The same -# domain applies to user and project (string value) -#domain_name = - -# API key to use when authenticating as admin. (string value) -password = {{ NOVA_SERVICE_PASSWORD }} - -# Administrative Tenant name to use for Nova API requests. (string -# value) -tenant_name = service - -# Administrative Username to use for Nova API requests. (string value) -username = {{ NOVA_SERVICE_USER }} - - -[compute-feature-enabled] - -# -# From tempest.config -# - -# A list of enabled compute extensions with a special entry all which -# indicates every extension is enabled. Each extension should be -# specified with alias name. Empty list indicates all extensions are -# disabled (list value) -#api_extensions = all - -# If false, skip all nova v3 tests. (boolean value) -api_v3 = false - -# A list of enabled v3 extensions with a special entry all which -# indicates every extension is enabled. Each extension should be -# specified with alias name. Empty list indicates all extensions are -# disabled (list value) -#api_v3_extensions = all - -# Does the test environment block migration support cinder iSCSI -# volumes (boolean value) -#block_migrate_cinder_iscsi = false - -# Does the test environment use block devices for live migration -# (boolean value) -#block_migration_for_live_migration = false - -# Does the test environment support changing the admin password? -# (boolean value) -#change_password = false - -# Does the test environment support obtaining instance serial console -# output? (boolean value) -#console_output = true - -# If false, skip disk config tests (boolean value) -#disk_config = true - -# Enables returning of the instance password by the relevant server -# API calls such as create, rebuild or rescue. (boolean value) -#enable_instance_password = true - -# Does the test environment support dynamic network interface -# attachment? (boolean value) -#interface_attach = true - -# Does the test environment support live migration available? (boolean -# value) -#live_migration = false - -# Does the test environment support pausing? (boolean value) -#pause = true - -# Enable RDP console. This configuration value should be same as -# [nova.rdp]->enabled in nova.conf (boolean value) -#rdp_console = false - -# Does the test environment support instance rescue mode? (boolean -# value) -#rescue = true - -# Does the test environment support resizing? (boolean value) -#resize = false - -# Does the test environment support shelving/unshelving? (boolean -# value) -#shelve = true - -# Does the test environment support creating snapshot images of -# running instances? (boolean value) -snapshot = true - -# Enable Spice console. This configuration value should be same as -# [nova.spice]->enabled in nova.conf (boolean value) -spice_console = false - -# Does the test environment support suspend/resume? (boolean value) -#suspend = true - -# Enable VNC console. 
This configuration value should be same as -# [nova.vnc]->vnc_enabled in nova.conf (boolean value) -vnc_console = true - -# If false skip all v2 api tests with xml (boolean value) -#xml_api_v2 = true - - -[dashboard] - -# -# From tempest.config -# - -# Where the dashboard can be found (string value) -dashboard_url = http://{{ CONTROLLER_HOST_ADDRESS }}/horizon - -# Login page for the dashboard (string value) -login_url = http://{{ CONTROLLER_HOST_ADDRESS }}/horizon/auth/login/ - - -[data_processing] - -# -# From tempest.config -# - -# Catalog type of the data processing service. (string value) -#catalog_type = data_processing - -# The endpoint type to use for the data processing service. (string -# value) -#endpoint_type = publicURL - - -[database] - -# -# From tempest.config -# - -# Catalog type of the Database service. (string value) -#catalog_type = database - -# Current database version to use in database tests. (string value) -#db_current_version = v1.0 - -# Valid primary flavor to use in database tests. (string value) -#db_flavor_ref = 1 - - -[debug] - -# -# From tempest.config -# - -# Enable diagnostic commands (boolean value) -#enable = true - -# A regex to determine which requests should be traced. This is a -# regex to match the caller for rest client requests to be able to -# selectively trace calls out of specific classes and methods. It -# largely exists for test development, and is not expected to be used -# in a real deploy of tempest. This will be matched against the -# discovered ClassName:method in the test environment. Expected -# values for this field are: * ClassName:test_method_name - traces -# one test_method * ClassName:setUp(Class) - traces specific setup -# functions * ClassName:tearDown(Class) - traces specific teardown -# functions * ClassName:_run_cleanups - traces the cleanup functions -# If nothing is specified, this feature is not enabled. To trace -# everything specify .* as the regex. (string value) -#trace_requests = - - -[identity] - -# -# From tempest.config -# - -# Admin domain name for authentication (Keystone V3).The same domain -# applies to user and project (string value) -#admin_domain_name = - -# API key to use when authenticating as admin. (string value) -admin_password = {{ KEYSTONE_ADMIN_PASSWORD }} - -# Role required to administrate keystone. (string value) -admin_role = admin - -# Administrative Tenant name to use for Keystone API requests. (string -# value) -admin_tenant_name = admin - -# Administrative Username to use for Keystone API requests. (string -# value) -admin_username = admin - -# Alternate domain name for authentication (Keystone V3).The same -# domain applies to user and project (string value) -#alt_domain_name = - -# API key to use when authenticating as alternate user. (string value) -#alt_password = - -# Alternate user's Tenant name to use for Nova API requests. (string -# value) -#alt_tenant_name = - -# Username of alternate user to use for Nova API requests. (string -# value) -#alt_username = - -# Identity API version to be used for authentication for API tests. -# (string value) -auth_version = v2 - -# Catalog type of the Identity service. (string value) -catalog_type = identity - -# Set to True if using self-signed SSL certificates. (boolean value) -#disable_ssl_certificate_validation = false - -# Domain name for authentication (Keystone V3).The same domain applies -# to user and project (string value) -#domain_name = - -# The endpoint type to use for the identity service. 
(string value) -#endpoint_type = publicURL - -# API key to use when authenticating. (string value) -password = {{ NOVA_SERVICE_PASSWORD }} - -# The identity region name to use. Also used as the other services' -# region name unless they are set explicitly. If no such region is -# found in the service catalog, the first found one is used. (string -# value) -#region = RegionOne - -# Tenant name to use for Nova API requests. (string value) -tenant_name = service - -# Full URI of the OpenStack Identity API (Keystone), v2 (string value) -uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0/ - -# Full URI of the OpenStack Identity API (Keystone), v3 (string value) -# -# Tempest complains if we don't set any uri_v3, even if it's disabled. -uri_v3 = - -# Username to use for Nova API requests. (string value) -username = {{ NOVA_SERVICE_USER }} - - -[identity-feature-enabled] - -# -# From tempest.config -# - -# Is the v2 identity API enabled (boolean value) -api_v2 = true - -# Is the v3 identity API enabled (boolean value) -api_v3 = false - -# Does the identity service have delegation and impersonation enabled -# (boolean value) -#trust = true - - -[image] - -# -# From tempest.config -# - -# Catalog type of the Image service. (string value) -catalog_type = image - -# The endpoint type to use for the image service. (string value) -endpoint_type = publicURL - -# http accessible image (string value) -http_image = http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz - -# The image region name to use. If empty, the value of identity.region -# is used instead. If no such region is found in the service catalog, -# the first found one is used. (string value) -#region = - - -[image-feature-enabled] - -# -# From tempest.config -# - -# Is the v1 image API enabled (boolean value) -#api_v1 = true - -# Is the v2 image API enabled (boolean value) -api_v2 = true - - -[input-scenario] - -# -# From tempest.config -# - -# Matching flavors become parameters for scenario tests (string value) -#flavor_regex = ^m1.nano$ - -# Matching images become parameters for scenario tests (string value) -#image_regex = ^cirros-0.3.1-x86_64-uec$ - -# SSH verification in tests is skippedfor matching images (string -# value) -#non_ssh_image_regex = ^.*[Ww]in.*$ - -# List of user mapped to regex to matching image names. (string value) -#ssh_user_regex = [["^.*[Cc]irros.*$", "root"]] - - -[messaging] - -# -# From tempest.config -# - -# Catalog type of the Messaging service. 
(string value) -#catalog_type = messaging - -# The maximum grace period for a claim (integer value) -#max_claim_grace = 43200 - -# The maximum ttl for a claim (integer value) -#max_claim_ttl = 43200 - -# The maximum size of a message body (integer value) -#max_message_size = 262144 - -# The maximum ttl for a message (integer value) -#max_message_ttl = 1209600 - -# The maximum number of messages per claim (integer value) -#max_messages_per_claim = 20 - -# The maximum number of queue message per page when listing (or) -# posting messages (integer value) -#max_messages_per_page = 20 - -# The maximum metadata size for a queue (integer value) -#max_queue_metadata = 65536 - -# The maximum number of queue records per page when listing queues -# (integer value) -#max_queues_per_page = 20 - - -[negative] - -# -# From tempest.config -# - -# Test generator class for all negative tests (string value) -#test_generator = tempest.common.generator.negative_generator.NegativeTestGenerator - - -[network] - -# -# From tempest.config -# - -# Time in seconds between network operation status checks. (integer -# value) -#build_interval = 1 - -# Timeout in seconds to wait for network operation to complete. -# (integer value) -#build_timeout = 300 - -# Catalog type of the Neutron service. (string value) -#catalog_type = network - -# List of dns servers whichs hould be used for subnet creation (list -# value) -#dns_servers = 8.8.8.8,8.8.4.4 - -# The endpoint type to use for the network service. (string value) -#endpoint_type = publicURL - -# Id of the public network that provides external connectivity (string -# value) -#public_network_id = - -# Id of the public router that provides external connectivity (string -# value) -#public_router_id = - -# The network region name to use. If empty, the value of -# identity.region is used instead. If no such region is found in the -# service catalog, the first found one is used. (string value) -#region = - -# The cidr block to allocate tenant ipv4 subnets from (string value) -#tenant_network_cidr = 10.100.0.0/16 - -# The mask bits for tenant ipv4 subnets (integer value) -#tenant_network_mask_bits = 28 - -# The cidr block to allocate tenant ipv6 subnets from (string value) -#tenant_network_v6_cidr = 2003::/48 - -# The mask bits for tenant ipv6 subnets (integer value) -#tenant_network_v6_mask_bits = 64 - -# Whether tenant network connectivity should be evaluated directly -# (boolean value) -#tenant_networks_reachable = false - - -[network-feature-enabled] - -# -# From tempest.config -# - -# A list of enabled network extensions with a special entry all which -# indicates every extension is enabled. Empty list indicates all -# extensions are disabled (list value) -#api_extensions = all - -# Allow the execution of IPv6 tests (boolean value) -#ipv6 = true - -# Allow the execution of IPv6 subnet tests that use the extended IPv6 -# attributes ipv6_ra_mode and ipv6_address_mode (boolean value) -#ipv6_subnet_attributes = false - - -[object-storage] - -# -# From tempest.config -# - -# Catalog type of the Object-Storage service. (string value) -#catalog_type = object-store - -# Number of seconds to wait while looping to check the status of a -# container to container synchronization (integer value) -#container_sync_interval = 5 - -# Number of seconds to time on waiting for a container to container -# synchronization complete. (integer value) -#container_sync_timeout = 120 - -# The endpoint type to use for the object-store service. 
(string -# value) -#endpoint_type = publicURL - -# Role to add to users created for swift tests to enable creating -# containers (string value) -#operator_role = Member - -# The object-storage region name to use. If empty, the value of -# identity.region is used instead. If no such region is found in the -# service catalog, the first found one is used. (string value) -#region = - -# User role that has reseller admin (string value) -#reseller_admin_role = ResellerAdmin - - -[object-storage-feature-enabled] - -# -# From tempest.config -# - -# Execute (old style) container-sync tests (boolean value) -#container_sync = true - -# Execute discoverability tests (boolean value) -#discoverability = true - -# A list of the enabled optional discoverable apis. A single entry, -# all, indicates that all of these features are expected to be enabled -# (list value) -#discoverable_apis = all - -# Execute object-versioning tests (boolean value) -#object_versioning = true - - -[orchestration] - -# -# From tempest.config -# - -# Time in seconds between build status checks. (integer value) -#build_interval = 1 - -# Timeout in seconds to wait for a stack to build. (integer value) -#build_timeout = 1200 - -# Catalog type of the Orchestration service. (string value) -#catalog_type = orchestration - -# The endpoint type to use for the orchestration service. (string -# value) -#endpoint_type = publicURL - -# Name of heat-cfntools enabled image to use when launching test -# instances. (string value) -#image_ref = - -# Instance type for tests. Needs to be big enough for a full OS plus -# the test workload (string value) -#instance_type = m1.micro - -# Name of existing keypair to launch servers with. (string value) -#keypair_name = - -# Value must match heat configuration of the same name. (integer -# value) -#max_resources_per_stack = 1000 - -# Value must match heat configuration of the same name. (integer -# value) -#max_template_size = 524288 - -# The orchestration region name to use. If empty, the value of -# identity.region is used instead. If no such region is found in the -# service catalog, the first found one is used. (string value) -#region = - - -[scenario] - -# -# From tempest.config -# - -# AKI image file name (string value) -#aki_img_file = cirros-0.3.1-x86_64-vmlinuz - -# AMI image file name (string value) -#ami_img_file = cirros-0.3.1-x86_64-blank.img - -# ARI image file name (string value) -#ari_img_file = cirros-0.3.1-x86_64-initrd - -# Image container format (string value) -#img_container_format = bare - -# Directory containing image files (string value) -#img_dir = /opt/stack/new/devstack/files/images/cirros-0.3.1-x86_64-uec - -# Image disk format (string value) -#img_disk_format = qcow2 - -# Image file name (string value) -# Deprecated group/name - [DEFAULT]/qcow2_img_file -#img_file = cirros-0.3.1-x86_64-disk.img - -# specifies how many resources to request at once. Used for large -# operations testing. 
(integer value) -#large_ops_number = 0 - -# ssh username for the image file (string value) -#ssh_user = cirros - - -[service_available] - -# -# From tempest.config -# - -# Whether or not Ceilometer is expected to be available (boolean -# value) -ceilometer = false - -# Whether or not cinder is expected to be available (boolean value) -cinder = true - -# Whether or not glance is expected to be available (boolean value) -glance = true - -# Whether or not Heat is expected to be available (boolean value) -heat = false - -# Whether or not Horizon is expected to be available (boolean value) -horizon = true - -# Whether or not Ironic is expected to be available (boolean value) -ironic = false - -# Whether or not neutron is expected to be available (boolean value) -neutron = true - -# Whether or not nova is expected to be available (boolean value) -nova = true - -# Whether or not Sahara is expected to be available (boolean value) -sahara = false - -# Whether or not swift is expected to be available (boolean value) -swift = false - -# Whether or not Trove is expected to be available (boolean value) -trove = false - -# Whether or not Zaqar is expected to be available (boolean value) -zaqar = false - - -[stress] - -# -# From tempest.config -# - -# Controller host. (string value) -#controller = - -# The number of threads created while stress test. (integer value) -#default_thread_number_per_action = 4 - -# Allows a full cleaning process after a stress test. Caution : this -# cleanup will remove every objects of every tenant. (boolean value) -#full_clean_stack = false - -# Prevent the cleaning (tearDownClass()) between each stress test run -# if an exception occurs during this run. (boolean value) -#leave_dirty_stack = false - -# time (in seconds) between log file error checks. (integer value) -#log_check_interval = 60 - -# Maximum number of instances to create during test. (integer value) -#max_instances = 16 - -# Directory containing log files on the compute nodes (string value) -#nova_logdir = - -# Controller host. (string value) -#target_controller = - -# regexp for list of log files. (string value) -#target_logfiles = - -# Path to private key. (string value) -#target_private_key_path = - -# ssh user. (string value) -#target_ssh_user = - - -[telemetry] - -# -# From tempest.config -# - -# Catalog type of the Telemetry service. (string value) -#catalog_type = metering - -# The endpoint type to use for the telemetry service. (string value) -#endpoint_type = publicURL - -# This variable is used as flag to enable notification tests (boolean -# value) -#too_slow_to_test = true - - -[volume] - -# -# From tempest.config -# - -# Name of the backend1 (must be declared in cinder.conf) (string -# value) -backend1_name = LVM_iSCSI - -# Name of the backend2 (must be declared in cinder.conf) (string -# value) -#backend2_name = BACKEND_2 - -# Time in seconds between volume availability checks. (integer value) -#build_interval = 1 - -# Timeout in seconds to wait for a volume to become available. -# (integer value) -#build_timeout = 300 - -# Catalog type of the Volume Service (string value) -catalog_type = volume - -# Disk format to use when copying a volume to image (string value) -disk_format = raw - -# The endpoint type to use for the volume service. (string value) -endpoint_type = publicURL - -# The volume region name to use. If empty, the value of -# identity.region is used instead. If no such region is found in the -# service catalog, the first found one is used. 
(string value) -#region = - -# Backend protocol to target when creating volume types (string value) -storage_protocol = iSCSI - -# Backend vendor to target when creating volume types (string value) -#vendor_name = Open Source - -# Default size in GB for volumes created by volumes tests (integer -# value) -volume_size = 1 - - -[volume-feature-enabled] - -# -# From tempest.config -# - -# A list of enabled volume extensions with a special entry all which -# indicates every extension is enabled. Empty list indicates all -# extensions are disabled (list value) -#api_extensions = all - -# Is the v1 volume API enabled (boolean value) -api_v1 = true - -# Is the v2 volume API enabled (boolean value) -api_v2 = true - -# Runs Cinder volumes backup test (boolean value) -backup = true - -# Runs Cinder multi-backend test (requires 2 backends) (boolean value) -multi_backend = false - -# Runs Cinder volume snapshot test (boolean value) -snapshot = true diff --git a/openstack/manifest b/openstack/manifest deleted file mode 100644 index aa4d5430..00000000 --- a/openstack/manifest +++ /dev/null @@ -1,190 +0,0 @@ -0040755 0 0 /etc/horizon -0100644 0 0 /etc/horizon/apache-horizon.conf -0040755 0 0 /etc/horizon/openstack_dashboard -0100644 0 0 /etc/horizon/openstack_dashboard/local_settings.py -template 0100644 0 0 /etc/tempest/tempest.conf -0040755 0 0 /usr/share/openstack -0100644 0 0 /usr/share/openstack/hosts -0040755 0 0 /usr/share/openstack/ceilometer -0100644 0 0 /usr/share/openstack/ceilometer-config.yml -0100644 0 0 /usr/share/openstack/ceilometer-db.yml -0100644 0 0 /usr/share/openstack/ceilometer/ceilometer.conf -0040755 0 0 /usr/share/openstack/cinder -0100644 0 0 /usr/share/openstack/cinder-config.yml -0100644 0 0 /usr/share/openstack/cinder-db.yml -0100644 0 0 /usr/share/openstack/cinder-lvs.yml -0100644 0 0 /usr/share/openstack/cinder/cinder.conf -0100644 0 0 /usr/share/openstack/cinder/api-paste.ini -0100644 0 0 /usr/share/openstack/cinder/policy.json -0040755 0 0 /usr/share/openstack/extras -0100644 0 0 /usr/share/openstack/extras/00-disable-device.network -0100644 0 0 /usr/share/openstack/extras/60-device-dhcp.network -0100644 0 0 /usr/share/openstack/glance.yml -0040755 0 0 /usr/share/openstack/glance -0100644 0 0 /usr/share/openstack/glance/logging.conf -0100644 0 0 /usr/share/openstack/glance/glance-api.conf -0100644 0 0 /usr/share/openstack/glance/glance-registry.conf -0100644 0 0 /usr/share/openstack/glance/glance-scrubber.conf -0100644 0 0 /usr/share/openstack/glance/glance-cache.conf -0100644 0 0 /usr/share/openstack/glance/schema-image.json -0100644 0 0 /usr/share/openstack/glance/policy.json -0100644 0 0 /usr/share/openstack/glance/glance-api-paste.ini -0100644 0 0 /usr/share/openstack/glance/glance-registry-paste.ini -0100644 0 0 /usr/share/openstack/horizon.yml -0040755 0 0 /usr/share/openstack/ironic -0100644 0 0 /usr/share/openstack/ironic.yml -0100644 0 0 /usr/share/openstack/ironic/ironic.conf -0100644 0 0 /usr/share/openstack/ironic/policy.json -0100644 0 0 /usr/share/openstack/iscsi.yml -0100644 0 0 /usr/share/openstack/keystone.yml -0040755 0 0 /usr/share/openstack/keystone -0100644 0 0 /usr/share/openstack/keystone/logging.conf -0100644 0 0 /usr/share/openstack/keystone/keystone.conf -0100644 0 0 /usr/share/openstack/keystone/policy.json -0100644 0 0 /usr/share/openstack/keystone/keystone-paste.ini -0100644 0 0 /usr/share/openstack/network.yml -0040755 0 0 /usr/share/openstack/neutron -0100644 0 0 /usr/share/openstack/neutron-config.yml -0100644 0 0 
/usr/share/openstack/neutron-db.yml
-0100644 0 0 /usr/share/openstack/neutron/neutron.conf
-0100644 0 0 /usr/share/openstack/neutron/api-paste.ini
-0100644 0 0 /usr/share/openstack/neutron/policy.json
-0100644 0 0 /usr/share/openstack/neutron/l3_agent.ini
-0100644 0 0 /usr/share/openstack/neutron/dhcp_agent.ini
-0100644 0 0 /usr/share/openstack/neutron/lbaas_agent.ini
-0100644 0 0 /usr/share/openstack/neutron/metadata_agent.ini
-0100644 0 0 /usr/share/openstack/neutron/fwaas_driver.ini
-0100644 0 0 /usr/share/openstack/neutron/metering_agent.ini
-0100644 0 0 /usr/share/openstack/neutron/vpn_agent.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/
-0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch
-0100644 0 0 /usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl
-0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs
-0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs
-0100644 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README
-0100644 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README
-0040755 0 0 /usr/share/openstack/neutron/plugins/brocade
-0100644 0 0 /usr/share/openstack/neutron/plugins/brocade/brocade.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/cisco
-0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/embrane
-0100644 0 0 /usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/hyperv
-0100644 0 0 /usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/ibm
-0100644 0 0 /usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/linuxbridge
-0100644 0 0 /usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/metaplugin
-0100644 0 0 /usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/midonet
-0100644 0 0 /usr/share/openstack/neutron/plugins/midonet/midonet.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/ml2
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/mlnx
-0100644 0 0 /usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/nec
-0100644 0 0 /usr/share/openstack/neutron/plugins/nec/nec.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/nuage
-0100644 0 0 /usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/oneconvergence
-0100644 0 0 /usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/opencontrail
-0100644 0 0 /usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/openvswitch
-0100644 0 0 /usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/plumgrid
-0100644 0 0 /usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/vmware
-0100644 0 0 /usr/share/openstack/neutron/plugins/vmware/nsx.ini
-0040755 0 0 /usr/share/openstack/nova
-0100644 0 0 /usr/share/openstack/nova-config.yml
-0100644 0 0 /usr/share/openstack/nova-db.yml
-0100644 0 0 /usr/share/openstack/nova/logging.conf
-0100644 0 0 /usr/share/openstack/nova/nova.conf
-0100644 0 0 /usr/share/openstack/nova/nova-compute.conf
-0100644 0 0 /usr/share/openstack/nova/policy.json
-0100644 0 0 /usr/share/openstack/nova/cells.json
-0100644 0 0 /usr/share/openstack/nova/api-paste.ini
-0100644 0 0 /usr/share/openstack/openvswitch.yml
-0040755 0 0 /usr/share/openstack/postgres
-0100644 0 0 /usr/share/openstack/postgres.yml
-0100644 0 0 /usr/share/openstack/postgres/pg_hba.conf
-0100644 0 0 /usr/share/openstack/postgres/postgresql.conf
-0040755 0 0 /usr/share/openstack/rabbitmq
-0100644 0 0 /usr/share/openstack/rabbitmq/rabbitmq-env.conf
-0100644 0 0 /usr/share/openstack/rabbitmq/rabbitmq.config
-0040755 0 0 /usr/lib/sysctl.d
-0100644 0 0 /usr/lib/sysctl.d/neutron.conf
-0100644 0 0 /usr/lib/systemd/system/apache-httpd.service
-0100644 0 0 /usr/lib/systemd/system/iscsi-setup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-keystone.service
-0100644 0 0 /usr/lib/systemd/system/openstack-keystone-setup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-glance-setup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-glance-api.service
-0100644 0 0 /usr/lib/systemd/system/openstack-glance-registry.service
-0100644 0 0 /usr/lib/systemd/system/openstack-horizon-setup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-ironic-setup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-ironic-api.service
-0100644 0 0 /usr/lib/systemd/system/openstack-ironic-conductor.service
-0100644 0 0 /usr/lib/systemd/system/openstack-network-setup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-neutron-config-setup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-neutron-db-setup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-neutron-server.service
-0100644 0 0 /usr/lib/systemd/system/openstack-neutron-metadata-agent.service
-0100644 0 0 /usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service
-0100644 0 0 /usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-neutron-dhcp-agent.service
-0100644 0 0 /usr/lib/systemd/system/openstack-neutron-l3-agent.service
-0100644 0 0 /usr/lib/systemd/system/openstack-nova-config-setup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-nova-db-setup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-nova-compute.service
-0100644 0 0 /usr/lib/systemd/system/openstack-nova-conductor.service
-0100644 0 0 /usr/lib/systemd/system/openstack-nova-api.service
-0100644 0 0 /usr/lib/systemd/system/openstack-nova-scheduler.service
-0100644 0 0 /usr/lib/systemd/system/openstack-nova-consoleauth.service
-0100644 0 0 /usr/lib/systemd/system/openstack-nova-novncproxy.service
-0100644 0 0 /usr/lib/systemd/system/openstack-nova-cert.service
-0100644 0 0 /usr/lib/systemd/system/openstack-nova-serialproxy.service
-0100644 0 0 /usr/lib/systemd/system/rabbitmq-server.service
-0100644 0 0 /usr/lib/systemd/system/openstack-cinder-config-setup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-cinder-db-setup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-cinder-lv-setup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-cinder-api.service
-0100644 0 0 /usr/lib/systemd/system/openstack-cinder-scheduler.service
-0100644 0 0 /usr/lib/systemd/system/openstack-cinder-volume.service
-0100644 0 0 /usr/lib/systemd/system/openstack-cinder-backup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-config-setup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-db-setup.service
-0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service
-0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service
-0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-api.service
-0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-central.service
-0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-collector.service
-0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-compute.service
-0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-notification.service
-0100644 0 0 /usr/lib/systemd/system/openvswitch-setup.service
-0100644 0 0 /usr/lib/systemd/system/openvswitch-db-server.service
-0100644 0 0 /usr/lib/systemd/system/openvswitch.service
-0100644 0 0 /usr/lib/systemd/system/postgres-server.service
-0100644 0 0 /usr/lib/systemd/system/postgres-server-setup.service
-0100644 0 0 /usr/share/openstack/swift-controller.yml
-0100644 0 0 /usr/lib/systemd/system/swift-controller-setup.service
-0100644 0 0 /usr/lib/systemd/system/swift-proxy.service
-0040755 0 0 /usr/share/swift
-0040755 0 0 /usr/share/swift/etc
-0040755 0 0 /usr/share/swift/etc/swift
-0100644 0 0 /usr/share/swift/etc/swift/proxy-server.j2
diff --git a/openstack/usr/lib/sysctl.d/neutron.conf b/openstack/usr/lib/sysctl.d/neutron.conf
deleted file mode 100644
index 644ca116..00000000
--- a/openstack/usr/lib/sysctl.d/neutron.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-# Disable rp filtering, enabling forwarding is handled by networkd
-net.ipv4.conf.all.rp_filter=0
-net.ipv4.conf.default.rp_filter=0
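The sysctl fragment above relied on the kernel's reverse-path filter being
off while IP forwarding was enabled elsewhere (by networkd). A minimal shell
check of that state on a running node, assuming the standard procps sysctl
tool is available, might look like:

    # Reverse-path filtering should read 0 (disabled) on a network node
    sysctl net.ipv4.conf.all.rp_filter net.ipv4.conf.default.rp_filter
    # Forwarding is expected to be enabled by the network configuration,
    # not by this fragment
    sysctl net.ipv4.ip_forward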
diff --git a/openstack/usr/lib/systemd/system/apache-httpd.service b/openstack/usr/lib/systemd/system/apache-httpd.service
deleted file mode 100644
index e2a840c6..00000000
--- a/openstack/usr/lib/systemd/system/apache-httpd.service
+++ /dev/null
@@ -1,16 +0,0 @@
-[Unit]
-Description=Apache Web Server
-After=network.target remote-fs.target nss-lookup.target
-Wants=network.target
-
-[Service]
-Type=forking
-PIDFile=/var/run/httpd.pid
-ExecStart=/usr/sbin/apachectl start
-ExecStop=/usr/sbin/apachectl graceful-stop
-ExecReload=/usr/sbin/apachectl graceful
-PrivateTmp=true
-LimitNOFILE=infinity
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/iscsi-setup.service b/openstack/usr/lib/systemd/system/iscsi-setup.service
deleted file mode 100644
index 4cb10045..00000000
--- a/openstack/usr/lib/systemd/system/iscsi-setup.service
+++ /dev/null
@@ -1,12 +0,0 @@
-[Unit]
-Description=Run iscsi-setup Ansible scripts
-Before=iscsid.service target.service
-Wants=iscsid.service target.service
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/iscsi.yml
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service
deleted file mode 100644
index 6e3ada59..00000000
--- a/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack ceilometer alarm evaluation service
-ConditionPathExists=/etc/ceilometer/ceilometer.conf
-After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=ceilometer
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/ceilometer-alarm-evaluator --config-file /etc/ceilometer/ceilometer.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service
deleted file mode 100644
index 7a3e1c91..00000000
--- a/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack ceilometer alarm notification service
-ConditionPathExists=/etc/ceilometer/ceilometer.conf
-After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=ceilometer
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/ceilometer-alarm-notifier --config-file /etc/ceilometer/ceilometer.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-api.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-api.service
deleted file mode 100644
index eb0293bf..00000000
--- a/openstack/usr/lib/systemd/system/openstack-ceilometer-api.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack ceilometer API service
-ConditionPathExists=/etc/ceilometer/ceilometer.conf
-After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=ceilometer
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/ceilometer-api --config-file /etc/ceilometer/ceilometer.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-central.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-central.service
deleted file mode 100644
index a1bc11ee..00000000
--- a/openstack/usr/lib/systemd/system/openstack-ceilometer-central.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack ceilometer central agent
-ConditionPathExists=/etc/ceilometer/ceilometer.conf
-After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service openstack-ceilometer-collector.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=ceilometer
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/ceilometer-agent-central --config-file /etc/ceilometer/ceilometer.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-collector.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-collector.service
deleted file mode 100644
index dafc3ac7..00000000
--- a/openstack/usr/lib/systemd/system/openstack-ceilometer-collector.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack ceilometer collection service
-ConditionPathExists=/etc/ceilometer/ceilometer.conf
-After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=ceilometer
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/ceilometer-collector --config-file /etc/ceilometer/ceilometer.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-compute.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-compute.service
deleted file mode 100644
index 9fe8a1e6..00000000
--- a/openstack/usr/lib/systemd/system/openstack-ceilometer-compute.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack ceilometer compute agent
-ConditionPathExists=/etc/ceilometer/ceilometer.conf
-After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=ceilometer
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/ceilometer-agent-compute --config-file /etc/ceilometer/ceilometer.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-config-setup.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-config-setup.service
deleted file mode 100644
index c3e809d7..00000000
--- a/openstack/usr/lib/systemd/system/openstack-ceilometer-config-setup.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=Run ceilometer-config-setup Ansible scripts
-ConditionPathExists=/etc/openstack/ceilometer.conf
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/ceilometer-config.yml
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-db-setup.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-db-setup.service
deleted file mode 100644
index 7a785227..00000000
--- a/openstack/usr/lib/systemd/system/openstack-ceilometer-db-setup.service
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=Run ceilometer-db-setup Ansible scripts
-ConditionPathExists=/etc/openstack/ceilometer.conf
-After=network-online.target postgres-server-setup.service openstack-keystone-setup.service openstack-ceilometer-config-setup.service
-Wants=network-online.target
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/ceilometer-db.yml
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ceilometer-notification.service b/openstack/usr/lib/systemd/system/openstack-ceilometer-notification.service
deleted file mode 100644
index 6696116e..00000000
--- a/openstack/usr/lib/systemd/system/openstack-ceilometer-notification.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack ceilometer notification agent
-ConditionPathExists=/etc/ceilometer/ceilometer.conf
-After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service openstack-ceilometer-collector.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=ceilometer
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/ceilometer-agent-notification --config-file /etc/ceilometer/ceilometer.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-cinder-api.service b/openstack/usr/lib/systemd/system/openstack-cinder-api.service
deleted file mode 100644
index a284f31d..00000000
--- a/openstack/usr/lib/systemd/system/openstack-cinder-api.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack Volume Service (code-named Cinder) API server
-ConditionPathExists=/etc/cinder/cinder.conf
-After=network-online.target openstack-cinder-config-setup.service openstack-cinder-db-setup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=cinder
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/cinder-api --config-file /etc/cinder/cinder.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-cinder-backup.service b/openstack/usr/lib/systemd/system/openstack-cinder-backup.service
deleted file mode 100644
index c14e13aa..00000000
--- a/openstack/usr/lib/systemd/system/openstack-cinder-backup.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack Cinder backup server
-ConditionPathExists=/etc/cinder/cinder.conf
-After=network-online.target openstack-cinder-config-setup.service openstack-cinder-db-setup.service openstack-cinder-volume.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=cinder
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/cinder-backup --config-file /etc/cinder/cinder.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-cinder-config-setup.service b/openstack/usr/lib/systemd/system/openstack-cinder-config-setup.service
deleted file mode 100644
index 1c966933..00000000
--- a/openstack/usr/lib/systemd/system/openstack-cinder-config-setup.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=Run cinder-config-setup Ansible scripts
-ConditionPathExists=/etc/openstack/cinder.conf
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/cinder-config.yml
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-cinder-db-setup.service b/openstack/usr/lib/systemd/system/openstack-cinder-db-setup.service
deleted file mode 100644
index a3c66d67..00000000
--- a/openstack/usr/lib/systemd/system/openstack-cinder-db-setup.service
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=Run cinder-db-setup Ansible scripts
-ConditionPathExists=/etc/openstack/cinder.conf
-After=network-online.target postgres-server-setup.service openstack-keystone-setup.service openstack-cinder-config-setup.service
-Wants=network-online.target
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/cinder-db.yml
-
-[Install]
-WantedBy=multi-user.target
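All of the *-config-setup and *-db-setup units above follow one pattern:
Type=oneshot with RemainAfterExit=yes, so the unit counts as active once its
playbook has finished and later units can order themselves After= it. The
same playbooks can be re-run by hand when debugging; a sketch, assuming the
playbooks and inventory are still deployed under /usr/share/openstack:

    # Mirror what openstack-cinder-db-setup.service executes
    ansible-playbook -v \
        -M /usr/share/ansible/ansible-openstack-modules \
        -i /usr/share/openstack/hosts \
        /usr/share/openstack/cinder-db.yml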
diff --git a/openstack/usr/lib/systemd/system/openstack-cinder-lv-setup.service b/openstack/usr/lib/systemd/system/openstack-cinder-lv-setup.service
deleted file mode 100644
index 82e9b08d..00000000
--- a/openstack/usr/lib/systemd/system/openstack-cinder-lv-setup.service
+++ /dev/null
@@ -1,12 +0,0 @@
-[Unit]
-Description=Run cinder-lvs-setup Ansible scripts
-ConditionPathExists=/etc/openstack/cinder.conf
-Wants=lvm2-lvmetad.service
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/cinder-lvs.yml
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-cinder-scheduler.service b/openstack/usr/lib/systemd/system/openstack-cinder-scheduler.service
deleted file mode 100644
index f205aaff..00000000
--- a/openstack/usr/lib/systemd/system/openstack-cinder-scheduler.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack Cinder scheduler server
-ConditionPathExists=/etc/cinder/cinder.conf
-After=network-online.target openstack-cinder-config-setup.service openstack-cinder-db-setup.service openstack-cinder-volume.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=cinder
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/cinder-scheduler --config-file /etc/cinder/cinder.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-cinder-volume.service b/openstack/usr/lib/systemd/system/openstack-cinder-volume.service
deleted file mode 100644
index c56ee693..00000000
--- a/openstack/usr/lib/systemd/system/openstack-cinder-volume.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack Cinder volume server
-ConditionPathExists=/etc/cinder/cinder.conf
-After=network-online.target openstack-cinder-config-setup.service openstack-cinder-db-setup.service openstack-cinder-lv-setup.service lvm2-lvmetad.service iscsid.service target.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=cinder
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/cinder-volume --config-file /etc/cinder/cinder.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-glance-api.service b/openstack/usr/lib/systemd/system/openstack-glance-api.service
deleted file mode 100644
index 4c34ff10..00000000
--- a/openstack/usr/lib/systemd/system/openstack-glance-api.service
+++ /dev/null
@@ -1,16 +0,0 @@
-[Unit]
-Description=OpenStack Image Service (code-named Glance) API server
-ConditionPathExists=/etc/glance/glance-api.conf
-After=network-online.target
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=glance
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/glance-api --config-file /etc/glance/glance-api.conf
-
-[Install]
-WantedBy=multi-user.target
-
diff --git a/openstack/usr/lib/systemd/system/openstack-glance-registry.service b/openstack/usr/lib/systemd/system/openstack-glance-registry.service
deleted file mode 100644
index d53c8b33..00000000
--- a/openstack/usr/lib/systemd/system/openstack-glance-registry.service
+++ /dev/null
@@ -1,16 +0,0 @@
-[Unit]
-Description=OpenStack Image Service (code-named Glance) Registry server
-ConditionPathExists=/etc/glance/glance-registry.conf
-After=network-online.target
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=glance
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/glance-registry --config-file /etc/glance/glance-registry.conf
-
-[Install]
-WantedBy=multi-user.target
-
diff --git a/openstack/usr/lib/systemd/system/openstack-glance-setup.service b/openstack/usr/lib/systemd/system/openstack-glance-setup.service
deleted file mode 100644
index 43810797..00000000
--- a/openstack/usr/lib/systemd/system/openstack-glance-setup.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=Run glance-setup Ansible scripts
-ConditionPathExists=/etc/openstack/glance.conf
-After=local-fs.target network-online.target postgres-server-setup.service openstack-keystone-setup.service
-Wants=network-online.target
-
-[Service]
-ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/glance.yml
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-horizon-setup.service b/openstack/usr/lib/systemd/system/openstack-horizon-setup.service
deleted file mode 100644
index 9ec3197a..00000000
--- a/openstack/usr/lib/systemd/system/openstack-horizon-setup.service
+++ /dev/null
@@ -1,10 +0,0 @@
-[Unit]
-Description=Run horizon-setup Ansible scripts
-After=local-fs.target
-Before=apache-httpd.service
-
-[Service]
-ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/horizon.yml
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-ironic-api.service b/openstack/usr/lib/systemd/system/openstack-ironic-api.service
deleted file mode 100644
index 5a286a95..00000000
--- a/openstack/usr/lib/systemd/system/openstack-ironic-api.service
+++ /dev/null
@@ -1,16 +0,0 @@
-[Unit]
-Description=OpenStack Bare Metal Provisioning Service (code-named Ironic) API server
-ConditionPathExists=/etc/ironic/ironic.conf
-After=network-online.target
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=ironic
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/ironic-api --config-file /etc/ironic/ironic.conf
-
-[Install]
-WantedBy=multi-user.target
-
diff --git a/openstack/usr/lib/systemd/system/openstack-ironic-conductor.service b/openstack/usr/lib/systemd/system/openstack-ironic-conductor.service
deleted file mode 100644
index b3b226e0..00000000
--- a/openstack/usr/lib/systemd/system/openstack-ironic-conductor.service
+++ /dev/null
@@ -1,16 +0,0 @@
-[Unit]
-Description=OpenStack Bare Metal Provisioning Service (code-named Ironic) Conductor server
-ConditionPathExists=/etc/ironic/ironic.conf
-After=network-online.target
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=ironic
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/ironic-conductor --config-file /etc/ironic/ironic.conf
-
-[Install]
-WantedBy=multi-user.target
-
diff --git a/openstack/usr/lib/systemd/system/openstack-ironic-setup.service b/openstack/usr/lib/systemd/system/openstack-ironic-setup.service
deleted file mode 100644
index e3a58eb5..00000000
--- a/openstack/usr/lib/systemd/system/openstack-ironic-setup.service
+++ /dev/null
@@ -1,12 +0,0 @@
-[Unit]
-Description=Run ironic-setup Ansible scripts
-ConditionPathExists=/etc/openstack/ironic.conf
-After=local-fs.target network-online.target postgres-server-setup.service openstack-keystone-setup.service
-Wants=network-online.target
-
-[Service]
-ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/ironic.yml
-
-[Install]
-WantedBy=multi-user.target
-
diff --git a/openstack/usr/lib/systemd/system/openstack-keystone-setup.service b/openstack/usr/lib/systemd/system/openstack-keystone-setup.service
deleted file mode 100644
index db9d0b2b..00000000
--- a/openstack/usr/lib/systemd/system/openstack-keystone-setup.service
+++ /dev/null
@@ -1,14 +0,0 @@
-[Unit]
-Description=Run keystone-setup Ansible scripts
-ConditionPathExists=/etc/openstack/keystone.conf
-After=local-fs.target network-online.target postgres-server-setup.service
-Wants=network-online.target
-
-[Service]
-# Oneshot, since others setup have to wait until this service finishes
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/keystone.yml
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-keystone.service b/openstack/usr/lib/systemd/system/openstack-keystone.service
deleted file mode 100644
index 6f6ff644..00000000
--- a/openstack/usr/lib/systemd/system/openstack-keystone.service
+++ /dev/null
@@ -1,16 +0,0 @@
-[Unit]
-Description=OpenStack Identity Service (code-named Keystone)
-ConditionPathExists=/etc/keystone/keystone.conf
-After=network-online.target
-Wants=network-online.target
-
-[Service]
-Type=notify
-Restart=always
-User=keystone
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/keystone-all --config-file /etc/keystone/keystone.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-network-setup.service b/openstack/usr/lib/systemd/system/openstack-network-setup.service
deleted file mode 100644
index 021370d9..00000000
--- a/openstack/usr/lib/systemd/system/openstack-network-setup.service
+++ /dev/null
@@ -1,12 +0,0 @@
-[Unit]
-Description=Run Ansible scripts to configure internal network for OpenStack
-After=openvswitch-setup.service openvswitch.service
-Before=systemd-networkd.service
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/network.yml
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-neutron-config-setup.service b/openstack/usr/lib/systemd/system/openstack-neutron-config-setup.service
deleted file mode 100644
index b74f44ab..00000000
--- a/openstack/usr/lib/systemd/system/openstack-neutron-config-setup.service
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=Run neutron-config-setup Ansible scripts
-ConditionPathExists=/etc/openstack/neutron.conf
-After=network-online.target openstack-keystone-setup.service
-Wants=network-online.target
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/neutron-config.yml
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-neutron-db-setup.service b/openstack/usr/lib/systemd/system/openstack-neutron-db-setup.service
deleted file mode 100644
index 5d07da2e..00000000
--- a/openstack/usr/lib/systemd/system/openstack-neutron-db-setup.service
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=Run neutron-db-setup Ansible scripts
-ConditionPathExists=/etc/openstack/neutron.conf
-After=network-online.target postgres-server-setup.service openstack-keystone-setup.service openstack-neutron-config-setup.service
-Wants=network-online.target
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/neutron-db.yml
-
-[Install]
-WantedBy=multi-user.target
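The neutron units that follow are sequenced purely through unit dependencies:
config setup runs before the DB setup, ovs-cleanup runs before the agents,
and each agent orders itself After= the setup units it needs. Whether that
ordering actually holds on a deployed system can be checked with systemd's
own tools, for example:

    # Show what must start before the DHCP agent
    systemctl list-dependencies --after openstack-neutron-dhcp-agent.service
    # Sanity-check a unit file for syntax and missing dependencies
    systemd-analyze verify /usr/lib/systemd/system/openstack-neutron-dhcp-agent.service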
diff --git a/openstack/usr/lib/systemd/system/openstack-neutron-dhcp-agent.service b/openstack/usr/lib/systemd/system/openstack-neutron-dhcp-agent.service
deleted file mode 100644
index 9080f3c1..00000000
--- a/openstack/usr/lib/systemd/system/openstack-neutron-dhcp-agent.service
+++ /dev/null
@@ -1,17 +0,0 @@
-[Unit]
-Description=Neutron DHCP Agent
-ConditionPathExists=/etc/neutron/neutron.conf
-After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service openstack-neutron-ovs-cleanup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=neutron
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/neutron-dhcp-agent \
-  --config-file=/etc/neutron/neutron.conf \
-  --config-file=/etc/neutron/dhcp_agent.ini
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-neutron-l3-agent.service b/openstack/usr/lib/systemd/system/openstack-neutron-l3-agent.service
deleted file mode 100644
index 76efea5c..00000000
--- a/openstack/usr/lib/systemd/system/openstack-neutron-l3-agent.service
+++ /dev/null
@@ -1,18 +0,0 @@
-[Unit]
-Description=Neutron Layer 3 Agent
-ConditionPathExists=/etc/neutron/neutron.conf
-After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service openstack-neutron-ovs-cleanup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=neutron
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/neutron-l3-agent \
-  --config-file=/etc/neutron/neutron.conf \
-  --config-file=/etc/neutron/l3_agent.ini \
-  --config-file=/etc/neutron/fwaas_driver.ini
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-neutron-metadata-agent.service b/openstack/usr/lib/systemd/system/openstack-neutron-metadata-agent.service
deleted file mode 100644
index 20540e4c..00000000
--- a/openstack/usr/lib/systemd/system/openstack-neutron-metadata-agent.service
+++ /dev/null
@@ -1,17 +0,0 @@
-[Unit]
-Description=Neutron Metadata Plugin Agent
-ConditionPathExists=/etc/neutron/neutron.conf
-After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=neutron
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/neutron-metadata-agent \
-  --config-file=/etc/neutron/neutron.conf \
-  --config-file=/etc/neutron/metadata_agent.ini
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service b/openstack/usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service
deleted file mode 100644
index f5709028..00000000
--- a/openstack/usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service
+++ /dev/null
@@ -1,18 +0,0 @@
-[Unit]
-Description=Neutron OVS cleanup
-ConditionPathExists=/etc/neutron/neutron.conf
-ConditionFileIsExecutable=/usr/bin/neutron-ovs-cleanup
-After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service openvswitch.service
-Before=openstack-neutron-plugin-openvswitch-agent.service
-Wants=network-online.target
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-StandardOutput=null
-StandardError=null
-User=neutron
-ExecStart=/usr/bin/neutron-ovs-cleanup --config-file /etc/neutron/neutron.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service b/openstack/usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service
deleted file mode 100644
index 6c579a62..00000000
--- a/openstack/usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service
+++ /dev/null
@@ -1,17 +0,0 @@
-[Unit]
-Description=Neutron OpenvSwitch Plugin Agent
-ConditionPathExists=/etc/neutron/neutron.conf
-After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=neutron
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/neutron-openvswitch-agent \
-  --config-file=/etc/neutron/neutron.conf \
-  --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-neutron-server.service b/openstack/usr/lib/systemd/system/openstack-neutron-server.service
deleted file mode 100644
index 6376c3d8..00000000
--- a/openstack/usr/lib/systemd/system/openstack-neutron-server.service
+++ /dev/null
@@ -1,17 +0,0 @@
-[Unit]
-Description=Neutron Api Server
-ConditionPathExists=/etc/neutron/neutron.conf
-After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=neutron
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/neutron-server \
-  --config-file=/etc/neutron/neutron.conf \
-  --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-api.service b/openstack/usr/lib/systemd/system/openstack-nova-api.service
deleted file mode 100644
index 521353db..00000000
--- a/openstack/usr/lib/systemd/system/openstack-nova-api.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack Compute Service (code-named Nova) API server
-ConditionPathExists=/etc/nova/nova.conf
-After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=nova
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/nova-api --config-file /etc/nova/nova.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-cert.service b/openstack/usr/lib/systemd/system/openstack-nova-cert.service
deleted file mode 100644
index b3733816..00000000
--- a/openstack/usr/lib/systemd/system/openstack-nova-cert.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack Nova Cert
-ConditionPathExists=/etc/nova/nova.conf
-After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=nova
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/nova-cert --config-file /etc/nova/nova.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-compute.service b/openstack/usr/lib/systemd/system/openstack-nova-compute.service
deleted file mode 100644
index 4f9b8196..00000000
--- a/openstack/usr/lib/systemd/system/openstack-nova-compute.service
+++ /dev/null
@@ -1,16 +0,0 @@
-[Unit]
-Description=OpenStack Compute Service (code-named Nova) compute server
-ConditionPathExists=/etc/nova/nova.conf
-After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service libvirtd.service
-Wants=network-online.target
-Requires=libvirtd.service
-
-[Service]
-Type=simple
-User=nova
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-conductor.service b/openstack/usr/lib/systemd/system/openstack-nova-conductor.service
deleted file mode 100644
index 4c0d7d43..00000000
--- a/openstack/usr/lib/systemd/system/openstack-nova-conductor.service
+++ /dev/null
@@ -1,16 +0,0 @@
-[Unit]
-Description=Database-access support for Compute nodes (nova-conductor)
-ConditionPathExists=/etc/nova/nova.conf
-After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service libvirtd.service
-Wants=network-online.target
-Requires=libvirtd.service
-
-[Service]
-Type=simple
-User=nova
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/nova-conductor --config-file /etc/nova/nova.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-config-setup.service b/openstack/usr/lib/systemd/system/openstack-nova-config-setup.service
deleted file mode 100644
index df669aa9..00000000
--- a/openstack/usr/lib/systemd/system/openstack-nova-config-setup.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=Run nova-config-setup Ansible scripts
-ConditionPathExists=/etc/openstack/nova.conf
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/nova-config.yml
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-consoleauth.service b/openstack/usr/lib/systemd/system/openstack-nova-consoleauth.service
deleted file mode 100644
index e22780a9..00000000
--- a/openstack/usr/lib/systemd/system/openstack-nova-consoleauth.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=Openstack Console Auth (nova-consoleauth)
-ConditionPathExists=/etc/nova/nova.conf
-After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=nova
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/nova-consoleauth --config-file /etc/nova/nova.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-db-setup.service b/openstack/usr/lib/systemd/system/openstack-nova-db-setup.service
deleted file mode 100644
index 8e004327..00000000
--- a/openstack/usr/lib/systemd/system/openstack-nova-db-setup.service
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=Run nova-db-setup Ansible scripts
-ConditionPathExists=/etc/openstack/nova.conf
-After=network-online.target postgres-server-setup.service openstack-keystone-setup.service openstack-nova-config-setup.service
-Wants=network-online.target
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/nova-db.yml
-
-[Install]
-WantedBy=multi-user.target
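Note that nova-compute and nova-conductor use Requires=libvirtd.service on
top of After=, so they are stopped when libvirtd goes away, while the softer
Wants=network-online.target merely pulls the target in. The dependencies an
installed unit actually carries can be inspected with:

    # Print the hard and ordering dependencies systemd recorded
    systemctl show -p Requires -p After openstack-nova-compute.service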
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-novncproxy.service b/openstack/usr/lib/systemd/system/openstack-nova-novncproxy.service
deleted file mode 100644
index 8cbb20fd..00000000
--- a/openstack/usr/lib/systemd/system/openstack-nova-novncproxy.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack Nova NoVNC proxy
-ConditionPathExists=/etc/nova/nova.conf
-After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=nova
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/nova-novncproxy --config-file /etc/nova/nova.conf --web /usr/share/novnc
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-scheduler.service b/openstack/usr/lib/systemd/system/openstack-nova-scheduler.service
deleted file mode 100644
index e89f0d3e..00000000
--- a/openstack/usr/lib/systemd/system/openstack-nova-scheduler.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack Nova Scheduler
-ConditionPathExists=/etc/nova/nova.conf
-After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=nova
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/nova-scheduler --config-file /etc/nova/nova.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openstack-nova-serialproxy.service b/openstack/usr/lib/systemd/system/openstack-nova-serialproxy.service
deleted file mode 100644
index 30af8305..00000000
--- a/openstack/usr/lib/systemd/system/openstack-nova-serialproxy.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack Nova Serial Proxy
-ConditionPathExists=/etc/nova/nova.conf
-After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=nova
-StandardOutput=null
-StandardError=null
-ExecStart=/usr/bin/nova-serialproxy --config-file /etc/nova/nova.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openvswitch-db-server.service b/openstack/usr/lib/systemd/system/openvswitch-db-server.service
deleted file mode 100644
index 34a7c812..00000000
--- a/openstack/usr/lib/systemd/system/openvswitch-db-server.service
+++ /dev/null
@@ -1,12 +0,0 @@
-[Unit]
-Description=Open vSwitch Database Server Daemon
-After=local-fs.target
-
-[Service]
-Type=forking
-ExecStartPre=-/usr/bin/mkdir -p /var/run/openvswitch
-ExecStart=/usr/sbin/ovsdb-server --remote=punix:/var/run/openvswitch/db.sock --remote=db:Open_vSwitch,Open_vSwitch,manager_options --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --pidfile --detach
-
-[Install]
-WantedBy=multi-user.target
-
diff --git a/openstack/usr/lib/systemd/system/openvswitch-setup.service b/openstack/usr/lib/systemd/system/openvswitch-setup.service
deleted file mode 100644
index 8393ebbc..00000000
--- a/openstack/usr/lib/systemd/system/openvswitch-setup.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=Run openvswitch-setup Ansible scripts
-After=local-fs.target
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/usr/bin/ansible-playbook -v -i /usr/share/openstack/hosts /usr/share/openstack/openvswitch.yml
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/openvswitch.service b/openstack/usr/lib/systemd/system/openvswitch.service
deleted file mode 100644
index 113911f6..00000000
--- a/openstack/usr/lib/systemd/system/openvswitch.service
+++ /dev/null
@@ -1,12 +0,0 @@
-[Unit]
-Description=Open vSwitch Daemon
-Before=network-pre.target
-Wants=network-pre.target
-
-[Service]
-Type=forking
-ExecStartPre=-/usr/bin/mkdir -p /var/run/openvswitch
-ExecStart=/usr/sbin/ovs-vswitchd --pidfile --detach
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/postgres-server-setup.service b/openstack/usr/lib/systemd/system/postgres-server-setup.service
deleted file mode 100644
index 202c0636..00000000
--- a/openstack/usr/lib/systemd/system/postgres-server-setup.service
+++ /dev/null
@@ -1,12 +0,0 @@
-[Unit]
-Description=Run postgres-setup Ansible scripts
-ConditionPathExists=/etc/openstack/postgres.conf
-After=local-fs.target
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/usr/bin/ansible-playbook -v -i /usr/share/openstack/hosts /usr/share/openstack/postgres.yml
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/postgres-server.service b/openstack/usr/lib/systemd/system/postgres-server.service
deleted file mode 100644
index 9e11f26d..00000000
--- a/openstack/usr/lib/systemd/system/postgres-server.service
+++ /dev/null
@@ -1,26 +0,0 @@
-[Unit]
-Description=PostgreSQL database server
-After=network-online.target
-Wants=network-online.target
-
-[Service]
-Type=forking
-TimeoutSec=120
-User=postgres
-Group=postgres
-
-Environment=PGROOT=/var/lib/pgsql
-
-SyslogIdentifier=postgres
-PIDFile=/var/lib/pgsql/data/postmaster.pid
-
-ExecStart=/usr/bin/pg_ctl -s -D ${PGROOT}/data start -w -t 120
-ExecReload=/usr/bin/pg_ctl -s -D ${PGROOT}/data reload
-ExecStop=/usr/bin/pg_ctl -s -D ${PGROOT}/data stop -m fast
-
-# Due to PostgreSQL's use of shared memory, OOM killer is often overzealous in
-# killing Postgres, so adjust it downward
-OOMScoreAdjust=-200
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/rabbitmq-server.service b/openstack/usr/lib/systemd/system/rabbitmq-server.service
deleted file mode 100644
index 1a20f3e4..00000000
--- a/openstack/usr/lib/systemd/system/rabbitmq-server.service
+++ /dev/null
@@ -1,16 +0,0 @@
-[Unit]
-Description=RabbitMQ broker
-After=network-online.target
-Wants=network-online.target
-
-[Service]
-Type=notify
-User=rabbitmq
-Group=rabbitmq
-Environment=HOME=/var/lib/rabbitmq
-WorkingDirectory=/var/lib/rabbitmq
-ExecStart=/usr/sbin/rabbitmq-server
-ExecStop=/usr/sbin/rabbitmqctl stop
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/swift-controller-setup.service b/openstack/usr/lib/systemd/system/swift-controller-setup.service
deleted file mode 100644
index ccfbcbe6..00000000
--- a/openstack/usr/lib/systemd/system/swift-controller-setup.service
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=Run swift-controller-setup (once)
-After=local-fs.target network-online.target postgres-server-setup.service openstack-keystone-setup.service
-Wants=network-online.target
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/swift-controller.yml
-Restart=no
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/lib/systemd/system/swift-proxy.service b/openstack/usr/lib/systemd/system/swift-proxy.service
deleted file mode 100644
index 7b0a2e17..00000000
--- a/openstack/usr/lib/systemd/system/swift-proxy.service
+++ /dev/null
@@ -1,14 +0,0 @@
-[Unit]
-Description=OpenStack Swift Proxy Server
-After=network-online.target swift-controller-setup.service memcached.service
-Wants=network-online.target
-
-[Service]
-Type=forking
-PIDFile=/var/run/swift/proxy-server.pid
-Restart=on-failure
-ExecStart=/usr/bin/swift-init proxy-server start
-ExecStop=/usr/bin/swift-init proxy-server stop
-
-[Install]
-WantedBy=multi-user.target
diff --git a/openstack/usr/share/openstack/ceilometer-config.yml b/openstack/usr/share/openstack/ceilometer-config.yml
deleted file mode 100644
index 9850d84d..00000000
--- a/openstack/usr/share/openstack/ceilometer-config.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- hosts: localhost
-  vars_files:
-  - "/etc/openstack/ceilometer.conf"
-  tasks:
-# Configure ceilometer
-  - name: Create the ceilometer user.
-    user:
-      name: ceilometer
-      comment: Openstack Ceilometer Daemons
-      shell: /sbin/nologin
-      home: /var/lib/ceilometer
-
-  - name: Create the /var folders for ceilometer
-    file:
-      path: "{{ item }}"
-      state: directory
-      owner: ceilometer
-      group: ceilometer
-    with_items:
-    - /var/run/ceilometer
-    - /var/lock/ceilometer
-    - /var/log/ceilometer
-    - /var/lib/ceilometer
-
-  - name: Create /etc/ceilometer directory
-    file:
-      path: /etc/ceilometer
-      state: directory
-
-  - name: Add the configuration needed for ceilometer in /etc/ceilometer using templates
-    template:
-      src: /usr/share/openstack/ceilometer/{{ item }}
-      dest: /etc/ceilometer/{{ item }}
-    with_lines:
-    - cd /usr/share/openstack/ceilometer && find -type f
diff --git a/openstack/usr/share/openstack/ceilometer-db.yml b/openstack/usr/share/openstack/ceilometer-db.yml
deleted file mode 100644
index 717c7d7d..00000000
--- a/openstack/usr/share/openstack/ceilometer-db.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- hosts: localhost
-  vars_files:
-  - "/etc/openstack/ceilometer.conf"
-  tasks:
-  - name: Create ceilometer service user in service tenant
-    keystone_user:
-      user: "{{ CEILOMETER_SERVICE_USER }}"
-      password: "{{ CEILOMETER_SERVICE_PASSWORD }}"
-      tenant: service
-      token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
-
-  - name: Assign admin role to ceilometers service user in the service tenant
-    keystone_user:
-      role: admin
-      user: "{{ CEILOMETER_SERVICE_USER }}"
-      tenant: service
-      token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
-
-  - name: Add ceilometer endpoint
-    keystone_service:
-      name: ceilometer
-      type: metering
-      description: Openstack Metering Service
-      publicurl: http://{{ CONTROLLER_HOST_ADDRESS }}:8777
-      internalurl: http://{{ CONTROLLER_HOST_ADDRESS }}:8777
-      adminurl: http://{{ CONTROLLER_HOST_ADDRESS }}:8777
-      region: regionOne
-      token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
-
-  - name: Create postgresql user for ceilometer
-    postgresql_user:
-      name: "{{ CEILOMETER_DB_USER }}"
-      login_host: "{{ CONTROLLER_HOST_ADDRESS }}"
-      password: "{{ CEILOMETER_DB_PASSWORD }}"
-    sudo: yes
-    sudo_user: ceilometer
-
-  - name: Create database for ceilometer services
-    postgresql_db:
-      name: ceilometer
-      owner: "{{ CEILOMETER_DB_USER }}"
-      login_host: "{{ CONTROLLER_HOST_ADDRESS }}"
-    sudo: yes
-    sudo_user: ceilometer
-
-  - name: Initiate ceilometer database
-    command: ceilometer-dbsync
-    sudo: yes
-    sudo_user: ceilometer
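The ceilometer-db playbook above creates the Keystone service user and
endpoint, the PostgreSQL role and database, and finally runs
ceilometer-dbsync to populate the schema. Since the tasks are idempotent,
the schema sync can also be repeated by hand after an upgrade; a sketch,
assuming the ceilometer user exists as created above:

    # Re-run the schema migration the playbook performs as its last task
    sudo -u ceilometer ceilometer-dbsync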
diff --git a/openstack/usr/share/openstack/ceilometer/ceilometer.conf b/openstack/usr/share/openstack/ceilometer/ceilometer.conf
deleted file mode 100644
index b572d40f..00000000
--- a/openstack/usr/share/openstack/ceilometer/ceilometer.conf
+++ /dev/null
@@ -1,1023 +0,0 @@
-[DEFAULT]
-
-#
-# Options defined in ceilometer.middleware
-#
-
-# Exchanges name to listen for notifications. (multi valued)
-#http_control_exchanges=nova
-#http_control_exchanges=glance
-#http_control_exchanges=neutron
-#http_control_exchanges=cinder
-
-
-#
-# Options defined in ceilometer.pipeline
-#
-
-# Configuration file for pipeline definition. (string value)
-#pipeline_cfg_file=pipeline.yaml
-
-
-#
-# Options defined in ceilometer.sample
-#
-
-# Source for samples emitted on this instance. (string value)
-# Deprecated group/name - [DEFAULT]/counter_source
-#sample_source=openstack
-
-
-#
-# Options defined in ceilometer.service
-#
-
-# Name of this node, which must be valid in an AMQP key. Can
-# be an opaque identifier. For ZeroMQ only, must be a valid
-# host name, FQDN, or IP address. (string value)
-#host=ceilometer
-
-# Dispatcher to process data. (multi valued)
-#dispatcher=database
-
-# Number of workers for collector service. A single
-# collector is enabled by default. (integer value)
-#collector_workers=1
-
-# Number of workers for notification service. A single
-# notification agent is enabled by default. (integer value)
-#notification_workers=1
-
-
-#
-# Options defined in ceilometer.api.app
-#
-
-# The strategy to use for auth: noauth or keystone. (string
-# value)
-auth_strategy=keystone
-
-# Deploy the deprecated v1 API. (boolean value)
-#enable_v1_api=true
-
-
-#
-# Options defined in ceilometer.compute.notifications
-#
-
-# Exchange name for Nova notifications. (string value)
-#nova_control_exchange=nova
-
-
-#
-# Options defined in ceilometer.compute.util
-#
-
-# List of metadata prefixes reserved for metering use. (list
-# value)
-#reserved_metadata_namespace=metering.
-
-# Limit on length of reserved metadata values. (integer value)
-#reserved_metadata_length=256
-
-
-#
-# Options defined in ceilometer.compute.virt.inspector
-#
-
-# Inspector to use for inspecting the hypervisor layer.
-# (string value)
-#hypervisor_inspector=libvirt
-
-
-#
-# Options defined in ceilometer.compute.virt.libvirt.inspector
-#
-
-# Libvirt domain type (valid options are: kvm, lxc, qemu, uml,
-# xen). (string value)
-#libvirt_type=kvm
-
-# Override the default libvirt URI (which is dependent on
-# libvirt_type). (string value)
-#libvirt_uri=
-
-
-#
-# Options defined in ceilometer.image.notifications
-#
-
-# Exchange name for Glance notifications. (string value)
-#glance_control_exchange=glance
-
-
-#
-# Options defined in ceilometer.network.notifications
-#
-
-# Exchange name for Neutron notifications. (string value)
-# Deprecated group/name - [DEFAULT]/quantum_control_exchange
-#neutron_control_exchange=neutron
-
-
-#
-# Options defined in ceilometer.objectstore.swift
-#
-
-# Swift reseller prefix. Must be on par with reseller_prefix
-# in proxy-server.conf. (string value)
-#reseller_prefix=AUTH_
-
-
-#
-# Options defined in ceilometer.openstack.common.db.sqlalchemy.session
-#
-
-# The file name to use with SQLite (string value)
-#sqlite_db=ceilometer.sqlite
-
-# If True, SQLite uses synchronous mode (boolean value)
-#sqlite_synchronous=true
-
-
-#
-# Options defined in ceilometer.openstack.common.eventlet_backdoor
-#
-
-# Enable eventlet backdoor. Acceptable values are 0, ,
-# and :, where 0 results in listening on a random
-# tcp port number; results in listening on the
-# specified port number (and not enabling backdoor if that
-# port is in use); and : results in listening on
-# the smallest unused port number within the specified range
-# of port numbers. The chosen port is displayed in the
-# service's log file. (string value)
-#backdoor_port=
-
-
-#
-# Options defined in ceilometer.openstack.common.lockutils
-#
-
-# Whether to disable inter-process locks. (boolean value)
-#disable_process_locking=false
-
-# Directory to use for lock files. (string value)
-#lock_path=
-
-
-#
-# Options defined in ceilometer.openstack.common.log
-#
-
-# Print debugging output (set logging level to DEBUG instead
-# of default WARNING level). (boolean value)
-#debug=false
-
-# Print more verbose output (set logging level to INFO instead
-# of default WARNING level). (boolean value)
-#verbose=false
-
-# Log output to standard error (boolean value)
-#use_stderr=true
-
-# Format string to use for log messages with context (string
-# value)
-#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
-
-# Format string to use for log messages without context
-# (string value)
-#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
-
-# Data to append to log format when level is DEBUG (string
-# value)
-#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
-
-# Prefix each line of exception output with this format
-# (string value)
-#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
-
-# List of logger=LEVEL pairs (list value)
-#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
-
-# Publish error events (boolean value)
-#publish_errors=false
-
-# Make deprecations fatal (boolean value)
-#fatal_deprecations=false
-
-# If an instance is passed with the log message, format it
-# like this (string value)
-#instance_format="[instance: %(uuid)s] "
-
-# If an instance UUID is passed with the log message, format
-# it like this (string value)
-#instance_uuid_format="[instance: %(uuid)s] "
-
-# The name of logging configuration file. It does not disable
-# existing loggers, but just appends specified logging
-# configuration to any other existing logging options. Please
-# see the Python logging module documentation for details on
-# logging configuration files. (string value)
-# Deprecated group/name - [DEFAULT]/log_config
-#log_config_append=
-
-# DEPRECATED. A logging.Formatter log message format string
-# which may use any of the available logging.LogRecord
-# attributes. This option is deprecated. Please use
-# logging_context_format_string and
-# logging_default_format_string instead. (string value)
-#log_format=
-
-# Format string for %%(asctime)s in log records. Default:
-# %(default)s (string value)
-#log_date_format=%Y-%m-%d %H:%M:%S
-
-# (Optional) Name of log file to output to. If no default is
-# set, logging will go to stdout. (string value)
-# Deprecated group/name - [DEFAULT]/logfile
-#log_file=
-
-# (Optional) The base directory used for relative --log-file
-# paths (string value)
-# Deprecated group/name - [DEFAULT]/logdir
-#log_dir=
-
-# Use syslog for logging. Existing syslog format is DEPRECATED
-# during I, and then will be changed in J to honor RFC5424
-# (boolean value)
-use_syslog=true
-
-# (Optional) Use syslog rfc5424 format for logging. If
-# enabled, will add APP-NAME (RFC5424) before the MSG part of
-# the syslog message. The old format without APP-NAME is
-# deprecated in I, and will be removed in J. (boolean value)
-#use_syslog_rfc_format=false
-
-# Syslog facility to receive log lines (string value)
-#syslog_log_facility=LOG_USER
-
-
-#
-# Options defined in ceilometer.openstack.common.middleware.sizelimit
-#
-
-# The maximum body size per request, in bytes (integer value)
-# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size
-#max_request_body_size=114688
-
-
-#
-# Options defined in ceilometer.openstack.common.notifier.api
-#
-
-# Driver or drivers to handle sending notifications (multi
-# valued)
-#notification_driver=
-
-# Default notification level for outgoing notifications
-# (string value)
-#default_notification_level=INFO
-
-# Default publisher_id for outgoing notifications (string
-# value)
-#default_publisher_id=
-
-
-#
-# Options defined in ceilometer.openstack.common.notifier.rpc_notifier
-#
-
-# AMQP topic used for OpenStack notifications (list value)
-#notification_topics=notifications
-
-
-#
-# Options defined in ceilometer.openstack.common.policy
-#
-
-# JSON file containing policy (string value)
-#policy_file=policy.json
-
-# Rule enforced when requested rule is not found (string
-# value)
-#policy_default_rule=default
-
-
-#
-# Options defined in ceilometer.openstack.common.rpc
-#
-
-# The messaging module to use, defaults to kombu. (string
-# value)
-rpc_backend=rabbit
-
-# Size of RPC thread pool (integer value)
-#rpc_thread_pool_size=64
-
-# Size of RPC connection pool (integer value)
-#rpc_conn_pool_size=30
-
-# Seconds to wait for a response from call or multicall
-# (integer value)
-#rpc_response_timeout=60
-
-# Seconds to wait before a cast expires (TTL). Only supported
-# by impl_zmq. (integer value)
-#rpc_cast_timeout=30
-
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call. (list value)
-#allowed_rpc_exception_modules=nova.exception,cinder.exception,exceptions
-
-# If passed, use a fake RabbitMQ provider (boolean value)
-#fake_rabbit=false
-
-# AMQP exchange to connect to if using RabbitMQ or Qpid
-# (string value)
-#control_exchange=openstack
-
-
-#
-# Options defined in ceilometer.openstack.common.rpc.amqp
-#
-
-# Use durable queues in amqp. (boolean value)
-# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
-#amqp_durable_queues=false
-
-# Auto-delete queues in amqp. (boolean value)
-#amqp_auto_delete=false
-
-
-#
-# Options defined in ceilometer.openstack.common.rpc.impl_kombu
-#
-
-# If SSL is enabled, the SSL version to use. Valid values are
-# TLSv1, SSLv23 and SSLv3. SSLv2 might be available on some
-# distributions. (string value)
(string value) -#kombu_ssl_version= - -# SSL key file (valid only if SSL enabled) (string value) -#kombu_ssl_keyfile= - -# SSL cert file (valid only if SSL enabled) (string value) -#kombu_ssl_certfile= - -# SSL certification authority file (valid only if SSL enabled) -# (string value) -#kombu_ssl_ca_certs= - -# The RabbitMQ broker address where a single node is used -# (string value) -rabbit_host = {{ RABBITMQ_HOST }} - - -# The RabbitMQ broker port where a single node is used -# (integer value) -rabbit_port= {{ RABBITMQ_PORT }} - -# RabbitMQ HA cluster host:port pairs (list value) -#rabbit_hosts=$rabbit_host:$rabbit_port - -# Connect over SSL for RabbitMQ (boolean value) -rabbit_use_ssl=false - -# The RabbitMQ userid (string value) -rabbit_userid= {{ RABBITMQ_USER }} - -# The RabbitMQ password (string value) -rabbit_password = {{ RABBITMQ_PASSWORD }} - - -# The RabbitMQ virtual host (string value) -rabbit_virtual_host=/ - -# How frequently to retry connecting with RabbitMQ (integer -# value) -#rabbit_retry_interval=1 - -# How long to backoff for between retries when connecting to -# RabbitMQ (integer value) -#rabbit_retry_backoff=2 - -# Maximum number of RabbitMQ connection retries. Default is 0 -# (infinite retry count) (integer value) -#rabbit_max_retries=0 - -# Use HA queues in RabbitMQ (x-ha-policy: all). If you change -# this option, you must wipe the RabbitMQ database. (boolean -# value) -#rabbit_ha_queues=false - - -# -# Options defined in ceilometer.openstack.common.rpc.impl_qpid -# - -# Qpid broker hostname (string value) -#qpid_hostname=localhost - -# Qpid broker port (integer value) -#qpid_port=5672 - -# Qpid HA cluster host:port pairs (list value) -#qpid_hosts=$qpid_hostname:$qpid_port - -# Username for qpid connection (string value) -#qpid_username= - -# Password for qpid connection (string value) -#qpid_password= - -# Space separated list of SASL mechanisms to use for auth -# (string value) -#qpid_sasl_mechanisms= - -# Seconds between connection keepalive heartbeats (integer -# value) -#qpid_heartbeat=60 - -# Transport to use, either 'tcp' or 'ssl' (string value) -#qpid_protocol=tcp - -# Disable Nagle algorithm (boolean value) -#qpid_tcp_nodelay=true - -# The qpid topology version to use. Version 1 is what was -# originally used by impl_qpid. Version 2 includes some -# backwards-incompatible changes that allow broker federation -# to work. Users should update to version 2 when they are -# able to take everything down, as it requires a clean break. -# (integer value) -#qpid_topology_version=1 - - -# -# Options defined in ceilometer.openstack.common.rpc.impl_zmq -# - -# ZeroMQ bind address. Should be a wildcard (*), an ethernet -# interface, or IP. The "host" option should point or resolve -# to this address. (string value) -#rpc_zmq_bind_address=* - -# MatchMaker driver (string value) -#rpc_zmq_matchmaker=ceilometer.openstack.common.rpc.matchmaker.MatchMakerLocalhost - -# ZeroMQ receiver listening port (integer value) -#rpc_zmq_port=9501 - -# Number of ZeroMQ contexts, defaults to 1 (integer value) -#rpc_zmq_contexts=1 - -# Maximum number of ingress messages to locally buffer per -# topic. Default is unlimited. (integer value) -#rpc_zmq_topic_backlog= - -# Directory for holding IPC sockets (string value) -#rpc_zmq_ipc_dir=/var/run/openstack - -# Name of this node. Must be a valid hostname, FQDN, or IP -# address. Must match "host" option, if running Nova. 
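(The {{ RABBITMQ_HOST }}-style placeholders in the rabbit_* settings above are filled in at deployment time. As a rough, hypothetical sketch of that substitution step — the render helper below is not part of this patch series, and the host value is a stand-in:)

import re

# Hypothetical illustration of expanding '{{ NAME }}' placeholders in the
# config fragment above; the variable name comes from the fragment, but the
# renderer and the example value are assumptions for illustration only.
def render(template, variables):
    return re.sub(r'\{\{\s*(\w+)\s*\}\}',
                  lambda match: str(variables[match.group(1)]), template)

print(render('rabbit_host = {{ RABBITMQ_HOST }}',
             {'RABBITMQ_HOST': 'controller.example.com'}))
# -> rabbit_host = controller.example.com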
(string -# value) -#rpc_zmq_host=ceilometer - - -# -# Options defined in ceilometer.openstack.common.rpc.matchmaker -# - -# Heartbeat frequency (integer value) -#matchmaker_heartbeat_freq=300 - -# Heartbeat time-to-live. (integer value) -#matchmaker_heartbeat_ttl=600 - - -# -# Options defined in ceilometer.orchestration.notifications -# - -# Exchange name for Heat notifications (string value) -#heat_control_exchange=heat - - -# -# Options defined in ceilometer.storage -# - -# DEPRECATED - Database connection string. (string value) -#database_connection= - - -# -# Options defined in ceilometer.storage.sqlalchemy.models -# - -# MySQL engine to use. (string value) -#mysql_engine=InnoDB - - -# -# Options defined in ceilometer.volume.notifications -# - -# Exchange name for Cinder notifications. (string value) -cinder_control_exchange=cinder - - -[alarm] - -# -# Options defined in ceilometer.cli -# - -# Class to launch as alarm evaluation service. (string value) -#evaluation_service=ceilometer.alarm.service.SingletonAlarmService - - -# -# Options defined in ceilometer.alarm.notifier.rest -# - -# SSL Client certificate for REST notifier. (string value) -#rest_notifier_certificate_file= - -# SSL Client private key for REST notifier. (string value) -#rest_notifier_certificate_key= - -# Whether to verify the SSL Server certificate when calling -# alarm action. (boolean value) -#rest_notifier_ssl_verify=true - - -# -# Options defined in ceilometer.alarm.rpc -# - -# The topic that ceilometer uses for alarm notifier messages. -# (string value) -#notifier_rpc_topic=alarm_notifier - -# The topic that ceilometer uses for alarm partition -# coordination messages. (string value) -#partition_rpc_topic=alarm_partition_coordination - - -# -# Options defined in ceilometer.alarm.service -# - -# Period of evaluation cycle, should be >= the configured -# pipeline interval for collection of underlying metrics. -# (integer value) -# Deprecated group/name - [alarm]/threshold_evaluation_interval -#evaluation_interval=60 - - -# -# Options defined in ceilometer.api.controllers.v2 -# - -# Record alarm change events. (boolean value) -#record_history=true - - -[api] - -# -# Options defined in ceilometer.api -# - -# The port for the ceilometer API server. (integer value) -# Deprecated group/name - [DEFAULT]/metering_api_port -#port=8777 - -# The listen IP for the ceilometer API server. (string value) -#host=0.0.0.0 - - -[collector] - -# -# Options defined in ceilometer.collector -# - -# Address to which the UDP socket is bound. Set to an empty -# string to disable. (string value) -#udp_address=0.0.0.0 - -# Port to which the UDP socket is bound.
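(To make the [collector] UDP options above concrete: binding to 0.0.0.0 accepts datagrams on every interface, on the configured port. A minimal sketch of those bind semantics in plain Python — this is an illustration, not ceilometer's actual collector code:)

import socket

# Sketch of what the collector's udp_address/udp_port settings mean:
# bind a datagram socket on all interfaces and read one sample payload.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('0.0.0.0', 4952))  # udp_address, udp_port (default shown below)
data, sender = sock.recvfrom(65535)
print('%d bytes from %r' % (len(data), sender))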
(integer value) -#udp_port=4952 - - -[database] - -# -# Options defined in ceilometer.openstack.common.db.api -# - -# The backend to use for db (string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend=sqlalchemy - - -# -# Options defined in ceilometer.openstack.common.db.sqlalchemy.session -# - -# The SQLAlchemy connection string used to connect to the -# database (string value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -connection=postgresql://{{ CEILOMETER_DB_USER }}:{{ CEILOMETER_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/ceilometer - -# The SQLAlchemy connection string used to connect to the -# slave database (string value) -#slave_connection= - -# Timeout before idle sql connections are reaped (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout=3600 - -# Minimum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size=1 - -# Maximum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size= - -# Maximum db connection retries during startup. (setting -1 -# implies an infinite retry count) (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries=10 - -# Interval between retries of opening a sql connection -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval=10 - -# If set, use this value for max_overflow with sqlalchemy -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow= - -# Verbosity of SQL debugging information. 0=None, -# 100=Everything (integer value) -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug=0 - -# Add python stack traces to SQL as comment strings (boolean -# value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace=false - -# If set, use this value for pool_timeout with sqlalchemy -# (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout= - - -# -# Options defined in ceilometer.storage -# - -# Number of seconds that samples are kept in the database for -# (<= 0 means forever). (integer value) -#time_to_live=-1 - - -[dispatcher_file] - -# -# Options defined in ceilometer.dispatcher.file -# - -# Name and the location of the file to record meters. (string -# value) -#file_path= - -# The max size of the file. (integer value) -#max_bytes=0 - -# The max number of the files to keep. (integer value) -#backup_count=0 - - -[event] - -# -# Options defined in ceilometer.event.converter -# - -# Configuration file for event definitions. (string value) -#definitions_cfg_file=event_definitions.yaml - -# Drop notifications if no event definition matches. 
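(For the [database] section above: once the placeholders are expanded, the connection option is an ordinary SQLAlchemy-style URL. A small sketch of its shape, with stand-in credentials rather than real deployment values:)

# Sketch of the rendered [database] connection value above; the user,
# password and host are hypothetical stand-ins for the template variables.
user, password, host = 'ceilometer', 'secret', '192.0.2.10'
connection = 'postgresql://%s:%s@%s/ceilometer' % (user, password, host)
assert connection == 'postgresql://ceilometer:secret@192.0.2.10/ceilometer'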
-# (Otherwise, we convert them with just the default traits) -# (boolean value) -#drop_unmatched_notifications=false - - -[keystone_authtoken] - -# -# Options defined in keystoneclient.middleware.auth_token -# - -# Prefix to prepend at the beginning of the path (string -# value) -#auth_admin_prefix= - -# Host providing the admin Identity API endpoint (string -# value) -#auth_host=127.0.0.1 - -# Port of the admin Identity API endpoint (integer value) -#auth_port=35357 - -# Protocol of the admin Identity API endpoint (http or https) -# (string value) -#auth_protocol=https - -# Complete public Identity API endpoint (string value) -auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 -identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357 - - -# API version of the admin Identity API endpoint (string -# value) -#auth_version= - -# Do not handle authorization requests within the middleware, -# but delegate the authorization decision to downstream WSGI -# components (boolean value) -#delay_auth_decision=false - -# Request timeout value for communicating with Identity API -# server. (boolean value) -#http_connect_timeout= - -# How many times to retry reconnecting when communicating -# with the Identity API server. (integer value) -#http_request_max_retries=3 - -# Allows passing in the name of a fake http_handler callback -# function used instead of httplib.HTTPConnection or -# httplib.HTTPSConnection. Useful for unit testing where -# network is not available. (string value) -#http_handler= - -# Single shared secret with the Keystone configuration used -# for bootstrapping a Keystone installation, or otherwise -# bypassing the normal authentication process. (string value) -#admin_token= - -# Keystone account username (string value) -admin_user = {{ CEILOMETER_SERVICE_USER }} - -# Keystone account password (string value) -admin_password = {{ CEILOMETER_SERVICE_PASSWORD }} - -# Keystone service account tenant name to validate user tokens -# (string value) -admin_tenant_name = service - -# Env key for the swift cache (string value) -#cache= - -# Required if Keystone server requires client certificate -# (string value) -#certfile= - -# Required if Keystone server requires client certificate -# (string value) -#keyfile= - -# A PEM encoded Certificate Authority to use when verifying -# HTTPS connections. Defaults to system CAs. (string value) -#cafile= - -# Verify HTTPS connections. (boolean value) -#insecure=false - -# Directory used to cache files related to PKI tokens (string -# value) -#signing_dir= - -# If defined, the memcache server(s) to use for caching (list -# value) -# Deprecated group/name - [DEFAULT]/memcache_servers -#memcached_servers= - -# In order to prevent excessive requests and validations, the -# middleware uses an in-memory cache for the tokens the -# Keystone API returns. This is only valid if memcache_servers -# is defined. Set to -1 to disable caching completely. -# (integer value) -#token_cache_time=300 - -# Value only used for unit testing (integer value) -#revocation_cache_time=1 - -# (optional) if defined, indicate whether token data should be -# authenticated or authenticated and encrypted. Acceptable -# values are MAC or ENCRYPT. If MAC, token data is -# authenticated (with HMAC) in the cache. If ENCRYPT, token -# data is encrypted and authenticated in the cache. If the -# value is not one of these options or empty, auth_token will -# raise an exception on initialization.
(string value) -#memcache_security_strategy= - -# (optional, mandatory if memcache_security_strategy is -# defined) this string is used for key derivation. (string -# value) -#memcache_secret_key= - -# (optional) indicate whether to set the X-Service-Catalog -# header. If False, middleware will not ask for service -# catalog on token validation and will not set the X-Service- -# Catalog header. (boolean value) -#include_service_catalog=true - -# Used to control the use and type of token binding. Can be -# set to: "disabled" to not check token binding. "permissive" -# (default) to validate binding information if the bind type -# is of a form known to the server and ignore it if not. -# "strict" like "permissive" but if the bind type is unknown -# the token will be rejected. "required" any form of token -# binding is needed to be allowed. Finally the name of a -# binding method that must be present in tokens. (string -# value) -#enforce_token_bind=permissive - - -[matchmaker_redis] - -# -# Options defined in ceilometer.openstack.common.rpc.matchmaker_redis -# - -# Host to locate redis (string value) -#host=127.0.0.1 - -# Use this port to connect to redis host. (integer value) -#port=6379 - -# Password for Redis server. (optional) (string value) -#password= - - -[matchmaker_ring] - -# -# Options defined in ceilometer.openstack.common.rpc.matchmaker_ring -# - -# Matchmaker ring file (JSON) (string value) -# Deprecated group/name - [DEFAULT]/matchmaker_ringfile -#ringfile=/etc/oslo/matchmaker_ring.json - - -[notification] - -# -# Options defined in ceilometer.notification -# - -# Acknowledge message when event persistence fails. (boolean -# value) -#ack_on_event_error=true - -# Save event details. (boolean value) -#store_events=false - - -[publisher] - -# -# Options defined in ceilometer.publisher.utils -# - -# Secret value for signing metering messages. (string value) -# Deprecated group/name - [DEFAULT]/metering_secret -# Deprecated group/name - [publisher_rpc]/metering_secret -# It should be set to some random value -metering_secret = {{ METERING_SECRET }} - -[publisher_rpc] - -# -# Options defined in ceilometer.publisher.rpc -# - -# The topic that ceilometer uses for metering messages. -# (string value) -#metering_topic=metering - - -[rpc_notifier2] - -# -# Options defined in ceilometer.openstack.common.notifier.rpc_notifier2 -# - -# AMQP topic(s) used for OpenStack notifications (list value) -#topics=notifications - - -[service_credentials] - -# -# Options defined in ceilometer.service -# - -# User name to use for OpenStack service access. (string -# value) -os_username = {{ CEILOMETER_SERVICE_USER }} - -# Password to use for OpenStack service access. (string value) -os_password = {{ CEILOMETER_SERVICE_PASSWORD }} - -# Tenant ID to use for OpenStack service access. (string -# value) -#os_tenant_id= - -# Tenant name to use for OpenStack service access. (string -# value) -os_tenant_name = service - -# Certificate chain for SSL validation. (string value) -#os_cacert= - -# Auth URL to use for OpenStack service access. (string value) -os_auth_url = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 - -# Region name to use for OpenStack service endpoints. (string -# value) -os_region_name=regionOne - -# Type of endpoint in Identity service catalog to use for -# communication with OpenStack services. (string value) -os_endpoint_type=internalURL - -# Disables X.509 certificate validation when an SSL connection -# to Identity Service is established. 
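(The os_* credentials in [service_credentials] above are what the agents use to authenticate against Keystone. As a rough illustration of the kind of client session they establish — using python-keystoneclient directly is an assumption for illustration, and the address and password are stand-ins, not real deployment values:)

from keystoneclient.v2_0 import client as ks_client

# Hypothetical sketch of authenticating with the os_* values above;
# ceilometer wires this up internally, so this is illustration only.
keystone = ks_client.Client(
    username='ceilometer',                    # os_username
    password='secret',                        # os_password (stand-in)
    tenant_name='service',                    # os_tenant_name
    auth_url='http://192.0.2.10:5000/v2.0')   # os_auth_url (stand-in host)
print(keystone.auth_token)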
(boolean value) -#insecure=false - - -[ssl] - -# -# Options defined in ceilometer.openstack.common.sslutils -# - -# CA certificate file to use to verify connecting clients -# (string value) -#ca_file= - -# Certificate file to use when starting the server securely -# (string value) -#cert_file= - -# Private key file to use when starting the server securely -# (string value) -#key_file= - - -[vmware] - -# -# Options defined in ceilometer.compute.virt.vmware.inspector -# - -# IP address of the VMware Vsphere host (string value) -#host_ip= - -# Username of VMware Vsphere (string value) -#host_username= - -# Password of VMware Vsphere (string value) -#host_password= - -# Number of times a VMware Vsphere API must be retried -# (integer value) -#api_retry_count=10 - -# Sleep time in seconds for polling an ongoing async task -# (floating point value) -#task_poll_interval=0.5 diff --git a/openstack/usr/share/openstack/cinder-config.yml b/openstack/usr/share/openstack/cinder-config.yml deleted file mode 100644 index fd3e2cd0..00000000 --- a/openstack/usr/share/openstack/cinder-config.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -- hosts: localhost - vars_files: - - "/etc/openstack/cinder.conf" - tasks: -# Configure cinder - - name: Create the cinder user. - user: - name: cinder - comment: Openstack Cinder Daemons - shell: /sbin/nologin - home: /var/lib/cinder - - - name: Create the /var folders for cinder - file: - path: "{{ item }}" - state: directory - owner: cinder - group: cinder - with_items: - - /var/run/cinder - - /var/lock/cinder - - /var/log/cinder - - /var/lib/cinder - - /var/lib/cinder/volumes - - - name: Create /etc/cinder directory - file: - path: /etc/cinder - state: directory - - - name: Add the configuration needed for cinder in /etc/cinder using templates - template: - src: /usr/share/openstack/cinder/{{ item }} - dest: /etc/cinder/{{ item }} - with_lines: - - cd /usr/share/openstack/cinder && find -type f diff --git a/openstack/usr/share/openstack/cinder-db.yml b/openstack/usr/share/openstack/cinder-db.yml deleted file mode 100644 index 2a211720..00000000 --- a/openstack/usr/share/openstack/cinder-db.yml +++ /dev/null @@ -1,60 +0,0 @@ ---- -- hosts: localhost - vars_files: - - "/etc/openstack/cinder.conf" - tasks: - - name: Create cinder service user in service tenant - keystone_user: - user: "{{ CINDER_SERVICE_USER }}" - password: "{{ CINDER_SERVICE_PASSWORD }}" - tenant: service - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - - - name: Assign admin role to cinder service user in the service tenant - keystone_user: - role: admin - user: "{{ CINDER_SERVICE_USER }}" - tenant: service - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - - - name: Add cinder endpoint - keystone_service: - name: cinder - type: volume - description: Openstack Block Storage - publicurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v1/%(tenant_id)s' - internalurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v1/%(tenant_id)s' - adminurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v1/%(tenant_id)s' - region: regionOne - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - - - name: Add cinderv2 endpoint - keystone_service: - name: cinderv2 - type: volumev2 - description: Openstack Block Storage - publicurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v2/%(tenant_id)s' - internalurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v2/%(tenant_id)s' - adminurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v2/%(tenant_id)s' - region: regionOne - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - - - name: Create postgresql user for 
cinder - postgresql_user: - name: "{{ CINDER_DB_USER }}" - password: "{{ CINDER_DB_PASSWORD }}" - sudo: yes - sudo_user: cinder - - - name: Create database for cinder services - postgresql_db: - name: cinder - owner: "{{ CINDER_DB_USER }}" - sudo: yes - sudo_user: cinder - - - name: Initiate cinder database - cinder_manage: - action: dbsync - sudo: yes - sudo_user: cinder diff --git a/openstack/usr/share/openstack/cinder-lvs.yml b/openstack/usr/share/openstack/cinder-lvs.yml deleted file mode 100644 index 7a91a306..00000000 --- a/openstack/usr/share/openstack/cinder-lvs.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- hosts: localhost - vars_files: - - "/etc/openstack/cinder.conf" - tasks: - - name: Check that CINDER_DEVICE exists - stat: - path: "{{ CINDER_DEVICE }}" - register: cinder_device_stats - failed_when: cinder_device_stats.stat.exists == false - - - name: Configure LVM group for cinder - lvg: - vg: cinder-volumes - pvs: "{{ CINDER_DEVICE }}" - - - lineinfile: - dest: /etc/lvm/lvm.conf - regexp: '# filter = \[ \"a\/\.\*/\" \]' - line: ' filter = [ "a|{{ CINDER_DEVICE }}|", "r/.*/" ]' - backrefs: yes diff --git a/openstack/usr/share/openstack/cinder/api-paste.ini b/openstack/usr/share/openstack/cinder/api-paste.ini deleted file mode 100644 index ba922d5f..00000000 --- a/openstack/usr/share/openstack/cinder/api-paste.ini +++ /dev/null @@ -1,60 +0,0 @@ -############# -# OpenStack # -############# - -[composite:osapi_volume] -use = call:cinder.api:root_app_factory -/: apiversions -/v1: openstack_volume_api_v1 -/v2: openstack_volume_api_v2 - -[composite:openstack_volume_api_v1] -use = call:cinder.api.middleware.auth:pipeline_factory -noauth = request_id faultwrap sizelimit osprofiler noauth apiv1 -keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 -keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 - -[composite:openstack_volume_api_v2] -use = call:cinder.api.middleware.auth:pipeline_factory -noauth = request_id faultwrap sizelimit osprofiler noauth apiv2 -keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2 -keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2 - -[filter:request_id] -paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory - -[filter:faultwrap] -paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory - -[filter:osprofiler] -paste.filter_factory = osprofiler.web:WsgiMiddleware.factory -hmac_keys = SECRET_KEY -enabled = yes - -[filter:noauth] -paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory - -[filter:sizelimit] -paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory - -[app:apiv1] -paste.app_factory = cinder.api.v1.router:APIRouter.factory - -[app:apiv2] -paste.app_factory = cinder.api.v2.router:APIRouter.factory - -[pipeline:apiversions] -pipeline = faultwrap osvolumeversionapp - -[app:osvolumeversionapp] -paste.app_factory = cinder.api.versions:Versions.factory - -########## -# Shared # -########## - -[filter:keystonecontext] -paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory - -[filter:authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory diff --git a/openstack/usr/share/openstack/cinder/cinder.conf b/openstack/usr/share/openstack/cinder/cinder.conf deleted file mode 100644 index a58004b5..00000000 --- a/openstack/usr/share/openstack/cinder/cinder.conf +++ 
/dev/null @@ -1,2825 +0,0 @@ -[DEFAULT] - -# -# Options defined in oslo.messaging -# - -# Use durable queues in AMQP. (boolean value) -# Deprecated group/name - [DEFAULT]/rabbit_durable_queues -#amqp_durable_queues=false - -# Auto-delete queues in AMQP. (boolean value) -#amqp_auto_delete=false - -# Size of RPC connection pool. (integer value) -#rpc_conn_pool_size=30 - -# Qpid broker hostname. (string value) -#qpid_hostname=localhost - -# Qpid broker port. (integer value) -#qpid_port=5672 - -# Qpid HA cluster host:port pairs. (list value) -#qpid_hosts=$qpid_hostname:$qpid_port - -# Username for Qpid connection. (string value) -#qpid_username= - -# Password for Qpid connection. (string value) -#qpid_password= - -# Space separated list of SASL mechanisms to use for auth. -# (string value) -#qpid_sasl_mechanisms= - -# Seconds between connection keepalive heartbeats. (integer -# value) -#qpid_heartbeat=60 - -# Transport to use, either 'tcp' or 'ssl'. (string value) -#qpid_protocol=tcp - -# Whether to disable the Nagle algorithm. (boolean value) -#qpid_tcp_nodelay=true - -# The number of prefetched messages held by receiver. (integer -# value) -#qpid_receiver_capacity=1 - -# The qpid topology version to use. Version 1 is what was -# originally used by impl_qpid. Version 2 includes some -# backwards-incompatible changes that allow broker federation -# to work. Users should update to version 2 when they are -# able to take everything down, as it requires a clean break. -# (integer value) -#qpid_topology_version=1 - -# SSL version to use (valid only if SSL enabled). valid values -# are TLSv1 and SSLv23. SSLv2 and SSLv3 may be available on -# some distributions. (string value) -#kombu_ssl_version= - -# SSL key file (valid only if SSL enabled). (string value) -#kombu_ssl_keyfile= - -# SSL cert file (valid only if SSL enabled). (string value) -#kombu_ssl_certfile= - -# SSL certification authority file (valid only if SSL -# enabled). (string value) -#kombu_ssl_ca_certs= - -# How long to wait before reconnecting in response to an AMQP -# consumer cancel notification. (floating point value) -#kombu_reconnect_delay=1.0 - -# The RabbitMQ broker address where a single node is used. -# (string value) -rabbit_host={{ RABBITMQ_HOST }} - -# The RabbitMQ broker port where a single node is used. -# (integer value) -rabbit_port={{ RABBITMQ_PORT }} - -# RabbitMQ HA cluster host:port pairs. (list value) -#rabbit_hosts=$rabbit_host:$rabbit_port - -# Connect over SSL for RabbitMQ. (boolean value) -#rabbit_use_ssl=false - -# The RabbitMQ userid. (string value) -rabbit_userid={{ RABBITMQ_USER }} - -# The RabbitMQ password. (string value) -rabbit_password={{ RABBITMQ_PASSWORD }} - -# the RabbitMQ login method (string value) -#rabbit_login_method=AMQPLAIN - -# The RabbitMQ virtual host. (string value) -#rabbit_virtual_host=/ - -# How frequently to retry connecting with RabbitMQ. (integer -# value) -#rabbit_retry_interval=1 - -# How long to backoff for between retries when connecting to -# RabbitMQ. (integer value) -#rabbit_retry_backoff=2 - -# Maximum number of RabbitMQ connection retries. Default is 0 -# (infinite retry count). (integer value) -#rabbit_max_retries=0 - -# Use HA queues in RabbitMQ (x-ha-policy: all). If you change -# this option, you must wipe the RabbitMQ database. (boolean -# value) -#rabbit_ha_queues=false - -# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake -# (boolean value) -#fake_rabbit=false - -# ZeroMQ bind address. Should be a wildcard (*), an ethernet -# interface, or IP. 
The "host" option should point or resolve -# to this address. (string value) -#rpc_zmq_bind_address=* - -# MatchMaker driver. (string value) -#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost - -# ZeroMQ receiver listening port. (integer value) -#rpc_zmq_port=9501 - -# Number of ZeroMQ contexts, defaults to 1. (integer value) -#rpc_zmq_contexts=1 - -# Maximum number of ingress messages to locally buffer per -# topic. Default is unlimited. (integer value) -#rpc_zmq_topic_backlog= - -# Directory for holding IPC sockets. (string value) -#rpc_zmq_ipc_dir=/var/run/openstack - -# Name of this node. Must be a valid hostname, FQDN, or IP -# address. Must match "host" option, if running Nova. (string -# value) -#rpc_zmq_host=cinder - -# Seconds to wait before a cast expires (TTL). Only supported -# by impl_zmq. (integer value) -#rpc_cast_timeout=30 - -# Heartbeat frequency. (integer value) -#matchmaker_heartbeat_freq=300 - -# Heartbeat time-to-live. (integer value) -#matchmaker_heartbeat_ttl=600 - -# Size of RPC greenthread pool. (integer value) -#rpc_thread_pool_size=64 - -# Driver or drivers to handle sending notifications. (multi -# valued) -notification_driver=messagingv2 - -# AMQP topic used for OpenStack notifications. (list value) -# Deprecated group/name - [rpc_notifier2]/topics -#notification_topics=notifications - -# Seconds to wait for a response from a call. (integer value) -#rpc_response_timeout=60 - -# A URL representing the messaging driver to use and its full -# configuration. If not set, we fall back to the rpc_backend -# option and driver specific configuration. (string value) -#transport_url= - -# The messaging driver to use, defaults to rabbit. Other -# drivers include qpid and zmq. (string value) -rpc_backend=rabbit - -# The default exchange under which topics are scoped. May be -# overridden by an exchange name specified in the -# transport_url option. (string value) -control_exchange=cinder - - -# -# Options defined in cinder.exception -# - -# Make exception message format errors fatal. (boolean value) -#fatal_exception_format_errors=false - - -# -# Options defined in cinder.quota -# - -# Number of volumes allowed per project (integer value) -#quota_volumes=10 - -# Number of volume snapshots allowed per project (integer -# value) -#quota_snapshots=10 - -# Number of consistencygroups allowed per project (integer -# value) -#quota_consistencygroups=10 - -# Total amount of storage, in gigabytes, allowed for volumes -# and snapshots per project (integer value) -#quota_gigabytes=1000 - -# Number of volume backups allowed per project (integer value) -#quota_backups=10 - -# Total amount of storage, in gigabytes, allowed for backups -# per project (integer value) -#quota_backup_gigabytes=1000 - -# Number of seconds until a reservation expires (integer -# value) -#reservation_expire=86400 - -# Count of reservations until usage is refreshed (integer -# value) -#until_refresh=0 - -# Number of seconds between subsequent usage refreshes -# (integer value) -#max_age=0 - -# Default driver to use for quota checks (string value) -#quota_driver=cinder.quota.DbQuotaDriver - -# Enables or disables use of default quota class with default -# quota. 
(boolean value) -#use_default_quota_class=true - - -# -# Options defined in cinder.service -# - -# Interval, in seconds, between nodes reporting state to -# datastore (integer value) -#report_interval=10 - -# Interval, in seconds, between running periodic tasks -# (integer value) -#periodic_interval=60 - -# Range, in seconds, to randomly delay when starting the -# periodic task scheduler to reduce stampeding. (Disable by -# setting to 0) (integer value) -#periodic_fuzzy_delay=60 - -# IP address on which OpenStack Volume API listens (string -# value) -#osapi_volume_listen=0.0.0.0 - -# Port on which OpenStack Volume API listens (integer value) -#osapi_volume_listen_port=8776 - -# Number of workers for OpenStack Volume API service. The -# default is equal to the number of CPUs available. (integer -# value) -#osapi_volume_workers= - - -# -# Options defined in cinder.ssh_utils -# - -# Option to enable strict host key checking. When set to -# "True" Cinder will only connect to systems with a host key -# present in the configured "ssh_hosts_key_file". When set to -# "False" the host key will be saved upon first connection and -# used for subsequent connections. Default=False (boolean -# value) -#strict_ssh_host_key_policy=false - -# File containing SSH host keys for the systems with which -# Cinder needs to communicate. OPTIONAL: -# Default=$state_path/ssh_known_hosts (string value) -#ssh_hosts_key_file=$state_path/ssh_known_hosts - - -# -# Options defined in cinder.test -# - -# File name of clean sqlite db (string value) -#sqlite_clean_db=clean.sqlite - - -# -# Options defined in cinder.wsgi -# - -# Maximum line size of message headers to be accepted. -# max_header_line may need to be increased when using large -# tokens (typically those generated by the Keystone v3 API -# with big service catalogs). (integer value) -#max_header_line=16384 - -# If False, closes the client socket connection explicitly. -# Set it to True to maintain backward compatibility; the -# recommended setting is False. (boolean value) -#wsgi_keep_alive=true - -# Sets the value of TCP_KEEPALIVE (True/False) for each server -# socket. (boolean value) -#tcp_keepalive=true - -# Sets the value of TCP_KEEPIDLE in seconds for each server -# socket. Not supported on OS X. (integer value) -#tcp_keepidle=600 - -# Sets the value of TCP_KEEPINTVL in seconds for each server -# socket. Not supported on OS X. (integer value) -#tcp_keepalive_interval= - -# Sets the value of TCP_KEEPCNT for each server socket. Not -# supported on OS X. (integer value) -#tcp_keepalive_count= - -# CA certificate file to use to verify connecting clients -# (string value) -#ssl_ca_file= - -# Certificate file to use when starting the server securely -# (string value) -#ssl_cert_file= - -# Private key file to use when starting the server securely -# (string value) -#ssl_key_file= - - -# -# Options defined in cinder.api.common -# - -# The maximum number of items that a collection resource -# returns in a single response (integer value) -#osapi_max_limit=1000 - -# Base URL that will be presented to users in links to the -# OpenStack Volume API (string value) -# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix -#osapi_volume_base_URL= - - -# -# Options defined in cinder.api.middleware.auth -# - -# Treat X-Forwarded-For as the canonical remote address. Only -# enable this if you have a sanitizing proxy.
(boolean value) -#use_forwarded_for=false - - -# -# Options defined in cinder.api.middleware.sizelimit -# - -# Max size for body of a request (integer value) -#osapi_max_request_body_size=114688 - - -# -# Options defined in cinder.backup.driver -# - -# Backup metadata version to be used when backing up volume -# metadata. If this number is bumped, make sure the service -# doing the restore supports the new version. (integer value) -#backup_metadata_version=1 - - -# -# Options defined in cinder.backup.drivers.ceph -# - -# Ceph configuration file to use. (string value) -#backup_ceph_conf=/etc/ceph/ceph.conf - -# The Ceph user to connect with. Default here is to use the -# same user as for Cinder volumes. If not using cephx this -# should be set to None. (string value) -#backup_ceph_user=cinder - -# The chunk size, in bytes, that a backup is broken into -# before transfer to the Ceph object store. (integer value) -#backup_ceph_chunk_size=134217728 - -# The Ceph pool where volume backups are stored. (string -# value) -#backup_ceph_pool=backups - -# RBD stripe unit to use when creating a backup image. -# (integer value) -#backup_ceph_stripe_unit=0 - -# RBD stripe count to use when creating a backup image. -# (integer value) -#backup_ceph_stripe_count=0 - -# If True, always discard excess bytes when restoring volumes, -# i.e. pad with zeroes. (boolean value) -#restore_discard_excess_bytes=true - - -# -# Options defined in cinder.backup.drivers.swift -# - -# The URL of the Swift endpoint (string value) -#backup_swift_url= - -# Info to match when looking for swift in the service catalog. -# Format is: separated values of the form: -# <service_type>:<service_name>:<endpoint_type> - Only used if -# backup_swift_url is unset (string value) -#swift_catalog_info=object-store:swift:publicURL - -# Swift authentication mechanism (string value) -#backup_swift_auth=per_user - -# Swift authentication version. Specify "1" for auth 1.0, or -# "2" for auth 2.0 (string value) -#backup_swift_auth_version=1 - -# Swift tenant/account name. Required when connecting to an -# auth 2.0 system (string value) -#backup_swift_tenant= - -# Swift user name (string value) -#backup_swift_user= - -# Swift key for authentication (string value) -#backup_swift_key= - -# The default Swift container to use (string value) -#backup_swift_container=volumebackups - -# The size in bytes of Swift backup objects (integer value) -#backup_swift_object_size=52428800 - -# The number of retries to make for Swift operations (integer -# value) -#backup_swift_retry_attempts=3 - -# The backoff time in seconds between Swift retries (integer -# value) -#backup_swift_retry_backoff=2 - -# Compression algorithm (None to disable) (string value) -#backup_compression_algorithm=zlib - - -# -# Options defined in cinder.backup.drivers.tsm -# - -# Volume prefix for the backup id when backing up to TSM -# (string value) -#backup_tsm_volume_prefix=backup - -# TSM password for the running username (string value) -#backup_tsm_password=password - -# Enable or Disable compression for backups (boolean value) -#backup_tsm_compression=true - - -# -# Options defined in cinder.backup.manager -# - -# Driver to use for backups.
(string value) -# Deprecated group/name - [DEFAULT]/backup_service -#backup_driver=cinder.backup.drivers.swift - - -# -# Options defined in cinder.common.config -# - -# File name for the paste.deploy config for cinder-api (string -# value) -api_paste_config=api-paste.ini - -# Top-level directory for maintaining cinder's state (string -# value) -# Deprecated group/name - [DEFAULT]/pybasedir -state_path=/var/lib/cinder - -# IP address of this host (string value) -my_ip={{ MANAGEMENT_INTERFACE_IP_ADDRESS }} - -# Default glance host name or IP (string value) -glance_host={{ CONTROLLER_HOST_ADDRESS }} - -# Default glance port (integer value) -#glance_port=9292 - -# A list of the glance API servers available to cinder -# ([hostname|ip]:port) (list value) -#glance_api_servers=$glance_host:$glance_port - -# Version of the glance API to use (integer value) -#glance_api_version=1 - -# Number of retries when downloading an image from glance -# (integer value) -#glance_num_retries=0 - -# Allow performing insecure SSL (https) requests to glance -# (boolean value) -#glance_api_insecure=false - -# Enables or disables negotiation of SSL layer compression. In -# some cases disabling compression can improve data -# throughput, such as when high network bandwidth is available -# and you use compressed image formats like qcow2. (boolean -# value) -#glance_api_ssl_compression=false - -# Location of ca certificates file to use for glance client -# requests. (string value) -#glance_ca_certificates_file= - -# http/https timeout value for glance operations. If no value -# (None) is supplied here, the glanceclient default value is -# used. (integer value) -#glance_request_timeout= - -# The topic that scheduler nodes listen on (string value) -#scheduler_topic=cinder-scheduler - -# The topic that volume nodes listen on (string value) -#volume_topic=cinder-volume - -# The topic that volume backup nodes listen on (string value) -#backup_topic=cinder-backup - -# DEPRECATED: Deploy v1 of the Cinder API. (boolean value) -#enable_v1_api=true - -# Deploy v2 of the Cinder API. (boolean value) -#enable_v2_api=true - -# Enables or disables rate limit of the API. (boolean value) -#api_rate_limit=true - -# Specify list of extensions to load when using -# osapi_volume_extension option with -# cinder.api.contrib.select_extensions (list value) -#osapi_volume_ext_list= - -# osapi volume extension to load (multi valued) -#osapi_volume_extension=cinder.api.contrib.standard_extensions - -# Full class name for the Manager for volume (string value) -#volume_manager=cinder.volume.manager.VolumeManager - -# Full class name for the Manager for volume backup (string -# value) -#backup_manager=cinder.backup.manager.BackupManager - -# Full class name for the Manager for scheduler (string value) -#scheduler_manager=cinder.scheduler.manager.SchedulerManager - -# Name of this node. This can be an opaque identifier. It is -# not necessarily a host name, FQDN, or IP address. (string -# value) -#host=cinder - -# Availability zone of this node (string value) -#storage_availability_zone=nova - -# Default availability zone for new volumes. If not set, the -# storage_availability_zone option value is used as the -# default for new volumes. (string value) -#default_availability_zone= - -# Default volume type to use (string value) -#default_volume_type= - -# Time period for which to generate volume usages. The options -# are hour, day, month, or year.
(string value) -#volume_usage_audit_period=month - -# Path to the rootwrap configuration file to use for running -# commands as root (string value) -rootwrap_config=/etc/cinder/rootwrap.conf - -# Enable monkey patching (boolean value) -#monkey_patch=false - -# List of modules/decorators to monkey patch (list value) -#monkey_patch_modules= - -# Maximum time since last check-in for a service to be -# considered up (integer value) -#service_down_time=60 - -# The full class name of the volume API class to use (string -# value) -#volume_api_class=cinder.volume.api.API - -# The full class name of the volume backup API class (string -# value) -#backup_api_class=cinder.backup.api.API - -# The strategy to use for auth. Supports noauth, keystone, and -# deprecated. (string value) -auth_strategy=keystone - -# A list of backend names to use. These backend names should -# be backed by a unique [CONFIG] group with its options (list -# value) -#enabled_backends= - -# Whether snapshots count against GigaByte quota (boolean -# value) -#no_snapshot_gb_quota=false - -# The full class name of the volume transfer API class (string -# value) -#transfer_api_class=cinder.transfer.api.API - -# The full class name of the volume replication API class -# (string value) -#replication_api_class=cinder.replication.api.API - -# The full class name of the consistencygroup API class -# (string value) -#consistencygroup_api_class=cinder.consistencygroup.api.API - - -# -# Options defined in cinder.compute -# - -# The full class name of the compute API class to use (string -# value) -#compute_api_class=cinder.compute.nova.API - - -# -# Options defined in cinder.compute.nova -# - -# Match this value when searching for nova in the service -# catalog. Format is: separated values of the form: -# <service_type>:<service_name>:<endpoint_type> (string value) -#nova_catalog_info=compute:nova:publicURL - -# Same as nova_catalog_info, but for admin endpoint. (string -# value) -#nova_catalog_admin_info=compute:nova:adminURL - -# Override service catalog lookup with template for nova -# endpoint e.g. http://localhost:8774/v2/%(project_id)s -# (string value) -#nova_endpoint_template= - -# Same as nova_endpoint_template, but for admin endpoint. -# (string value) -#nova_endpoint_admin_template= - -# Region name of this node (string value) -#os_region_name= - -# Location of ca certificates file to use for nova client -# requests. (string value) -#nova_ca_certificates_file= - -# Allow performing insecure SSL requests to nova (boolean -# value) -#nova_api_insecure=false - - -# -# Options defined in cinder.db.api -# - -# The backend to use for db (string value) -#db_backend=sqlalchemy - -# Services to be added to the available pool on create -# (boolean value) -#enable_new_services=true - -# Template string to be used to generate volume names (string -# value) -volume_name_template=volume-%s - -# Template string to be used to generate snapshot names -# (string value) -#snapshot_name_template=snapshot-%s - -# Template string to be used to generate backup names (string -# value) -#backup_name_template=backup-%s - - -# -# Options defined in cinder.db.base -# - -# Driver to use for database access (string value) -#db_driver=cinder.db - - -# -# Options defined in cinder.image.glance -# - -# Default core properties of image (list value) -#glance_core_properties=checksum,container_format,disk_format,image_name,image_id,min_disk,min_ram,name,size - -# A list of url schemes that can be downloaded directly via -# the direct_url. Currently supported schemes: [file].
(list -# value) -#allowed_direct_url_schemes= - - -# -# Options defined in cinder.image.image_utils -# - -# Directory used for temporary storage during image conversion -# (string value) -#image_conversion_dir=$state_path/conversion - - -# -# Options defined in cinder.openstack.common.eventlet_backdoor -# - -# Enable eventlet backdoor. Acceptable values are 0, <port>, -# and <start>:<end>, where 0 results in listening on a random -# tcp port number; <port> results in listening on the -# specified port number (and not enabling backdoor if that -# port is in use); and <start>:<end> results in listening on -# the smallest unused port number within the specified range -# of port numbers. The chosen port is displayed in the -# service's log file. (string value) -#backdoor_port= - - -# -# Options defined in cinder.openstack.common.lockutils -# - -# Whether to disable inter-process locks (boolean value) -#disable_process_locking=false - -# Directory to use for lock files. Defaults to a temp directory -# (string value) -lock_path=/var/lock/cinder - - -# -# Options defined in cinder.openstack.common.log -# - -# Print debugging output (set logging level to DEBUG instead -# of default WARNING level). (boolean value) -#debug=false - -# Print more verbose output (set logging level to INFO instead -# of default WARNING level). (boolean value) -#verbose=false - -# Log output to standard error. (boolean value) -#use_stderr=true - -# Format string to use for log messages with context. (string -# value) -#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages without context. -# (string value) -#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Data to append to log format when level is DEBUG. (string -# value) -#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format. -# (string value) -#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s - -# List of logger=LEVEL pairs. (list value) -#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN - -# Enables or disables publication of error events. (boolean -# value) -#publish_errors=false - -# Enables or disables fatal status of deprecations. (boolean -# value) -#fatal_deprecations=false - -# The format for an instance that is passed with the log -# message. (string value) -#instance_format="[instance: %(uuid)s] " - -# The format for an instance UUID that is passed with the log -# message. (string value) -#instance_uuid_format="[instance: %(uuid)s] " - -# The name of a logging configuration file. This file is -# appended to any existing logging configuration files. For -# details about logging configuration files, see the Python -# logging module documentation. (string value) -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append= - -# DEPRECATED. A logging.Formatter log message format string -# which may use any of the available logging.LogRecord -# attributes. This option is deprecated. Please use -# logging_context_format_string and -# logging_default_format_string instead. (string value) -#log_format= - -# Format string for %%(asctime)s in log records.
Default: -# %(default)s. (string value) -#log_date_format=%Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to output to. If no default is -# set, logging will go to stdout. (string value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file= - -# (Optional) The base directory used for relative --log-file -# paths. (string value) -# Deprecated group/name - [DEFAULT]/logdir -#log_dir= - -# Use syslog for logging. Existing syslog format is DEPRECATED -# during I, and will change in J to honor RFC5424. (boolean -# value) -use_syslog = True - -# (Optional) Enables or disables syslog rfc5424 format for -# logging. If enabled, prefixes the MSG part of the syslog -# message with APP-NAME (RFC5424). The format without the APP- -# NAME is deprecated in I, and will be removed in J. (boolean -# value) -#use_syslog_rfc_format=false - -# Syslog facility to receive log lines. (string value) -#syslog_log_facility=LOG_USER - - -# -# Options defined in cinder.openstack.common.periodic_task -# - -# Some periodic tasks can be run in a separate process. Should -# we run them here? (boolean value) -#run_external_periodic_tasks=true - - -# -# Options defined in cinder.openstack.common.policy -# - -# The JSON file that defines policies. (string value) -#policy_file=policy.json - -# Default rule. Enforced when a requested rule is not found. -# (string value) -#policy_default_rule=default - - -# -# Options defined in cinder.scheduler.driver -# - -# The scheduler host manager class to use (string value) -#scheduler_host_manager=cinder.scheduler.host_manager.HostManager - -# Maximum number of attempts to schedule a volume (integer -# value) -#scheduler_max_attempts=3 - - -# -# Options defined in cinder.scheduler.host_manager -# - -# Which filter class names to use for filtering hosts when not -# specified in the request. (list value) -#scheduler_default_filters=AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter - -# Which weigher class names to use for weighing hosts. (list -# value) -#scheduler_default_weighers=CapacityWeigher - - -# -# Options defined in cinder.scheduler.manager -# - -# Default scheduler driver to use (string value) -#scheduler_driver=cinder.scheduler.filter_scheduler.FilterScheduler - - -# -# Options defined in cinder.scheduler.scheduler_options -# - -# Absolute path to scheduler configuration JSON file. (string -# value) -#scheduler_json_config_location= - - -# -# Options defined in cinder.scheduler.simple -# - -# This config option has been deprecated along with the -# SimpleScheduler. The new scheduler is able to gather capacity -# information for each host, thus setting the maximum number -# of volume gigabytes for a host is no longer needed. It is safe -# to remove this option from cinder.conf. (integer value) -#max_gigabytes=10000 - - -# -# Options defined in cinder.scheduler.weights.capacity -# - -# Multiplier used for weighing volume capacity. Negative -# numbers mean to stack vs spread. (floating point value) -#capacity_weight_multiplier=1.0 - -# Multiplier used for weighing volume capacity. Negative -# numbers mean to stack vs spread. (floating point value) -#allocated_capacity_weight_multiplier=-1.0 - - -# -# Options defined in cinder.scheduler.weights.volume_number -# - -# Multiplier used for weighing volume number. Negative numbers -# mean to spread vs stack. (floating point value) -#volume_number_multiplier=-1.0 - - -# -# Options defined in cinder.transfer.api -# - -# The number of characters in the salt.
(integer value) -#volume_transfer_salt_length=8 - -# The number of characters in the autogenerated auth key. -# (integer value) -#volume_transfer_key_length=16 - - -# -# Options defined in cinder.volume.api -# - -# Cache volume availability zones in memory for the provided -# duration in seconds (integer value) -#az_cache_duration=3600 - -# Create volume from snapshot at the host where snapshot -# resides (boolean value) -#snapshot_same_host=true - -# Ensure that the new volumes are the same AZ as snapshot or -# source volume (boolean value) -#cloned_volume_same_az=true - - -# -# Options defined in cinder.volume.driver -# - -# The maximum number of times to rescan the iSER target to find a -# volume (integer value) -#num_iser_scan_tries=3 - -# The maximum number of iSER target IDs per host (integer -# value) -#iser_num_targets=100 - -# Prefix for iSER volumes (string value) -#iser_target_prefix=iqn.2010-10.org.iser.openstack: - -# The IP address that the iSER daemon is listening on (string -# value) -#iser_ip_address=$my_ip - -# The port that the iSER daemon is listening on (integer -# value) -#iser_port=3260 - -# The name of the iSER target user-land tool to use (string -# value) -#iser_helper=tgtadm - -# Number of times to attempt to run flakey shell commands -# (integer value) -#num_shell_tries=3 - -# The percentage of backend capacity that is reserved (integer -# value) -#reserved_percentage=0 - -# The maximum number of iSCSI target IDs per host (integer -# value) -#iscsi_num_targets=100 - -# Prefix for iSCSI volumes (string value) -#iscsi_target_prefix=iqn.2010-10.org.openstack: - -# The IP address that the iSCSI daemon is listening on (string -# value) -iscsi_ip_address={{ MANAGEMENT_INTERFACE_IP_ADDRESS }} - -# The port that the iSCSI daemon is listening on (integer -# value) -#iscsi_port=3260 - -# The maximum number of times to rescan targets to find volume -# (integer value) -# Deprecated group/name - [DEFAULT]/num_iscsi_scan_tries -#num_volume_device_scan_tries=3 - -# The backend name for a given driver implementation (string -# value) -volume_backend_name=LVM_iSCSI - -# Do we attach/detach volumes in cinder using multipath for -# volume to image and image to volume transfers? (boolean -# value) -#use_multipath_for_image_xfer=false - -# Method used to wipe old volumes (valid options are: none, -# zero, shred) (string value) -#volume_clear=zero - -# Size in MiB to wipe at start of old volumes. 0 => all -# (integer value) -#volume_clear_size=0 - -# The flag to pass to ionice to alter the i/o priority of the -# process used to zero a volume after deletion, for example -# "-c3" for idle only priority. (string value) -#volume_clear_ionice= - -# iSCSI target user-land tool to use. tgtadm is default, use -# lioadm for LIO iSCSI support, iseradm for the ISER protocol, -# or fake for testing. (string value) -iscsi_helper=lioadm - -# Volume configuration file storage directory (string value) -volumes_dir=$state_path/volumes - -# IET configuration file (string value) -#iet_conf=/etc/iet/ietd.conf - -# Comma-separated list of initiator IQNs allowed to connect to -# the iSCSI target. (From Nova compute nodes.)
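(To illustrate how the iscsi_target_prefix and volume_name_template options above combine in practice: targets are conventionally named as the prefix followed by the volume name. Cinder's exact composition may differ, and the UUID below is a made-up stand-in:)

# Illustration of composing a target IQN from the options above; the
# volume UUID is hypothetical and cinder's exact logic may differ.
iscsi_target_prefix = 'iqn.2010-10.org.openstack:'
volume_name = 'volume-%s' % '3f2504e0-4f89-11d3-9a0c-0305e82c3301'
print(iscsi_target_prefix + volume_name)
# -> iqn.2010-10.org.openstack:volume-3f2504e0-4f89-11d3-9a0c-0305e82c3301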
(string value) -#lio_initiator_iqns= - -# Sets the behavior of the iSCSI target to either perform -# blockio or fileio; optionally, auto can be set and Cinder -# will autodetect the type of backing device (string value) -#iscsi_iotype=fileio - -# The default block size used when copying/clearing volumes -# (string value) -#volume_dd_blocksize=1M - -# The blkio cgroup name to be used to limit bandwidth of -# volume copy (string value) -#volume_copy_blkio_cgroup_name=cinder-volume-copy - -# The upper limit of bandwidth of volume copy. 0 => unlimited -# (integer value) -#volume_copy_bps_limit=0 - -# Sets the behavior of the iSCSI target to either perform -# write-back (on) or write-through (off). This parameter is -# valid if iscsi_helper is set to tgtadm or iseradm. (string -# value) -#iscsi_write_cache=on - -# The path to the client certificate key for verification, if -# the driver supports it. (string value) -#driver_client_cert_key= - -# The path to the client certificate for verification, if the -# driver supports it. (string value) -#driver_client_cert= - - -# -# Options defined in cinder.volume.drivers.block_device -# - -# List of all available devices (list value) -#available_devices= - - -# -# Options defined in cinder.volume.drivers.coraid -# - -# IP address of Coraid ESM (string value) -#coraid_esm_address= - -# User name to connect to Coraid ESM (string value) -#coraid_user=admin - -# Name of group on Coraid ESM to which coraid_user belongs -# (must have admin privilege) (string value) -#coraid_group=admin - -# Password to connect to Coraid ESM (string value) -#coraid_password=password - -# Volume Type key name to store ESM Repository Name (string -# value) -#coraid_repository_key=coraid_repository - - -# -# Options defined in cinder.volume.drivers.datera -# - -# Datera API token. (string value) -#datera_api_token= - -# Datera API port. (string value) -#datera_api_port=7717 - -# Datera API version. (string value) -#datera_api_version=1 - -# Number of replicas to create of an inode. (string value) -#datera_num_replicas=3 - - -# -# Options defined in cinder.volume.drivers.emc.emc_vmax_common -# - -# Use this file for cinder emc plugin config data (string -# value) -#cinder_emc_config_file=/etc/cinder/cinder_emc_config.xml - - -# -# Options defined in cinder.volume.drivers.emc.emc_vnx_cli -# - -# VNX authentication scope type. (string value) -#storage_vnx_authentication_type=global - -# Directory path that contains the VNX security file. Make -# sure the security file is generated first. (string value) -#storage_vnx_security_file_dir= - -# Naviseccli Path. (string value) -#naviseccli_path= - -# Storage pool name. (string value) -#storage_vnx_pool_name= - -# VNX secondary SP IP Address. (string value) -#san_secondary_ip= - -# Default timeout for CLI operations in minutes. For example, -# LUN migration is a typical long running operation, which -# depends on the LUN size and the load of the array. An upper -# bound in the specific deployment can be set to avoid an -# unnecessarily long wait. By default, it is 365 days long. -# (integer value) -#default_timeout=525600 - -# Default max number of LUNs in a storage group. By default, -# the value is 255. (integer value) -#max_luns_per_storage_group=255 - -# To destroy storage group when the last LUN is removed from -# it. By default, the value is False. (boolean value) -#destroy_empty_storage_group=false - -# Mapping between hostname and its iSCSI initiator IP -# addresses. (string value) -#iscsi_initiators= - -# Automatically register initiators.
By default, the value is -# False. (boolean value) -#initiator_auto_registration=false - - -# -# Options defined in cinder.volume.drivers.eqlx -# - -# Group name to use for creating volumes (string value) -#eqlx_group_name=group-0 - -# Timeout for the Group Manager cli command execution (integer -# value) -#eqlx_cli_timeout=30 - -# Maximum retry count for reconnection (integer value) -#eqlx_cli_max_retries=5 - -# Use CHAP authentication for targets? (boolean value) -#eqlx_use_chap=false - -# Existing CHAP account name (string value) -#eqlx_chap_login=admin - -# Password for specified CHAP account name (string value) -#eqlx_chap_password=password - -# Pool in which volumes will be created (string value) -#eqlx_pool=default - - -# -# Options defined in cinder.volume.drivers.fujitsu_eternus_dx_common -# - -# The configuration file for the Cinder SMI-S driver (string -# value) -#cinder_smis_config_file=/etc/cinder/cinder_fujitsu_eternus_dx.xml - - -# -# Options defined in cinder.volume.drivers.fusionio.ioControl -# - -# amount of time to wait for iSCSI target to come online (integer -# value) -#fusionio_iocontrol_targetdelay=5 - -# number of retries for GET operations (integer value) -#fusionio_iocontrol_retry=3 - -# verify the array certificate on each transaction (boolean -# value) -#fusionio_iocontrol_verify_cert=true - - -# -# Options defined in cinder.volume.drivers.glusterfs -# - -# File with the list of available gluster shares (string -# value) -#glusterfs_shares_config=/etc/cinder/glusterfs_shares - -# Create volumes as sparsed files which take no space. If set -# to False, volume is created as a regular file. In such a case -# volume creation takes a lot of time. (boolean value) -#glusterfs_sparsed_volumes=true - -# Create volumes as QCOW2 files rather than raw files. -# (boolean value) -#glusterfs_qcow2_volumes=false - -# Base dir containing mount points for gluster shares.
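For illustration only: glusterfs_shares_config above points at a plain text file with one "host:/volume" entry per line. The address and volume name below are invented, and the real file would live at /etc/cinder/glusterfs_shares rather than /tmp:

# Write an example shares file of the kind glusterfs_shares_config names.
with open('/tmp/glusterfs_shares.example', 'w') as f:
    f.write('192.0.2.20:/cinder-volumes\n')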
(string -# value) -#glusterfs_mount_point_base=$state_path/mnt - - -# -# Options defined in cinder.volume.drivers.hds.hds -# - -# The configuration file for the Cinder HDS driver for HUS -# (string value) -#hds_cinder_config_file=/opt/hds/hus/cinder_hus_conf.xml - - -# -# Options defined in cinder.volume.drivers.hds.iscsi -# - -# Configuration file for HDS iSCSI cinder plugin (string -# value) -#hds_hnas_iscsi_config_file=/opt/hds/hnas/cinder_iscsi_conf.xml - - -# -# Options defined in cinder.volume.drivers.hds.nfs -# - -# Configuration file for HDS NFS cinder plugin (string value) -#hds_hnas_nfs_config_file=/opt/hds/hnas/cinder_nfs_conf.xml - - -# -# Options defined in cinder.volume.drivers.hitachi.hbsd_common -# - -# Serial number of storage system (string value) -#hitachi_serial_number= - -# Name of an array unit (string value) -#hitachi_unit_name= - -# Pool ID of storage system (integer value) -#hitachi_pool_id= - -# Thin pool ID of storage system (integer value) -#hitachi_thin_pool_id= - -# Range of logical device of storage system (string value) -#hitachi_ldev_range= - -# Default copy method of storage system (string value) -#hitachi_default_copy_method=FULL - -# Copy speed of storage system (integer value) -#hitachi_copy_speed=3 - -# Interval to check copy (integer value) -#hitachi_copy_check_interval=3 - -# Interval to check copy asynchronously (integer value) -#hitachi_async_copy_check_interval=10 - -# Control port names for HostGroup or iSCSI Target (string -# value) -#hitachi_target_ports= - -# Range of group number (string value) -#hitachi_group_range= - -# Request for creating HostGroup or iSCSI Target (boolean -# value) -#hitachi_group_request=false - - -# -# Options defined in cinder.volume.drivers.hitachi.hbsd_fc -# - -# Request for FC Zone creating HostGroup (boolean value) -#hitachi_zoning_request=false - - -# -# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm -# - -# Instance numbers for HORCM (string value) -#hitachi_horcm_numbers=200,201 - -# Username of storage system for HORCM (string value) -#hitachi_horcm_user= - -# Password of storage system for HORCM (string value) -#hitachi_horcm_password= - -# Add to HORCM configuration (boolean value) -#hitachi_horcm_add_conf=true - - -# -# Options defined in cinder.volume.drivers.hitachi.hbsd_iscsi -# - -# Add CHAP user (boolean value) -#hitachi_add_chap_user=false - -# iSCSI authentication method (string value) -#hitachi_auth_method= - -# iSCSI authentication username (string value) -#hitachi_auth_user=HBSD-CHAP-user - -# iSCSI authentication password (string value) -#hitachi_auth_password=HBSD-CHAP-password - - -# -# Options defined in cinder.volume.drivers.huawei -# - -# The configuration file for the Cinder Huawei driver (string -# value) -#cinder_huawei_conf_file=/etc/cinder/cinder_huawei_conf.xml - - -# -# Options defined in cinder.volume.drivers.ibm.gpfs -# - -# Specifies the path of the GPFS directory where Block Storage -# volume and snapshot files are stored. (string value) -#gpfs_mount_point_base= - -# Specifies the path of the Image service repository in GPFS. -# Leave undefined if not storing images in GPFS. (string -# value) -#gpfs_images_dir= - -# Specifies the type of image copy to be used. Set this when -# the Image service repository also uses GPFS so that image -# files can be transferred efficiently from the Image service -# to the Block Storage service. 
There are two valid values: -# "copy" specifies that a full copy of the image is made; -# "copy_on_write" specifies that copy-on-write optimization -# strategy is used and unmodified blocks of the image file are -# shared efficiently. (string value) -#gpfs_images_share_mode= - -# Specifies an upper limit on the number of indirections -# required to reach a specific block due to snapshots or -# clones. A lengthy chain of copy-on-write snapshots or -# clones can have a negative impact on performance, but -# improves space utilization. 0 indicates unlimited clone -# depth. (integer value) -#gpfs_max_clone_depth=0 - -# Specifies that volumes are created as sparse files which -# initially consume no space. If set to False, the volume is -# created as a fully allocated file, in which case, creation -# may take a significantly longer time. (boolean value) -#gpfs_sparse_volumes=true - -# Specifies the storage pool that volumes are assigned to. By -# default, the system storage pool is used. (string value) -#gpfs_storage_pool=system - - -# -# Options defined in cinder.volume.drivers.ibm.ibmnas -# - -# IP address or Hostname of NAS system. (string value) -#nas_ip= - -# User name to connect to NAS system. (string value) -#nas_login=admin - -# Password to connect to NAS system. (string value) -#nas_password= - -# SSH port to use to connect to NAS system. (integer value) -#nas_ssh_port=22 - -# Filename of private key to use for SSH authentication. -# (string value) -#nas_private_key= - -# IBMNAS platform type to be used as backend storage; valid -# values are - v7ku : for using IBM Storwize V7000 Unified, -# sonas : for using IBM Scale Out NAS, gpfs-nas : for using -# NFS based IBM GPFS deployments. (string value) -#ibmnas_platform_type=v7ku - - -# -# Options defined in cinder.volume.drivers.ibm.storwize_svc -# - -# Storage system storage pool for volumes (string value) -#storwize_svc_volpool_name=volpool - -# Storage system space-efficiency parameter for volumes -# (percentage) (integer value) -#storwize_svc_vol_rsize=2 - -# Storage system threshold for volume capacity warnings -# (percentage) (integer value) -#storwize_svc_vol_warning=0 - -# Storage system autoexpand parameter for volumes (True/False) -# (boolean value) -#storwize_svc_vol_autoexpand=true - -# Storage system grain size parameter for volumes -# (32/64/128/256) (integer value) -#storwize_svc_vol_grainsize=256 - -# Storage system compression option for volumes (boolean -# value) -#storwize_svc_vol_compression=false - -# Enable Easy Tier for volumes (boolean value) -#storwize_svc_vol_easytier=true - -# The I/O group in which to allocate volumes (integer value) -#storwize_svc_vol_iogrp=0 - -# Maximum number of seconds to wait for FlashCopy to be -# prepared. Maximum value is 600 seconds (10 minutes) (integer -# value) -#storwize_svc_flashcopy_timeout=120 - -# Connection protocol (iSCSI/FC) (string value) -#storwize_svc_connection_protocol=iSCSI - -# Configure CHAP authentication for iSCSI connections -# (Default: Enabled) (boolean value) -#storwize_svc_iscsi_chap_enabled=true - -# Connect with multipath (FC only; iSCSI multipath is -# controlled by Nova) (boolean value) -#storwize_svc_multipath_enabled=false - -# Allows vdisk to multi host mapping (boolean value) -#storwize_svc_multihostmap_enabled=true - -# Indicate whether svc driver is compatible for NPIV setup. 
If -# it is compatible, it will allow no wwpns being returned on -# get_conn_fc_wwpns during initialize_connection (boolean -# value) -#storwize_svc_npiv_compatibility_mode=false - -# Allow tenants to specify QOS on create (boolean value) -#storwize_svc_allow_tenant_qos=false - -# If operating in stretched cluster mode, specify the name of -# the pool in which mirrored copies are stored.Example: -# "pool2" (string value) -#storwize_svc_stretched_cluster_partner= - - -# -# Options defined in cinder.volume.drivers.ibm.xiv_ds8k -# - -# Proxy driver that connects to the IBM Storage Array (string -# value) -#xiv_ds8k_proxy=xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy - -# Connection type to the IBM Storage Array -# (fibre_channel|iscsi) (string value) -#xiv_ds8k_connection_type=iscsi - -# CHAP authentication mode, effective only for iscsi -# (disabled|enabled) (string value) -#xiv_chap=disabled - - -# -# Options defined in cinder.volume.drivers.lvm -# - -# Name for the VG that will contain exported volumes (string -# value) -volume_group=cinder-volumes - -# If >0, create LVs with multiple mirrors. Note that this -# requires lvm_mirrors + 2 PVs with available space (integer -# value) -#lvm_mirrors=0 - -# Type of LVM volumes to deploy; (default or thin) (string -# value) -#lvm_type=default - - -# -# Options defined in cinder.volume.drivers.netapp.options -# - -# The vFiler unit on which provisioning of block storage -# volumes will be done. This option is only used by the driver -# when connecting to an instance with a storage family of Data -# ONTAP operating in 7-Mode and the storage protocol selected -# is iSCSI. Only use this option when utilizing the MultiStore -# feature on the NetApp storage system. (string value) -#netapp_vfiler= - -# Administrative user account name used to access the storage -# system or proxy server. (string value) -#netapp_login= - -# Password for the administrative user account specified in -# the netapp_login option. (string value) -#netapp_password= - -# This option specifies the virtual storage server (Vserver) -# name on the storage cluster on which provisioning of block -# storage volumes should occur. If using the NFS storage -# protocol, this parameter is mandatory for storage service -# catalog support (utilized by Cinder volume type extra_specs -# support). If this option is specified, the exports belonging -# to the Vserver will only be used for provisioning in the -# future. Block storage volumes on exports not belonging to -# the Vserver specified by this option will continue to -# function normally. (string value) -#netapp_vserver= - -# The hostname (or IP address) for the storage system or proxy -# server. (string value) -#netapp_server_hostname= - -# The TCP port to use for communication with the storage -# system or proxy server. Traditionally, port 80 is used for -# HTTP and port 443 is used for HTTPS; however, this value -# should be changed if an alternate port has been configured -# on the storage system or proxy server. (integer value) -#netapp_server_port=80 - -# This option is used to specify the path to the E-Series -# proxy application on a proxy server. The value is combined -# with the value of the netapp_transport_type, -# netapp_server_hostname, and netapp_server_port options to -# create the URL used by the driver to connect to the proxy -# application. (string value) -#netapp_webservice_path=/devmgr/v2 - -# This option is only utilized when the storage family is -# configured to eseries. 
This option is used to restrict -# provisioning to the specified controllers. Specify the value -# of this option to be a comma separated list of controller -# hostnames or IP addresses to be used for provisioning. -# (string value) -#netapp_controller_ips= - -# Password for the NetApp E-Series storage array. (string -# value) -#netapp_sa_password= - -# This option is used to restrict provisioning to the -# specified storage pools. Only dynamic disk pools are -# currently supported. Specify the value of this option to be -# a comma separated list of disk pool names to be used for -# provisioning. (string value) -#netapp_storage_pools= - -# This option is used to define how the controllers in the -# E-Series storage array will work with the particular -# operating system on the hosts that are connected to it. -# (string value) -#netapp_eseries_host_type=linux_dm_mp - -# If the percentage of available space for an NFS share has -# dropped below the value specified by this option, the NFS -# image cache will be cleaned. (integer value) -#thres_avl_size_perc_start=20 - -# When the percentage of available space on an NFS share has -# reached the percentage specified by this option, the driver -# will stop clearing files from the NFS image cache that have -# not been accessed in the last M minutes, where M is the -# value of the expiry_thres_minutes configuration option. -# (integer value) -#thres_avl_size_perc_stop=60 - -# This option specifies the threshold for last access time for -# images in the NFS image cache. When a cache cleaning cycle -# begins, images in the cache that have not been accessed in -# the last M minutes, where M is the value of this parameter, -# will be deleted from the cache to create free space on the -# NFS share. (integer value) -#expiry_thres_minutes=720 - -# This option specifies the path of the NetApp copy offload -# tool binary. Ensure that the binary has execute permissions -# set which allow the effective user of the cinder-volume -# process to execute the file. (string value) -#netapp_copyoffload_tool_path= - -# The quantity to be multiplied by the requested volume size -# to ensure enough space is available on the virtual storage -# server (Vserver) to fulfill the volume creation request. -# (floating point value) -#netapp_size_multiplier=1.2 - -# This option is only utilized when the storage protocol is -# configured to use iSCSI. This option is used to restrict -# provisioning to the specified controller volumes. Specify -# the value of this option to be a comma separated list of -# NetApp controller volume names to be used for provisioning. -# (string value) -#netapp_volume_list= - -# The storage family type used on the storage system; valid -# values are ontap_7mode for using Data ONTAP operating in -# 7-Mode, ontap_cluster for using clustered Data ONTAP, or -# eseries for using E-Series. (string value) -#netapp_storage_family=ontap_cluster - -# The storage protocol to be used on the data path with the -# storage system; valid values are iscsi or nfs. (string -# value) -#netapp_storage_protocol= - -# The transport protocol used when communicating with the -# storage system or proxy server. Valid values are http or -# https. 
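A worked example of the netapp_size_multiplier option above, under its stated default of 1.2 (the helper name is made up): a 100 GiB request proceeds only if at least 120 GiB is free on the Vserver.

def has_room(requested_gib, free_gib, multiplier=1.2):
    # Hypothetical check: free space must cover size * multiplier.
    return free_gib >= requested_gib * multiplier

print(has_room(100, 130))  # True: 130 >= 120
print(has_room(100, 110))  # False: 110 < 120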
(string value) -#netapp_transport_type=http - - -# -# Options defined in cinder.volume.drivers.nexenta.options -# - -# IP address of Nexenta SA (string value) -#nexenta_host= - -# HTTP port to connect to Nexenta REST API server (integer -# value) -#nexenta_rest_port=2000 - -# Use http or https for REST connection (default auto) (string -# value) -#nexenta_rest_protocol=auto - -# User name to connect to Nexenta SA (string value) -#nexenta_user=admin - -# Password to connect to Nexenta SA (string value) -#nexenta_password=nexenta - -# Nexenta target portal port (integer value) -#nexenta_iscsi_target_portal_port=3260 - -# SA Pool that holds all volumes (string value) -#nexenta_volume=cinder - -# IQN prefix for iSCSI targets (string value) -#nexenta_target_prefix=iqn.1986-03.com.sun:02:cinder- - -# Prefix for iSCSI target groups on SA (string value) -#nexenta_target_group_prefix=cinder/ - -# File with the list of available nfs shares (string value) -#nexenta_shares_config=/etc/cinder/nfs_shares - -# Base directory that contains NFS share mount points (string -# value) -#nexenta_mount_point_base=$state_path/mnt - -# Enables or disables the creation of volumes as sparsed files -# that take no space. If disabled (False), volume is created -# as a regular file, which takes a long time. (boolean value) -#nexenta_sparsed_volumes=true - -# Default compression value for new ZFS folders. (string -# value) -#nexenta_volume_compression=on - -# If set True cache NexentaStor appliance volroot option -# value. (boolean value) -#nexenta_nms_cache_volroot=true - -# Enable stream compression, level 1..9. 1 - gives best speed; -# 9 - gives best compression. (integer value) -#nexenta_rrmgr_compression=0 - -# TCP Buffer size in KiloBytes. (integer value) -#nexenta_rrmgr_tcp_buf_size=4096 - -# Number of TCP connections. (integer value) -#nexenta_rrmgr_connections=2 - -# Block size for volumes (default=blank means 8KB) (string -# value) -#nexenta_blocksize= - -# Enables or disables the creation of sparse volumes (boolean -# value) -#nexenta_sparse=false - - -# -# Options defined in cinder.volume.drivers.nfs -# - -# File with the list of available nfs shares (string value) -#nfs_shares_config=/etc/cinder/nfs_shares - -# Create volumes as sparsed files which take no space.If set -# to False volume is created as regular file.In such case -# volume creation takes a lot of time. (boolean value) -#nfs_sparsed_volumes=true - -# Percent of ACTUAL usage of the underlying volume before no -# new volumes can be allocated to the volume destination. -# (floating point value) -#nfs_used_ratio=0.95 - -# This will compare the allocated to available space on the -# volume destination. If the ratio exceeds this number, the -# destination will no longer be valid. (floating point value) -#nfs_oversub_ratio=1.0 - -# Base dir containing mount points for nfs shares. (string -# value) -#nfs_mount_point_base=$state_path/mnt - -# Mount options passed to the nfs client. See section of the -# nfs man page for details. (string value) -#nfs_mount_options= - - -# -# Options defined in cinder.volume.drivers.nimble -# - -# Nimble Controller pool name (string value) -#nimble_pool_name=default - -# Nimble Subnet Label (string value) -#nimble_subnet_label=* - - -# -# Options defined in cinder.volume.drivers.prophetstor.options -# - -# DPL pool uuid in which DPL volumes are stored. (string -# value) -#dpl_pool= - -# DPL port number. 
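The nfs_used_ratio and nfs_oversub_ratio options above combine into an eligibility rule for each share; this is a sketch of that rule with invented names, not the driver's actual code:

def share_is_eligible(requested_gb, total_gb, available_gb, allocated_gb,
                      used_ratio=0.95, oversub_ratio=1.0):
    used_fraction = (total_gb - available_gb) / float(total_gb)
    if used_fraction > used_ratio:
        return False  # ACTUAL usage already exceeds nfs_used_ratio
    if (allocated_gb + requested_gb) / float(total_gb) > oversub_ratio:
        return False  # apparent allocation would pass nfs_oversub_ratio
    return True

print(share_is_eligible(10, 100, 50, 60))  # True
print(share_is_eligible(10, 100, 2, 60))   # False: 98% already used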
(integer value) -#dpl_port=8357 - - -# -# Options defined in cinder.volume.drivers.pure -# - -# REST API authorization token. (string value) -#pure_api_token= - - -# -# Options defined in cinder.volume.drivers.rbd -# - -# The RADOS pool where rbd volumes are stored (string value) -#rbd_pool=rbd - -# The RADOS client name for accessing rbd volumes - only set -# when using cephx authentication (string value) -#rbd_user= - -# Path to the ceph configuration file (string value) -#rbd_ceph_conf= - -# Flatten volumes created from snapshots to remove dependency -# from volume to snapshot (boolean value) -#rbd_flatten_volume_from_snapshot=false - -# The libvirt uuid of the secret for the rbd_user volumes -# (string value) -#rbd_secret_uuid= - -# Directory where temporary image files are stored when the -# volume driver does not write them directly to the volume. -# (string value) -#volume_tmp_dir= - -# Maximum number of nested volume clones that are taken before -# a flatten occurs. Set to 0 to disable cloning. (integer -# value) -#rbd_max_clone_depth=5 - -# Volumes will be chunked into objects of this size (in -# megabytes). (integer value) -#rbd_store_chunk_size=4 - -# Timeout value (in seconds) used when connecting to ceph -# cluster. If value < 0, no timeout is set and default -# librados value is used. (integer value) -#rados_connect_timeout=-1 - - -# -# Options defined in cinder.volume.drivers.remotefs -# - -# IP address or Hostname of NAS system. (string value) -#nas_ip= - -# User name to connect to NAS system. (string value) -#nas_login=admin - -# Password to connect to NAS system. (string value) -#nas_password= - -# SSH port to use to connect to NAS system. (integer value) -#nas_ssh_port=22 - -# Filename of private key to use for SSH authentication. -# (string value) -#nas_private_key= - - -# -# Options defined in cinder.volume.drivers.san.hp.hp_3par_common -# - -# 3PAR WSAPI Server Url like https://<3par ip>:8080/api/v1 -# (string value) -#hp3par_api_url= - -# 3PAR Super user username (string value) -#hp3par_username= - -# 3PAR Super user password (string value) -#hp3par_password= - -# The CPG to use for volume creation (string value) -#hp3par_cpg=OpenStack - -# The CPG to use for Snapshots for volumes. If empty -# hp3par_cpg will be used (string value) -#hp3par_cpg_snap= - -# The time in hours to retain a snapshot. You can't delete it -# before this expires. (string value) -#hp3par_snapshot_retention= - -# The time in hours when a snapshot expires and is deleted. -# This must be larger than expiration (string value) -#hp3par_snapshot_expiration= - -# Enable HTTP debugging to 3PAR (boolean value) -#hp3par_debug=false - -# List of target iSCSI addresses to use. (list value) -#hp3par_iscsi_ips= - -# Enable CHAP authentication for iSCSI connections. 
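A toy model (assumed behaviour, not the rbd driver's code) of how rbd_max_clone_depth above bounds snapshot-clone chains:

def needs_flatten(parent_clone_depth, max_clone_depth=5):
    # A new clone sits one level deeper than its parent in the chain.
    return parent_clone_depth + 1 > max_clone_depth

print(needs_flatten(4))  # False: a fifth level is still allowed
print(needs_flatten(5))  # True: the driver flattens instead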
(boolean -# value) -#hp3par_iscsi_chap_enabled=false - - -# -# Options defined in cinder.volume.drivers.san.hp.hp_lefthand_rest_proxy -# - -# HP LeftHand WSAPI Server Url like https://:8081/lhos (string value) -#hplefthand_api_url= - -# HP LeftHand Super user username (string value) -#hplefthand_username= - -# HP LeftHand Super user password (string value) -#hplefthand_password= - -# HP LeftHand cluster name (string value) -#hplefthand_clustername= - -# Configure CHAP authentication for iSCSI connections -# (Default: Disabled) (boolean value) -#hplefthand_iscsi_chap_enabled=false - -# Enable HTTP debugging to LeftHand (boolean value) -#hplefthand_debug=false - - -# -# Options defined in cinder.volume.drivers.san.hp.hp_msa_common -# - -# The VDisk to use for volume creation. (string value) -#msa_vdisk=OpenStack - - -# -# Options defined in cinder.volume.drivers.san.san -# - -# Use thin provisioning for SAN volumes? (boolean value) -#san_thin_provision=true - -# IP address of SAN controller (string value) -#san_ip= - -# Username for SAN controller (string value) -#san_login=admin - -# Password for SAN controller (string value) -#san_password= - -# Filename of private key to use for SSH authentication -# (string value) -#san_private_key= - -# Cluster name to use for creating volumes (string value) -#san_clustername= - -# SSH port to use with SAN (integer value) -#san_ssh_port=22 - -# Execute commands locally instead of over SSH; use if the -# volume service is running on the SAN device (boolean value) -#san_is_local=false - -# SSH connection timeout in seconds (integer value) -#ssh_conn_timeout=30 - -# Minimum ssh connections in the pool (integer value) -#ssh_min_pool_conn=1 - -# Maximum ssh connections in the pool (integer value) -#ssh_max_pool_conn=5 - - -# -# Options defined in cinder.volume.drivers.san.solaris -# - -# The ZFS path under which to create zvols for volumes. -# (string value) -#san_zfs_volume_base=rpool/ - - -# -# Options defined in cinder.volume.drivers.scality -# - -# Path or URL to Scality SOFS configuration file (string -# value) -#scality_sofs_config= - -# Base dir where Scality SOFS shall be mounted (string value) -#scality_sofs_mount_point=$state_path/scality - -# Path from Scality SOFS root to volume dir (string value) -#scality_sofs_volume_dir=cinder/volumes - - -# -# Options defined in cinder.volume.drivers.smbfs -# - -# File with the list of available smbfs shares. (string value) -#smbfs_shares_config=/etc/cinder/smbfs_shares - -# Default format that will be used when creating volumes if no -# volume format is specified. Can be set to: raw, qcow2, vhd -# or vhdx. (string value) -#smbfs_default_volume_format=qcow2 - -# Create volumes as sparsed files which take no space rather -# than regular files when using raw format, in which case -# volume creation takes lot of time. (boolean value) -#smbfs_sparsed_volumes=true - -# Percent of ACTUAL usage of the underlying volume before no -# new volumes can be allocated to the volume destination. -# (floating point value) -#smbfs_used_ratio=0.95 - -# This will compare the allocated to available space on the -# volume destination. If the ratio exceeds this number, the -# destination will no longer be valid. (floating point value) -#smbfs_oversub_ratio=1.0 - -# Base dir containing mount points for smbfs shares. (string -# value) -#smbfs_mount_point_base=$state_path/mnt - -# Mount options passed to the smbfs client. See mount.cifs man -# page for details. 
(string value) -#smbfs_mount_options=noperm,file_mode=0775,dir_mode=0775 - - -# -# Options defined in cinder.volume.drivers.solidfire -# - -# Set 512 byte emulation on volume creation. (boolean value) -#sf_emulate_512=true - -# Allow tenants to specify QOS on create (boolean value) -#sf_allow_tenant_qos=false - -# Create SolidFire accounts with this prefix. Any string can -# be used here, but the string "hostname" is special and will -# create a prefix using the cinder node hostname (previous -# default behavior). The default is NO prefix. (string value) -#sf_account_prefix= - -# SolidFire API port. Useful if the device API is behind a -# proxy on a different port. (integer value) -#sf_api_port=443 - - -# -# Options defined in cinder.volume.drivers.vmware.vmdk -# - -# IP address for connecting to VMware ESX/VC server. (string -# value) -#vmware_host_ip= - -# Username for authenticating with VMware ESX/VC server. -# (string value) -#vmware_host_username= - -# Password for authenticating with VMware ESX/VC server. -# (string value) -#vmware_host_password= - -# Optional VIM service WSDL location, e.g. -# http:///vimService.wsdl. Optional override to the -# default location for bug workarounds. (string value) -#vmware_wsdl_location= - -# Number of times VMware ESX/VC server API must be retried -# upon connection related issues. (integer value) -#vmware_api_retry_count=10 - -# The interval (in seconds) for polling remote tasks invoked -# on VMware ESX/VC server. (floating point value) -#vmware_task_poll_interval=0.5 - -# Name for the folder in the VC datacenter that will contain -# cinder volumes. (string value) -#vmware_volume_folder=cinder-volumes - -# Timeout in seconds for VMDK volume transfer between Cinder -# and Glance. (integer value) -#vmware_image_transfer_timeout_secs=7200 - -# Max number of objects to be retrieved per batch. Query -# results will be obtained in batches from the server and not -# in one shot. Server may still limit the count to something -# less than the configured value. (integer value) -#vmware_max_objects_retrieval=100 - -# Optional string specifying the VMware VC server version. The -# driver attempts to retrieve the version from VMware VC -# server. Set this configuration only if you want to override -# the VC server version. (string value) -#vmware_host_version= - -# Directory where virtual disks are stored during volume -# backup and restore.
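Sketch of the sf_account_prefix behaviour described above, with an invented helper; only the literal string "hostname" is special:

import socket

def account_name(account, prefix=''):
    if prefix == 'hostname':  # special value: use this node's hostname
        prefix = socket.gethostname()
    return '%s-%s' % (prefix, account) if prefix else account

print(account_name('tenant42'))              # default: no prefix
print(account_name('tenant42', 'hostname'))  # e.g. 'node1-tenant42'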
(string value) -#vmware_tmp_dir=/tmp - - -# -# Options defined in cinder.volume.drivers.windows.windows -# - -# Path to store VHD backed volumes (string value) -#windows_iscsi_lun_path=C:\iSCSIVirtualDisks - - -# -# Options defined in cinder.volume.drivers.zadara -# - -# Management IP of Zadara VPSA (string value) -#zadara_vpsa_ip= - -# Zadara VPSA port number (string value) -#zadara_vpsa_port= - -# Use SSL connection (boolean value) -#zadara_vpsa_use_ssl=false - -# User name for the VPSA (string value) -#zadara_user= - -# Password for the VPSA (string value) -#zadara_password= - -# Name of VPSA storage pool for volumes (string value) -#zadara_vpsa_poolname= - -# Default thin provisioning policy for volumes (boolean value) -#zadara_vol_thin=true - -# Default encryption policy for volumes (boolean value) -#zadara_vol_encrypt=false - -# Default template for VPSA volume names (string value) -#zadara_vol_name_template=OS_%s - -# Automatically detach from servers on volume delete (boolean -# value) -#zadara_vpsa_auto_detach_on_delete=true - -# Don't halt on deletion of non-existing volumes (boolean -# value) -#zadara_vpsa_allow_nonexistent_delete=true - - -# -# Options defined in cinder.volume.drivers.zfssa.zfssaiscsi -# - -# Storage pool name. (string value) -#zfssa_pool= - -# Project name. (string value) -#zfssa_project= - -# Block size: 512, 1k, 2k, 4k, 8k, 16k, 32k, 64k, 128k. -# (string value) -#zfssa_lun_volblocksize=8k - -# Flag to enable sparse (thin-provisioned): True, False. -# (boolean value) -#zfssa_lun_sparse=false - -# Data compression-off, lzjb, gzip-2, gzip, gzip-9. (string -# value) -#zfssa_lun_compression= - -# Synchronous write bias-latency, throughput. (string value) -#zfssa_lun_logbias= - -# iSCSI initiator group. (string value) -#zfssa_initiator_group= - -# iSCSI initiator IQNs. (comma separated) (string value) -#zfssa_initiator= - -# iSCSI initiator CHAP user. (string value) -#zfssa_initiator_user= - -# iSCSI initiator CHAP password. (string value) -#zfssa_initiator_password= - -# iSCSI target group name. (string value) -#zfssa_target_group=tgt-grp - -# iSCSI target CHAP user. (string value) -#zfssa_target_user= - -# iSCSI target CHAP password. (string value) -#zfssa_target_password= - -# iSCSI target portal (Data-IP:Port, w.x.y.z:3260). (string -# value) -#zfssa_target_portal= - -# Network interfaces of iSCSI targets. (comma separated) -# (string value) -#zfssa_target_interfaces= - -# REST connection timeout. (seconds) (integer value) -#zfssa_rest_timeout= - - -# -# Options defined in cinder.volume.manager -# - -# Driver to use for volume creation (string value) -volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver - -# Timeout for creating the volume to migrate to when -# performing volume migration (seconds) (integer value) -#migration_create_volume_timeout_secs=300 - -# Offload pending volume delete during volume service startup -# (boolean value) -#volume_service_inithost_offload=false - -# FC Zoning mode configured (string value) -#zoning_mode=none - -# User defined capabilities, a JSON formatted string -# specifying key/value pairs. 
(string value) -#extra_capabilities={} - - -[BRCD_FABRIC_EXAMPLE] - -# -# Options defined in cinder.zonemanager.drivers.brocade.brcd_fabric_opts -# - -# Management IP of fabric (string value) -#fc_fabric_address= - -# Fabric user ID (string value) -#fc_fabric_user= - -# Password for user (string value) -#fc_fabric_password= - -# Connecting port (integer value) -#fc_fabric_port=22 - -# overridden zoning policy (string value) -#zoning_policy=initiator-target - -# overridden zoning activation state (boolean value) -#zone_activate=true - -# overridden zone name prefix (string value) -#zone_name_prefix= - -# Principal switch WWN of the fabric (string value) -#principal_switch_wwn= - - -[CISCO_FABRIC_EXAMPLE] - -# -# Options defined in cinder.zonemanager.drivers.cisco.cisco_fabric_opts -# - -# Management IP of fabric (string value) -#cisco_fc_fabric_address= - -# Fabric user ID (string value) -#cisco_fc_fabric_user= - -# Password for user (string value) -#cisco_fc_fabric_password= - -# Connecting port (integer value) -#cisco_fc_fabric_port=22 - -# overridden zoning policy (string value) -#cisco_zoning_policy=initiator-target - -# overridden zoning activation state (boolean value) -#cisco_zone_activate=true - -# overridden zone name prefix (string value) -#cisco_zone_name_prefix= - -# VSAN of the Fabric (string value) -#cisco_zoning_vsan= - - -[database] - -# -# Options defined in oslo.db -# - -# The file name to use with SQLite. (string value) -#sqlite_db=oslo.sqlite - -# If True, SQLite uses synchronous mode. (boolean value) -#sqlite_synchronous=true - -# The back end to use for the database. (string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend=sqlalchemy - -# The SQLAlchemy connection string to use to connect to the -# database. (string value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -connection=postgresql://{{ CINDER_DB_USER }}:{{ CINDER_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/cinder - -# The SQLAlchemy connection string to use to connect to the -# slave database. (string value) -#slave_connection= - -# The SQL mode to be used for MySQL sessions. This option, -# including the default, overrides any server-set SQL mode. To -# use whatever SQL mode is set by the server configuration, -# set this to no value. Example: mysql_sql_mode= (string -# value) -#mysql_sql_mode=TRADITIONAL - -# Timeout before idle SQL connections are reaped. (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout=3600 - -# Minimum number of SQL connections to keep open in a pool. -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size=1 - -# Maximum number of SQL connections to keep open in a pool. -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size= - -# Maximum number of database connection retries during -# startup. Set to -1 to specify an infinite retry count. -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries=10 - -# Interval between retries of opening a SQL connection. 
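The templated [database] connection line above expands to an ordinary SQLAlchemy URL; a minimal sketch with placeholder credentials shows it parsing cleanly before cinder ever uses it:

from sqlalchemy.engine.url import make_url

# Placeholders stand in for CINDER_DB_USER, CINDER_DB_PASSWORD and
# CONTROLLER_HOST_ADDRESS after rendering.
url = make_url('postgresql://cinder:secret@controller.example/cinder')
print('%s database %r on host %r' % (url.drivername, url.database, url.host))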
-# (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval=10 - -# If set, use this value for max_overflow with SQLAlchemy. -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow= - -# Verbosity of SQL debugging information: 0=None, -# 100=Everything. (integer value) -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug=0 - -# Add Python stack traces to SQL as comment strings. (boolean -# value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace=false - -# If set, use this value for pool_timeout with SQLAlchemy. -# (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout= - -# Enable the experimental use of database reconnect on -# connection lost. (boolean value) -#use_db_reconnect=false - -# Seconds between database connection retries. (integer value) -#db_retry_interval=1 - -# If True, increases the interval between database connection -# retries up to db_max_retry_interval. (boolean value) -#db_inc_retry_interval=true - -# If db_inc_retry_interval is set, the maximum seconds between -# database connection retries. (integer value) -#db_max_retry_interval=10 - -# Maximum database connection retries before error is raised. -# Set to -1 to specify an infinite retry count. (integer -# value) -#db_max_retries=20 - - -# -# Options defined in oslo.db.concurrency -# - -# Enable the experimental use of thread pooling for all DB API -# calls (boolean value) -# Deprecated group/name - [DEFAULT]/dbapi_use_tpool -#use_tpool=false - - -[fc-zone-manager] - -# -# Options defined in cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver -# - -# Southbound connector for zoning operation (string value) -#brcd_sb_connector=cinder.zonemanager.drivers.brocade.brcd_fc_zone_client_cli.BrcdFCZoneClientCLI - - -# -# Options defined in cinder.zonemanager.drivers.cisco.cisco_fc_zone_driver -# - -# Southbound connector for zoning operation (string value) -#cisco_sb_connector=cinder.zonemanager.drivers.cisco.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI - - -# -# Options defined in cinder.zonemanager.fc_zone_manager -# - -# FC Zone Driver responsible for zone management (string -# value) -#zone_driver=cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver - -# Zoning policy configured by user (string value) -#zoning_policy=initiator-target - -# Comma separated list of fibre channel fabric names. This -# list of names is used to retrieve other SAN credentials for -# connecting to each SAN fabric (string value) -#fc_fabric_names= - -# FC San Lookup Service (string value) -#fc_san_lookup_service=cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service.BrcdFCSanLookupService - - -[keymgr] - -# -# Options defined in cinder.keymgr -# - -# The full class name of the key manager API class (string -# value) -#api_class=cinder.keymgr.conf_key_mgr.ConfKeyManager - - -# -# Options defined in cinder.keymgr.conf_key_mgr -# - -# Fixed key returned by key manager, specified in hex (string -# value) -#fixed_key= - - -# -# Options defined in cinder.keymgr.key_mgr -# - -# Authentication url for encryption service. (string value) -#encryption_auth_url=http://localhost:5000/v2.0 - -# Url for encryption service. 
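A worked example of the retry options above: with db_retry_interval=1, db_inc_retry_interval=true and db_max_retry_interval=10, the waits grow roughly by doubling up to the cap. This sketch approximates the schedule those options describe; it is not oslo.db's code:

def wait_times(attempts, base=1, inc=True, maximum=10):
    # Doubling backoff, capped at db_max_retry_interval.
    return [min(base * (2 ** n if inc else 1), maximum)
            for n in range(attempts)]

print(wait_times(6))  # [1, 2, 4, 8, 10, 10]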
(string value) -#encryption_api_url=http://localhost:9311/v1 - - -[keystone_authtoken] - -# -# Options defined in keystonemiddleware.auth_token -# - -# Prefix to prepend at the beginning of the path. Deprecated, -# use identity_uri. (string value) -#auth_admin_prefix= - -# Host providing the admin Identity API endpoint. Deprecated, -# use identity_uri. (string value) -#auth_host=127.0.0.1 - -# Port of the admin Identity API endpoint. Deprecated, use -# identity_uri. (integer value) -#auth_port=35357 - -# Protocol of the admin Identity API endpoint (http or https). -# Deprecated, use identity_uri. (string value) -#auth_protocol=https - -# Complete public Identity API endpoint (string value) -auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 - -# Complete admin Identity API endpoint. This should specify -# the unversioned root endpoint e.g. https://localhost:35357/ -# (string value) -identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357 - -# API version of the admin Identity API endpoint (string -# value) -#auth_version= - -# Do not handle authorization requests within the middleware, -# but delegate the authorization decision to downstream WSGI -# components (boolean value) -#delay_auth_decision=false - -# Request timeout value for communicating with Identity API -# server. (boolean value) -#http_connect_timeout= - -# How many times are we trying to reconnect when communicating -# with Identity API Server. (integer value) -#http_request_max_retries=3 - -# This option is deprecated and may be removed in a future -# release. Single shared secret with the Keystone -# configuration used for bootstrapping a Keystone -# installation, or otherwise bypassing the normal -# authentication process. This option should not be used, use -# `admin_user` and `admin_password` instead. (string value) -#admin_token= - -# Keystone account username (string value) -admin_user={{ CINDER_SERVICE_USER }} - -# Keystone account password (string value) -admin_password={{ CINDER_SERVICE_PASSWORD }} - -# Keystone service account tenant name to validate user tokens -# (string value) -admin_tenant_name=service - -# Env key for the swift cache (string value) -#cache= - -# Required if Keystone server requires client certificate -# (string value) -#certfile= - -# Required if Keystone server requires client certificate -# (string value) -#keyfile= - -# A PEM encoded Certificate Authority to use when verifying -# HTTPs connections. Defaults to system CAs. (string value) -#cafile= - -# Verify HTTPS connections. (boolean value) -#insecure=false - -# Directory used to cache files related to PKI tokens (string -# value) -#signing_dir= - -# Optionally specify a list of memcached server(s) to use for -# caching. If left undefined, tokens will instead be cached -# in-process. (list value) -# Deprecated group/name - [DEFAULT]/memcache_servers -#memcached_servers= - -# In order to prevent excessive effort spent validating -# tokens, the middleware caches previously-seen tokens for a -# configurable duration (in seconds). Set to -1 to disable -# caching completely. (integer value) -#token_cache_time=300 - -# Determines the frequency at which the list of revoked tokens -# is retrieved from the Identity service (in seconds). A high -# number of revocation events combined with a low cache -# duration may significantly reduce performance. (integer -# value) -#revocation_cache_time=10 - -# (optional) if defined, indicate whether token data should be -# authenticated or authenticated and encrypted. 
Acceptable -# values are MAC or ENCRYPT. If MAC, token data is -# authenticated (with HMAC) in the cache. If ENCRYPT, token -# data is encrypted and authenticated in the cache. If the -# value is not one of these options or empty, auth_token will -# raise an exception on initialization. (string value) -#memcache_security_strategy= - -# (optional, mandatory if memcache_security_strategy is -# defined) this string is used for key derivation. (string -# value) -#memcache_secret_key= - -# (optional) number of seconds memcached server is considered -# dead before it is tried again. (integer value) -#memcache_pool_dead_retry=300 - -# (optional) max total number of open connections to every -# memcached server. (integer value) -#memcache_pool_maxsize=10 - -# (optional) socket timeout in seconds for communicating with -# a memcache server. (integer value) -#memcache_pool_socket_timeout=3 - -# (optional) number of seconds a connection to memcached is -# held unused in the pool before it is closed. (integer value) -#memcache_pool_unused_timeout=60 - -# (optional) number of seconds that an operation will wait to -# get a memcache client connection from the pool. (integer -# value) -#memcache_pool_conn_get_timeout=10 - -# (optional) use the advanced (eventlet safe) memcache client -# pool. The advanced pool will only work under python 2.x. -# (boolean value) -#memcache_use_advanced_pool=false - -# (optional) indicate whether to set the X-Service-Catalog -# header. If False, middleware will not ask for service -# catalog on token validation and will not set the X-Service- -# Catalog header. (boolean value) -#include_service_catalog=true - -# Used to control the use and type of token binding. Can be -# set to: "disabled" to not check token binding. "permissive" -# (default) to validate binding information if the bind type -# is of a form known to the server and ignore it if not. -# "strict" like "permissive" but if the bind type is unknown -# the token will be rejected. "required" any form of token -# binding is needed to be allowed. Finally the name of a -# binding method that must be present in tokens. (string -# value) -#enforce_token_bind=permissive - -# If true, the revocation list will be checked for cached -# tokens. This requires that PKI tokens are configured on the -# Keystone server. (boolean value) -#check_revocations_for_cached=false - -# Hash algorithms to use for hashing PKI tokens. This may be a -# single algorithm or multiple. The algorithms are those -# supported by Python standard hashlib.new(). The hashes will -# be tried in the order given, so put the preferred one first -# for performance. The result of the first hash will be stored -# in the cache. This will typically be set to multiple values -# only while migrating from a less secure algorithm to a more -# secure one. Once all the old tokens are expired this option -# should be set to a single value for better performance. -# (list value) -#hash_algorithms=md5 - - -[matchmaker_redis] - -# -# Options defined in oslo.messaging -# - -# Host to locate redis. (string value) -#host=127.0.0.1 - -# Use this port to connect to redis host. (integer value) -#port=6379 - -# Password for Redis server (optional). (string value) -#password= - - -[matchmaker_ring] - -# -# Options defined in oslo.messaging -# - -# Matchmaker ring file (JSON). 
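Returning to the [keystone_authtoken] endpoints set above, a quick reachability probe; illustrative only, with 192.0.2.10 standing in for the rendered CONTROLLER_HOST_ADDRESS:

import requests

CONTROLLER = '192.0.2.10'  # stand-in for CONTROLLER_HOST_ADDRESS
for url in ('http://%s:5000/v2.0/' % CONTROLLER,
            'http://%s:35357/' % CONTROLLER):
    print('%s -> %d' % (url, requests.get(url, timeout=5).status_code))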
(string value) -# Deprecated group/name - [DEFAULT]/matchmaker_ringfile -#ringfile=/etc/oslo/matchmaker_ring.json - - -[oslo_messaging_amqp] - -# -# Options defined in oslo.messaging -# -# NOTE: Options in this group are supported when using oslo.messaging >=1.5.0. - -# address prefix used when sending to a specific server -# (string value) -#server_request_prefix=exclusive - -# address prefix used when broadcasting to all servers (string -# value) -#broadcast_prefix=broadcast - -# address prefix when sending to any server in group (string -# value) -#group_request_prefix=unicast - -# Name for the AMQP container (string value) -#container_name= - -# Timeout for inactive connections (in seconds) (integer -# value) -#idle_timeout=0 - -# Debug: dump AMQP frames to stdout (boolean value) -#trace=false - -# CA certificate PEM file for verifing server certificate -# (string value) -#ssl_ca_file= - -# Identifying certificate PEM file to present to clients -# (string value) -#ssl_cert_file= - -# Private key PEM file used to sign cert_file certificate -# (string value) -#ssl_key_file= - -# Password for decrypting ssl_key_file (if encrypted) (string -# value) -#ssl_key_password= - -# Accept clients using either SSL or plain TCP (boolean value) -#allow_insecure_clients=false - - -[profiler] - -# -# Options defined in cinder.service -# - -# If False fully disable profiling feature. (boolean value) -#profiler_enabled=false - -# If False doesn't trace SQL requests. (boolean value) -#trace_sqlalchemy=false - - -[ssl] - -# -# Options defined in cinder.openstack.common.sslutils -# - -# CA certificate file to use to verify connecting clients -# (string value) -#ca_file= - -# Certificate file to use when starting the server securely -# (string value) -#cert_file= - -# Private key file to use when starting the server securely -# (string value) -#key_file= - - diff --git a/openstack/usr/share/openstack/cinder/policy.json b/openstack/usr/share/openstack/cinder/policy.json deleted file mode 100644 index 8f3a7b2f..00000000 --- a/openstack/usr/share/openstack/cinder/policy.json +++ /dev/null @@ -1,80 +0,0 @@ -{ - "context_is_admin": "role:admin", - "admin_or_owner": "is_admin:True or project_id:%(project_id)s", - "default": "rule:admin_or_owner", - - "admin_api": "is_admin:True", - - "volume:create": "", - "volume:get_all": "", - "volume:get_volume_metadata": "", - "volume:get_volume_admin_metadata": "rule:admin_api", - "volume:delete_volume_admin_metadata": "rule:admin_api", - "volume:update_volume_admin_metadata": "rule:admin_api", - "volume:get_snapshot": "", - "volume:get_all_snapshots": "", - "volume:extend": "", - "volume:update_readonly_flag": "", - "volume:retype": "", - - "volume_extension:types_manage": "rule:admin_api", - "volume_extension:types_extra_specs": "rule:admin_api", - "volume_extension:volume_type_encryption": "rule:admin_api", - "volume_extension:volume_encryption_metadata": "rule:admin_or_owner", - "volume_extension:extended_snapshot_attributes": "", - "volume_extension:volume_image_metadata": "", - - "volume_extension:quotas:show": "", - "volume_extension:quotas:update": "rule:admin_api", - "volume_extension:quota_classes": "", - - "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", - "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", - "volume_extension:backup_admin_actions:reset_status": "rule:admin_api", - "volume_extension:volume_admin_actions:force_delete": "rule:admin_api", - "volume_extension:volume_admin_actions:force_detach": 
"rule:admin_api", - "volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api", - "volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api", - "volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api", - - "volume_extension:volume_host_attribute": "rule:admin_api", - "volume_extension:volume_tenant_attribute": "rule:admin_or_owner", - "volume_extension:volume_mig_status_attribute": "rule:admin_api", - "volume_extension:hosts": "rule:admin_api", - "volume_extension:services": "rule:admin_api", - - "volume_extension:volume_manage": "rule:admin_api", - "volume_extension:volume_unmanage": "rule:admin_api", - - "volume:services": "rule:admin_api", - - "volume:create_transfer": "", - "volume:accept_transfer": "", - "volume:delete_transfer": "", - "volume:get_all_transfers": "", - - "volume_extension:replication:promote": "rule:admin_api", - "volume_extension:replication:reenable": "rule:admin_api", - - "backup:create" : "", - "backup:delete": "", - "backup:get": "", - "backup:get_all": "", - "backup:restore": "", - "backup:backup-import": "rule:admin_api", - "backup:backup-export": "rule:admin_api", - - "snapshot_extension:snapshot_actions:update_snapshot_status": "", - - "consistencygroup:create" : "group:nobody", - "consistencygroup:delete": "group:nobody", - "consistencygroup:get": "group:nobody", - "consistencygroup:get_all": "group:nobody", - - "consistencygroup:create_cgsnapshot" : "", - "consistencygroup:delete_cgsnapshot": "", - "consistencygroup:get_cgsnapshot": "", - "consistencygroup:get_all_cgsnapshots": "", - - "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api" -} diff --git a/openstack/usr/share/openstack/extras/00-disable-device.network b/openstack/usr/share/openstack/extras/00-disable-device.network deleted file mode 100644 index 8e2532d0..00000000 --- a/openstack/usr/share/openstack/extras/00-disable-device.network +++ /dev/null @@ -1,2 +0,0 @@ -[Match] -Name={{ item }} diff --git a/openstack/usr/share/openstack/extras/60-device-dhcp.network b/openstack/usr/share/openstack/extras/60-device-dhcp.network deleted file mode 100644 index 6fdbfd8d..00000000 --- a/openstack/usr/share/openstack/extras/60-device-dhcp.network +++ /dev/null @@ -1,5 +0,0 @@ -[Match] -Name={{ item }} - -[Network] -DHCP=yes diff --git a/openstack/usr/share/openstack/glance.yml b/openstack/usr/share/openstack/glance.yml deleted file mode 100644 index aa7e4c78..00000000 --- a/openstack/usr/share/openstack/glance.yml +++ /dev/null @@ -1,93 +0,0 @@ ---- -- hosts: localhost - vars_files: - - "/etc/openstack/glance.conf" - tasks: - - name: Create the glance user. 
- user: - name: glance - comment: Openstack Glance Daemons - shell: /sbin/nologin - home: /var/lib/glance - - - name: Create the /var folders for glance - file: - path: "{{ item }}" - state: directory - owner: glance - group: glance - with_items: - - /var/run/glance - - /var/lock/glance - - /var/log/glance - - /var/lib/glance - - /var/lib/glance/images - - /var/lib/glance/image-cache - - - name: Create /etc/glance directory - file: - path: /etc/glance - state: directory - - - name: Add the configuration needed for glance in /etc/glance using templates - template: - src: /usr/share/openstack/glance/{{ item }} - dest: /etc/glance/{{ item }} - with_lines: - - cd /usr/share/openstack/glance && find -type f - - - name: Create glance service user in service tenant - keystone_user: - user: "{{ GLANCE_SERVICE_USER }}" - password: "{{ GLANCE_SERVICE_PASSWORD }}" - tenant: service - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - - - name: Assign admin role to glances service user in the service tenant - keystone_user: - role: admin - user: "{{ GLANCE_SERVICE_USER }}" - tenant: service - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - - - name: Add glance endpoint - keystone_service: - name: glance - type: image - description: Openstack Image Service - publicurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9292 - internalurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9292 - adminurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9292 - region: regionOne - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - - - name: Create postgresql user for glance - postgresql_user: - name: "{{ GLANCE_DB_USER }}" - login_host: "{{ CONTROLLER_HOST_ADDRESS }}" - password: "{{ GLANCE_DB_PASSWORD }}" - sudo: yes - sudo_user: glance - - - name: Create database for glance services - postgresql_db: - name: glance - owner: "{{ GLANCE_DB_USER }}" - login_host: "{{ CONTROLLER_HOST_ADDRESS }}" - sudo: yes - sudo_user: glance - - - name: Initiate glance database - glance_manage: - action: dbsync - sudo: yes - sudo_user: glance - - - name: Enable and start openstack-glance services - service: - name: "{{ item }}" - enabled: yes - state: started - with_items: - - openstack-glance-api.service - - openstack-glance-registry.service diff --git a/openstack/usr/share/openstack/glance/glance-api-paste.ini b/openstack/usr/share/openstack/glance/glance-api-paste.ini deleted file mode 100644 index 86a4cdb1..00000000 --- a/openstack/usr/share/openstack/glance/glance-api-paste.ini +++ /dev/null @@ -1,77 +0,0 @@ -# Use this pipeline for no auth or image caching - DEFAULT -[pipeline:glance-api] -pipeline = versionnegotiation osprofiler unauthenticated-context rootapp - -# Use this pipeline for image caching and no auth -[pipeline:glance-api-caching] -pipeline = versionnegotiation osprofiler unauthenticated-context cache rootapp - -# Use this pipeline for caching w/ management interface but no auth -[pipeline:glance-api-cachemanagement] -pipeline = versionnegotiation osprofiler unauthenticated-context cache cachemanage rootapp - -# Use this pipeline for keystone auth -[pipeline:glance-api-keystone] -pipeline = versionnegotiation osprofiler authtoken context rootapp - -# Use this pipeline for keystone auth with image caching -[pipeline:glance-api-keystone+caching] -pipeline = versionnegotiation osprofiler authtoken context cache rootapp - -# Use this pipeline for keystone auth with caching and cache management -[pipeline:glance-api-keystone+cachemanagement] -pipeline = versionnegotiation osprofiler authtoken context cache cachemanage rootapp - -# Use this pipeline for 
authZ only. This means that the registry will treat a -# user as authenticated without making requests to keystone to reauthenticate -# the user. -[pipeline:glance-api-trusted-auth] -pipeline = versionnegotiation osprofiler context rootapp - -# Use this pipeline for authZ only. This means that the registry will treat a -# user as authenticated without making requests to keystone to reauthenticate -# the user and uses cache management -[pipeline:glance-api-trusted-auth+cachemanagement] -pipeline = versionnegotiation osprofiler context cache cachemanage rootapp - -[composite:rootapp] -paste.composite_factory = glance.api:root_app_factory -/: apiversions -/v1: apiv1app -/v2: apiv2app - -[app:apiversions] -paste.app_factory = glance.api.versions:create_resource - -[app:apiv1app] -paste.app_factory = glance.api.v1.router:API.factory - -[app:apiv2app] -paste.app_factory = glance.api.v2.router:API.factory - -[filter:versionnegotiation] -paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory - -[filter:cache] -paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory - -[filter:cachemanage] -paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory - -[filter:context] -paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory - -[filter:unauthenticated-context] -paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory - -[filter:authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory -delay_auth_decision = true - -[filter:gzip] -paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory - -[filter:osprofiler] -paste.filter_factory = osprofiler.web:WsgiMiddleware.factory -hmac_keys = SECRET_KEY -enabled = yes diff --git a/openstack/usr/share/openstack/glance/glance-api.conf b/openstack/usr/share/openstack/glance/glance-api.conf deleted file mode 100644 index 39257a6d..00000000 --- a/openstack/usr/share/openstack/glance/glance-api.conf +++ /dev/null @@ -1,699 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -#verbose = False - -# Show debugging output in logs (sets DEBUG log level output) -#debug = False - -# Which backend scheme should Glance use by default is not specified -# in a request to add a new image to Glance? Known schemes are determined -# by the known_stores option below. -# Default: 'file' -default_store = file - -# Maximum image size (in bytes) that may be uploaded through the -# Glance API server. Defaults to 1 TB. -# WARNING: this value should only be increased after careful consideration -# and must be set to a value under 8 EB (9223372036854775808). -#image_size_cap = 1099511627776 - -# Address to bind the API server -bind_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} - -# Port the bind the API server to -bind_port = 9292 - -# Log to this file. Make sure you do not set the same log file for both the API -# and registry servers! -# -# If `log_file` is omitted and `use_syslog` is false, then log messages are -# sent to stdout as a fallback. -# log_file = /var/log/glance/api.log - -# Backlog requests when creating socket -backlog = 4096 - -# TCP_KEEPIDLE value in seconds when creating socket. -# Not supported on OS X. -#tcp_keepidle = 600 - -# API to use for accessing data. 
Default value points to sqlalchemy -# package, it is also possible to use: glance.db.registry.api -# data_api = glance.db.sqlalchemy.api - -# The number of child process workers that will be -# created to service API requests. The default will be -# equal to the number of CPUs available. (integer value) -#workers = 4 - -# Maximum line size of message headers to be accepted. -# max_header_line may need to be increased when using large tokens -# (typically those generated by the Keystone v3 API with big service -# catalogs) -# max_header_line = 16384 - -# Role used to identify an authenticated user as administrator -#admin_role = admin - -# Allow unauthenticated users to access the API with read-only -# privileges. This only applies when using ContextMiddleware. -#allow_anonymous_access = False - -# Allow access to version 1 of glance api -#enable_v1_api = True - -# Allow access to version 2 of glance api -#enable_v2_api = True - -# Return the URL that references where the data is stored on -# the backend storage system. For example, if using the -# file system store a URL of 'file:///path/to/image' will -# be returned to the user in the 'direct_url' meta-data field. -# The default value is false. -#show_image_direct_url = False - -# Send headers containing user and tenant information when making requests to -# the v1 glance registry. This allows the registry to function as if a user is -# authenticated without the need to authenticate a user itself using the -# auth_token middleware. -# The default value is false. -#send_identity_headers = False - -# Supported values for the 'container_format' image attribute -#container_formats=ami,ari,aki,bare,ovf,ova - -# Supported values for the 'disk_format' image attribute -#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso - -# Directory to use for lock files. Default to a temp directory -# (string value). This setting needs to be the same for both -# glance-scrubber and glance-api. -#lock_path= - -# Property Protections config file -# This file contains the rules for property protections and the roles/policies -# associated with it. -# If this config value is not specified, by default, property protections -# won't be enforced. -# If a value is specified and the file is not found, then the glance-api -# service will not start. -#property_protection_file = - -# Specify whether 'roles' or 'policies' are used in the -# property_protection_file. -# The default value for property_protection_rule_format is 'roles'. -#property_protection_rule_format = roles - -# This value sets what strategy will be used to determine the image location -# order. Currently two strategies are packaged with Glance 'location_order' -# and 'store_type'. -#location_strategy = location_order - -# ================= Syslog Options ============================ - -# Send logs to syslog (/dev/log) instead of to file specified -# by `log_file` -use_syslog = True - -# Facility to use. If unset defaults to LOG_USER. 
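The `use_syslog = True` setting above sends service logging to /dev/log rather than a flat file. A minimal sketch of the equivalent wiring in Python's standard library, assuming a local syslog socket (illustrative only, not Glance's actual logging setup):

    import logging
    from logging.handlers import SysLogHandler

    # LOG_USER mirrors the default facility used when
    # syslog_log_facility is left unset.
    handler = SysLogHandler(address='/dev/log',
                            facility=SysLogHandler.LOG_USER)
    handler.setFormatter(
        logging.Formatter('glance-api: %(levelname)s %(message)s'))
    logger = logging.getLogger('glance')
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    logger.info('syslog logging enabled')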
-#syslog_log_facility = LOG_LOCAL0 - -# ================= SSL Options =============================== - -# Certificate file to use when starting API server securely -#cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -#key_file = /path/to/keyfile - -# CA certificate file to use to verify connecting clients -#ca_file = /path/to/cafile - -# ================= Security Options ========================== - -# AES key for encrypting store 'location' metadata, including -# -- if used -- Swift or S3 credentials -# Should be set to a random string of length 16, 24 or 32 bytes -#metadata_encryption_key = <16, 24 or 32 char registry metadata key> - -# ============ Registry Options =============================== - -# Address to find the registry server -registry_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} - -# Port the registry server is listening on -registry_port = 9191 - -# What protocol to use when connecting to the registry server? -# Set to https for secure HTTP communication -registry_client_protocol = http - -# The path to the key file to use in SSL connections to the -# registry server, if any. Alternately, you may set the -# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file -#registry_client_key_file = /path/to/key/file - -# The path to the cert file to use in SSL connections to the -# registry server, if any. Alternately, you may set the -# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file -#registry_client_cert_file = /path/to/cert/file - -# The path to the certifying authority cert file to use in SSL connections -# to the registry server, if any. Alternately, you may set the -# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file -#registry_client_ca_file = /path/to/ca/file - -# When using SSL in connections to the registry server, do not require -# validation via a certifying authority. This is the registry's equivalent of -# specifying --insecure on the command line using glanceclient for the API -# Default: False -#registry_client_insecure = False - -# The period of time, in seconds, that the API server will wait for a registry -# request to complete. A value of '0' implies no timeout. -# Default: 600 -#registry_client_timeout = 600 - -# Whether to automatically create the database tables. -# Default: False -#db_auto_create = False - -# Enable DEBUG log messages from sqlalchemy which prints every database -# query and response. -# Default: False -#sqlalchemy_debug = True - -# Pass the user's token through for API requests to the registry. -# Default: True -#use_user_token = True - -# If 'use_user_token' is not in effect then admin credentials -# can be specified. Requests to the registry on behalf of -# the API will use these credentials. -# Admin user name -#admin_user = None -# Admin password -#admin_password = None -# Admin tenant name -#admin_tenant_name = None -# Keystone endpoint -#auth_url = None -# Keystone region -#auth_region = None -# Auth strategy -#auth_strategy = keystone - -# ============ Notification System Options ===================== - -# Driver or drivers to handle sending notifications. Set to -# 'messaging' to send notifications to a message queue. -notification_driver = messagingv2 - -# Default publisher_id for outgoing notifications. 
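With notification_driver = messagingv2 and rpc_backend = rabbit (configured just below), image events end up on the `glance` exchange under the `notifications` topic. A rough sketch of such a publish using the pika client, assuming a reachable broker with default credentials; Glance itself goes through oslo.messaging rather than pika:

    import json
    import pika

    params = pika.ConnectionParameters(
        host='localhost', port=5672,
        credentials=pika.PlainCredentials('guest', 'guest'))
    connection = pika.BlockingConnection(params)
    channel = connection.channel()
    # Matches rabbit_notification_exchange / rabbit_notification_topic.
    channel.exchange_declare(exchange='glance', exchange_type='topic')
    channel.basic_publish(
        exchange='glance',
        routing_key='notifications.info',
        body=json.dumps({'event_type': 'image.upload', 'payload': {}}))
    connection.close()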
-# default_publisher_id = image.localhost - -# Messaging driver used for 'messaging' notifications driver -rpc_backend=rabbit - -# Configuration options if sending notifications via rabbitmq -rabbit_host = {{ RABBITMQ_HOST }} -rabbit_port = {{ RABBITMQ_PORT }} -rabbit_use_ssl = false -rabbit_userid = {{ RABBITMQ_USER }} -rabbit_password = {{ RABBITMQ_PASSWORD }} -rabbit_virtual_host = / -rabbit_notification_exchange = glance -rabbit_notification_topic = notifications -rabbit_durable_queues = False - -# Configuration options if sending notifications via Qpid (these are -# the defaults) -#qpid_notification_exchange = glance -#qpid_notification_topic = notifications -#qpid_hostname = localhost -#qpid_port = 5672 -#qpid_username = -#qpid_password = -#qpid_sasl_mechanisms = -#qpid_reconnect_timeout = 0 -#qpid_reconnect_limit = 0 -#qpid_reconnect_interval_min = 0 -#qpid_reconnect_interval_max = 0 -#qpid_reconnect_interval = 0 -#qpid_heartbeat = 5 -# Set to 'ssl' to enable SSL -#qpid_protocol = tcp -#qpid_tcp_nodelay = True - -# ============ Delayed Delete Options ============================= - -# Turn on/off delayed delete -delayed_delete = False - -# Delayed delete time in seconds -scrub_time = 43200 - -# Directory that the scrubber will use to remind itself of what to delete -# Make sure this is also set in glance-scrubber.conf -scrubber_datadir = /var/lib/glance/scrubber - -# =============== Quota Options ================================== - -# The maximum number of image members allowed per image -#image_member_quota = 128 - -# The maximum number of image properties allowed per image -#image_property_quota = 128 - -# The maximum number of tags allowed per image -#image_tag_quota = 128 - -# The maximum number of locations allowed per image -#image_location_quota = 10 - -# Set a system wide quota for every user. This value is the total number -# of bytes that a user can use across all storage systems. A value of -# 0 means unlimited. -#user_storage_quota = 0 - -# =============== Image Cache Options ============================= - -# Base directory that the Image Cache uses -image_cache_dir = /var/lib/glance/image-cache/ - -# =============== Database Options ================================= - -[database] -# The file name to use with SQLite (string value) -#sqlite_db = oslo.sqlite - -# If True, SQLite uses synchronous mode (boolean value) -#sqlite_synchronous = True - -# The backend to use for db (string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend = sqlalchemy - -# The SQLAlchemy connection string used to connect to the -# database (string value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection = -connection=postgresql://{{ GLANCE_DB_USER }}:{{ GLANCE_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/glance - - -# The SQL mode to be used for MySQL sessions. This option, -# including the default, overrides any server-set SQL mode. To -# use whatever SQL mode is set by the server configuration, -# set this to no value. 
Example: mysql_sql_mode= (string -# value) -#mysql_sql_mode = TRADITIONAL - -# Timeout before idle sql connections are reaped (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout = 3600 - -# Minimum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size = - -# Maximum db connection retries during startup. (setting -1 -# implies an infinite retry count) (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries = 10 - -# Interval between retries of opening a sql connection -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval = 10 - -# If set, use this value for max_overflow with sqlalchemy -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow = - -# Verbosity of SQL debugging information. 0=None, -# 100=Everything (integer value) -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug = 0 - -# Add python stack traces to SQL as comment strings (boolean -# value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -# (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout = - -# Enable the experimental use of database reconnect on -# connection lost (boolean value) -#use_db_reconnect = False - -# seconds between db connection retries (integer value) -#db_retry_interval = 1 - -# Whether to increase interval between db connection retries, -# up to db_max_retry_interval (boolean value) -#db_inc_retry_interval = True - -# max seconds between db connection retries, if -# db_inc_retry_interval is enabled (integer value) -#db_max_retry_interval = 10 - -# maximum db connection retries before error is raised. -# (setting -1 implies an infinite retry count) (integer value) -#db_max_retries = 20 - -[keystone_authtoken] -auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 -identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357 -admin_tenant_name = service -admin_user = {{ GLANCE_SERVICE_USER }} -admin_password = {{ GLANCE_SERVICE_PASSWORD }} -revocation_cache_time = 10 - -[paste_deploy] -# Name of the paste configuration file that defines the available pipelines -#config_file = glance-api-paste.ini - -# Partial name of a pipeline in your paste configuration file with the -# service name removed. For example, if your paste section name is -# [pipeline:glance-api-keystone], you would configure the flavor below -# as 'keystone'. -flavor=keystone - -[store_type_location_strategy] -# The scheme list to use to get store preference order. The scheme must be -# registered by one of the stores defined by the 'known_stores' config option. -# This option will be applied when you using 'store_type' option as image -# location strategy defined by the 'location_strategy' config option. 
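The flavor = keystone setting above is combined with the paste application name to pick a pipeline section out of glance-api-paste.ini. A small sketch of that composition rule (the helper name is made up; the real resolution is done by paste.deploy):

    def pipeline_section(app_name, flavor=None):
        # flavor=keystone turns glance-api into
        # [pipeline:glance-api-keystone], as in glance-api-paste.ini.
        if flavor:
            return 'pipeline:%s-%s' % (app_name, flavor)
        return 'pipeline:%s' % app_name

    assert pipeline_section('glance-api', 'keystone') == \
        'pipeline:glance-api-keystone'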
-#store_type_preference = - -[profiler] -# If False fully disable profiling feature. -#enabled = False - -# If False doesn't trace SQL requests. -#trace_sqlalchemy = False - -[task] -# ================= Glance Tasks Options ============================ - -# Specifies how long (in hours) a task is supposed to live in the tasks DB -# after succeeding or failing before getting soft-deleted. -# The default value for task_time_to_live is 48 hours. -# task_time_to_live = 48 - -# Specifies which task executor to be used to run the task scripts. -# The default value for task_executor is eventlet. -# task_executor = eventlet - -# Specifies the maximum number of eventlet threads which can be spun up by -# the eventlet based task executor to perform execution of Glance tasks. -# eventlet_executor_pool_size = 1000 - -[glance_store] -# List of which store classes and store class locations are -# currently known to glance at startup. -# Existing but disabled stores: -# glance.store.rbd.Store, -# glance.store.s3.Store, -# glance.store.swift.Store, -# glance.store.sheepdog.Store, -# glance.store.cinder.Store, -# glance.store.gridfs.Store, -# glance.store.vmware_datastore.Store, -#stores = glance.store.filesystem.Store, -# glance.store.http.Store - -# ============ Filesystem Store Options ======================== - -# Directory that the Filesystem backend store -# writes image data to -filesystem_store_datadir = /var/lib/glance/images/ - -# A list of directories where image data can be stored. -# This option may be specified multiple times for specifying multiple store -# directories. Either one of filesystem_store_datadirs or -# filesystem_store_datadir option is required. A priority number may be given -# after each directory entry, separated by a ":". -# When adding an image, the highest priority directory will be selected, unless -# there is not enough space available in cases where the image size is already -# known. If no priority is given, it is assumed to be zero and the directory -# will be considered for selection last. If multiple directories have the same -# priority, then the one with the most free space available is selected. -# If same store is specified multiple times then BadStoreConfiguration -# exception will be raised. -#filesystem_store_datadirs = /var/lib/glance/images/:1 - -# A path to a JSON file that contains metadata describing the storage -# system. When show_multiple_locations is True the information in this -# file will be returned with any location that is contained in this -# store. 
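The priority rule described above for filesystem_store_datadirs amounts to: pick the highest-priority directory that still has room, breaking ties by free space. A sketch of that documented rule (function name invented here, not Glance's actual implementation):

    import os

    def pick_datadir(datadirs, image_size=0):
        # datadirs: [(path, priority)] as in filesystem_store_datadirs.
        def free_bytes(path):
            st = os.statvfs(path)
            return st.f_bavail * st.f_frsize
        candidates = [(prio, free_bytes(path), path)
                      for path, prio in datadirs
                      if free_bytes(path) >= image_size]
        if not candidates:
            raise RuntimeError('no store directory has enough space')
        # Highest priority first, then most free space.
        return max(candidates)[2]

    print(pick_datadir([('/var/lib/glance/images', 1), ('/tmp', 0)]))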
-#filesystem_store_metadata_file = None - -# ============ Swift Store Options ============================= - -# Version of the authentication service to use -# Valid versions are '2' for keystone and '1' for swauth and rackspace -swift_store_auth_version = 2 - -# Address where the Swift authentication service lives -# Valid schemes are 'http://' and 'https://' -# If no scheme specified, default to 'https://' -# For swauth, use something like '127.0.0.1:8080/v1.0/' -swift_store_auth_address = 127.0.0.1:5000/v2.0/ - -# User to authenticate against the Swift authentication service -# If you use Swift authentication service, set it to 'account':'user' -# where 'account' is a Swift storage account and 'user' -# is a user in that account -swift_store_user = jdoe:jdoe - -# Auth key for the user authenticating against the -# Swift authentication service -swift_store_key = a86850deb2742ec3cb41518e26aa2d89 - -# Container within the account that the account should use -# for storing images in Swift -swift_store_container = glance - -# Do we create the container if it does not exist? -swift_store_create_container_on_put = False - -# What size, in MB, should Glance start chunking image files -# and do a large object manifest in Swift? By default, this is -# the maximum object size in Swift, which is 5GB -swift_store_large_object_size = 5120 - -# swift_store_config_file = glance-swift.conf -# This file contains references for each of the configured -# Swift accounts/backing stores. If used, this option can prevent -# credentials being stored in the database. Using Swift references -# is disabled if this config is left blank. - -# The reference to the default Swift parameters to use for adding new images. -# default_swift_reference = 'ref1' - -# When doing a large object manifest, what size, in MB, should -# Glance write chunks to Swift? This amount of data is written -# to a temporary disk buffer during the process of chunking -# the image file, and the default is 200MB -swift_store_large_object_chunk_size = 200 - -# Whether to use ServiceNET to communicate with the Swift storage servers. -# (If you aren't RACKSPACE, leave this False!) -# -# To use ServiceNET for authentication, prefix hostname of -# `swift_store_auth_address` with 'snet-'. -# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/ -swift_enable_snet = False - -# If set to True enables multi-tenant storage mode which causes Glance images -# to be stored in tenant specific Swift accounts. -#swift_store_multi_tenant = False - -# A list of swift ACL strings that will be applied as both read and -# write ACLs to the containers created by Glance in multi-tenant -# mode. This grants the specified tenants/users read and write access -# to all newly created image objects. The standard swift ACL string -# formats are allowed, including: -# : -# : -# *: -# Multiple ACLs can be combined using a comma separated list, for -# example: swift_store_admin_tenants = service:glance,*:admin -#swift_store_admin_tenants = - -# The region of the swift endpoint to be used for single tenant. This setting -# is only necessary if the tenant has multiple swift endpoints. -#swift_store_region = - -# If set to False, disables SSL layer compression of https swift requests. -# Setting to 'False' may improve performance for images which are already -# in a compressed format, eg qcow2. If set to True, enables SSL layer -# compression (provided it is supported by the target swift proxy). 
-#swift_store_ssl_compression = True - -# The number of times a Swift download will be retried before the -# request fails -#swift_store_retry_get_count = 0 - -# Bypass SSL verification for Swift -#swift_store_auth_insecure = False - -# ============ S3 Store Options ============================= - -# Address where the S3 authentication service lives -# Valid schemes are 'http://' and 'https://' -# If no scheme specified, default to 'http://' -s3_store_host = 127.0.0.1:8080/v1.0/ - -# User to authenticate against the S3 authentication service -s3_store_access_key = <20-char AWS access key> - -# Auth key for the user authenticating against the -# S3 authentication service -s3_store_secret_key = <40-char AWS secret key> - -# Container within the account that the account should use -# for storing images in S3. Note that S3 has a flat namespace, -# so you need a unique bucket name for your glance images. An -# easy way to do this is append your AWS access key to "glance". -# S3 buckets in AWS *must* be lowercased, so remember to lowercase -# your AWS access key if you use it in your bucket name below! -s3_store_bucket = glance - -# Do we create the bucket if it does not exist? -s3_store_create_bucket_on_put = False - -# When sending images to S3, the data will first be written to a -# temporary buffer on disk. By default the platform's temporary directory -# will be used. If required, an alternative directory can be specified here. -#s3_store_object_buffer_dir = /path/to/dir - -# When forming a bucket url, boto will either set the bucket name as the -# subdomain or as the first token of the path. Amazon's S3 service will -# accept it as the subdomain, but Swift's S3 middleware requires it be -# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'. -#s3_store_bucket_url_format = subdomain - -# Size, in MB, should S3 start chunking image files -# and do a multipart upload in S3. The default is 100MB. -#s3_store_large_object_size = 100 - -# Multipart upload part size, in MB, should S3 use when uploading -# parts. The size must be greater than or equal to -# 5MB. The default is 10MB. -#s3_store_large_object_chunk_size = 10 - -# The number of thread pools to perform a multipart upload -# in S3. The default is 10. -#s3_store_thread_pools = 10 - -# ============ RBD Store Options ============================= - -# Ceph configuration file path -# If using cephx authentication, this file should -# include a reference to the right keyring -# in a client. section -#rbd_store_ceph_conf = /etc/ceph/ceph.conf - -# RADOS user to authenticate as (only applicable if using cephx) -# If , a default will be chosen based on the client. section -# in rbd_store_ceph_conf -#rbd_store_user = - -# RADOS pool in which images are stored -#rbd_store_pool = images - -# RADOS images will be chunked into objects of this size (in megabytes). -# For best performance, this should be a power of two -#rbd_store_chunk_size = 8 - -# ============ Sheepdog Store Options ============================= - -sheepdog_store_address = localhost - -sheepdog_store_port = 7000 - -# Images will be chunked into objects of this size (in megabytes). 
-# For best performance, this should be a power of two -sheepdog_store_chunk_size = 64 - -# ============ Cinder Store Options =============================== - -# Info to match when looking for cinder in the service catalog -# Format is : separated values of the form: -# :: (string value) -#cinder_catalog_info = volume:cinder:publicURL - -# Override service catalog lookup with template for cinder endpoint -# e.g. http://localhost:8776/v1/%(project_id)s (string value) -#cinder_endpoint_template = - -# Region name of this node (string value) -#os_region_name = - -# Location of ca certicates file to use for cinder client requests -# (string value) -#cinder_ca_certificates_file = - -# Number of cinderclient retries on failed http calls (integer value) -#cinder_http_retries = 3 - -# Allow to perform insecure SSL requests to cinder (boolean value) -#cinder_api_insecure = False - -# ============ VMware Datastore Store Options ===================== - -# ESX/ESXi or vCenter Server target system. -# The server value can be an IP address or a DNS name -# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com -#vmware_server_host = - -# Server username (string value) -#vmware_server_username = - -# Server password (string value) -#vmware_server_password = - -# Inventory path to a datacenter (string value) -# Value optional when vmware_server_ip is an ESX/ESXi host: if specified -# should be `ha-datacenter`. -#vmware_datacenter_path = - -# Datastore associated with the datacenter (string value) -#vmware_datastore_name = - -# The number of times we retry on failures -# e.g., socket error, etc (integer value) -#vmware_api_retry_count = 10 - -# The interval used for polling remote tasks -# invoked on VMware ESX/VC server in seconds (integer value) -#vmware_task_poll_interval = 5 - -# Absolute path of the folder containing the images in the datastore -# (string value) -#vmware_store_image_dir = /openstack_glance - -# Allow to perform insecure SSL requests to the target system (boolean value) -#vmware_api_insecure = False diff --git a/openstack/usr/share/openstack/glance/glance-cache.conf b/openstack/usr/share/openstack/glance/glance-cache.conf deleted file mode 100644 index 3f2d4603..00000000 --- a/openstack/usr/share/openstack/glance/glance-cache.conf +++ /dev/null @@ -1,200 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -#verbose = False - -# Show debugging output in logs (sets DEBUG log level output) -#debug = False - -# Log to this file. Make sure you do not set the same log file for both the API -# and registry servers! -# -# If `log_file` is omitted and `use_syslog` is false, then log messages are -# sent to stdout as a fallback. 
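Several of the client options above (cinder_http_retries, vmware_api_retry_count, vmware_task_poll_interval) follow the same pattern: retry a flaky remote call a fixed number of times with a pause between attempts. A generic sketch of that pattern, not any particular client's code:

    import time

    def call_with_retries(fn, retries=3, interval=5):
        # retries mirrors cinder_http_retries; interval mirrors
        # vmware_task_poll_interval (both at their default values).
        last_error = None
        for _ in range(retries + 1):
            try:
                return fn()
            except IOError as exc:
                last_error = exc
                time.sleep(interval)
        raise last_error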
-# log_file = /var/log/glance/image-cache.log - -# Send logs to syslog (/dev/log) instead of to file specified by `log_file` -use_syslog = True - -# Directory that the Image Cache writes data to -image_cache_dir = /var/lib/glance/image-cache/ - -# Number of seconds after which we should consider an incomplete image to be -# stalled and eligible for reaping -image_cache_stall_time = 86400 - -# Max cache size in bytes -image_cache_max_size = 10737418240 - -# Address to find the registry server -registry_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} - -# Port the registry server is listening on -registry_port = 9191 - -# Auth settings if using Keystone -# auth_url = http://127.0.0.1:5000/v2.0/ -# admin_tenant_name = %SERVICE_TENANT_NAME% -# admin_user = %SERVICE_USER% -# admin_password = %SERVICE_PASSWORD% - -# List of which store classes and store class locations are -# currently known to glance at startup. -# known_stores = glance.store.filesystem.Store, -# glance.store.http.Store, -# glance.store.rbd.Store, -# glance.store.s3.Store, -# glance.store.swift.Store, -# glance.store.sheepdog.Store, -# glance.store.cinder.Store, -# glance.store.vmware_datastore.Store, - -# ============ Filesystem Store Options ======================== - -# Directory that the Filesystem backend store -# writes image data to -filesystem_store_datadir = /var/lib/glance/images/ - -# ============ Swift Store Options ============================= - -# Version of the authentication service to use -# Valid versions are '2' for keystone and '1' for swauth and rackspace -swift_store_auth_version = 2 - -# Address where the Swift authentication service lives -# Valid schemes are 'http://' and 'https://' -# If no scheme specified, default to 'https://' -# For swauth, use something like '127.0.0.1:8080/v1.0/' -swift_store_auth_address = 127.0.0.1:5000/v2.0/ - -# User to authenticate against the Swift authentication service -# If you use Swift authentication service, set it to 'account':'user' -# where 'account' is a Swift storage account and 'user' -# is a user in that account -swift_store_user = jdoe:jdoe - -# Auth key for the user authenticating against the -# Swift authentication service -swift_store_key = a86850deb2742ec3cb41518e26aa2d89 - -# Container within the account that the account should use -# for storing images in Swift -swift_store_container = glance - -# Do we create the container if it does not exist? -swift_store_create_container_on_put = False - -# What size, in MB, should Glance start chunking image files -# and do a large object manifest in Swift? By default, this is -# the maximum object size in Swift, which is 5GB -swift_store_large_object_size = 5120 - -# When doing a large object manifest, what size, in MB, should -# Glance write chunks to Swift? This amount of data is written -# to a temporary disk buffer during the process of chunking -# the image file, and the default is 200MB -swift_store_large_object_chunk_size = 200 - -# Whether to use ServiceNET to communicate with the Swift storage servers. -# (If you aren't RACKSPACE, leave this False!) -# -# To use ServiceNET for authentication, prefix hostname of -# `swift_store_auth_address` with 'snet-'. -# Ex. 
https://example.com/v1.0/ -> https://snet-example.com/v1.0/ -swift_enable_snet = False - -# ============ S3 Store Options ============================= - -# Address where the S3 authentication service lives -# Valid schemes are 'http://' and 'https://' -# If no scheme specified, default to 'http://' -s3_store_host = 127.0.0.1:8080/v1.0/ - -# User to authenticate against the S3 authentication service -s3_store_access_key = <20-char AWS access key> - -# Auth key for the user authenticating against the -# S3 authentication service -s3_store_secret_key = <40-char AWS secret key> - -# Container within the account that the account should use -# for storing images in S3. Note that S3 has a flat namespace, -# so you need a unique bucket name for your glance images. An -# easy way to do this is append your AWS access key to "glance". -# S3 buckets in AWS *must* be lowercased, so remember to lowercase -# your AWS access key if you use it in your bucket name below! -s3_store_bucket = glance - -# Do we create the bucket if it does not exist? -s3_store_create_bucket_on_put = False - -# When sending images to S3, the data will first be written to a -# temporary buffer on disk. By default the platform's temporary directory -# will be used. If required, an alternative directory can be specified here. -# s3_store_object_buffer_dir = /path/to/dir - -# ============ Cinder Store Options =========================== - -# Info to match when looking for cinder in the service catalog -# Format is : separated values of the form: -# :: (string value) -#cinder_catalog_info = volume:cinder:publicURL - -# Override service catalog lookup with template for cinder endpoint -# e.g. http://localhost:8776/v1/%(project_id)s (string value) -#cinder_endpoint_template = - -# Region name of this node (string value) -#os_region_name = - -# Location of ca certicates file to use for cinder client requests -# (string value) -#cinder_ca_certificates_file = - -# Number of cinderclient retries on failed http calls (integer value) -#cinder_http_retries = 3 - -# Allow to perform insecure SSL requests to cinder (boolean value) -#cinder_api_insecure = False - -# ============ VMware Datastore Store Options ===================== - -# ESX/ESXi or vCenter Server target system. -# The server value can be an IP address or a DNS name -# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com -#vmware_server_host = - -# Server username (string value) -#vmware_server_username = - -# Server password (string value) -#vmware_server_password = - -# Inventory path to a datacenter (string value) -# Value optional when vmware_server_ip is an ESX/ESXi host: if specified -# should be `ha-datacenter`. 
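The s3_store_bucket comments above suggest deriving a globally unique, lowercase bucket name by appending the AWS access key to "glance". As a one-liner, using AWS's documented example key rather than a real one:

    def s3_bucket_name(access_key, prefix='glance'):
        # S3 bucket names must be lowercase, hence the .lower().
        return (prefix + access_key).lower()

    print(s3_bucket_name('AKIAIOSFODNN7EXAMPLE'))
    # -> glanceakiaiosfodnn7example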
-#vmware_datacenter_path = - -# Datastore associated with the datacenter (string value) -#vmware_datastore_name = - -# The number of times we retry on failures -# e.g., socket error, etc (integer value) -#vmware_api_retry_count = 10 - -# The interval used for polling remote tasks -# invoked on VMware ESX/VC server in seconds (integer value) -#vmware_task_poll_interval = 5 - -# Absolute path of the folder containing the images in the datastore -# (string value) -#vmware_store_image_dir = /openstack_glance - -# Allow to perform insecure SSL requests to the target system (boolean value) -#vmware_api_insecure = False - -# ================= Security Options ========================== - -# AES key for encrypting store 'location' metadata, including -# -- if used -- Swift or S3 credentials -# Should be set to a random string of length 16, 24 or 32 bytes -# metadata_encryption_key = <16, 24 or 32 char registry metadata key> diff --git a/openstack/usr/share/openstack/glance/glance-registry-paste.ini b/openstack/usr/share/openstack/glance/glance-registry-paste.ini deleted file mode 100644 index df403f6e..00000000 --- a/openstack/usr/share/openstack/glance/glance-registry-paste.ini +++ /dev/null @@ -1,30 +0,0 @@ -# Use this pipeline for no auth - DEFAULT -[pipeline:glance-registry] -pipeline = osprofiler unauthenticated-context registryapp - -# Use this pipeline for keystone auth -[pipeline:glance-registry-keystone] -pipeline = osprofiler authtoken context registryapp - -# Use this pipeline for authZ only. This means that the registry will treat a -# user as authenticated without making requests to keystone to reauthenticate -# the user. -[pipeline:glance-registry-trusted-auth] -pipeline = osprofiler context registryapp - -[app:registryapp] -paste.app_factory = glance.registry.api:API.factory - -[filter:context] -paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory - -[filter:unauthenticated-context] -paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory - -[filter:authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory - -[filter:osprofiler] -paste.filter_factory = osprofiler.web:WsgiMiddleware.factory -hmac_keys = SECRET_KEY -enabled = yes diff --git a/openstack/usr/share/openstack/glance/glance-registry.conf b/openstack/usr/share/openstack/glance/glance-registry.conf deleted file mode 100644 index 302f4138..00000000 --- a/openstack/usr/share/openstack/glance/glance-registry.conf +++ /dev/null @@ -1,245 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -#verbose = False - -# Show debugging output in logs (sets DEBUG log level output) -#debug = False - -# Address to bind the registry server -bind_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} - -# Port the bind the registry server to -bind_port = 9191 - -# Log to this file. Make sure you do not set the same log file for both the API -# and registry servers! -# -# If `log_file` is omitted and `use_syslog` is false, then log messages are -# sent to stdout as a fallback. -# log_file = /var/log/glance/registry.log - -# Backlog requests when creating socket -backlog = 4096 - -# TCP_KEEPIDLE value in seconds when creating socket. -# Not supported on OS X. -#tcp_keepidle = 600 - -# API to use for accessing data. Default value points to sqlalchemy -# package. -#data_api = glance.db.sqlalchemy.api - -# The number of child process workers that will be -# created to service Registry requests. 
The default will be -# equal to the number of CPUs available. (integer value) -#workers = None - -# Enable Registry API versions individually or simultaneously -#enable_v1_registry = True -#enable_v2_registry = True - -# Limit the api to return `param_limit_max` items in a call to a container. If -# a larger `limit` query param is provided, it will be reduced to this value. -api_limit_max = 1000 - -# If a `limit` query param is not provided in an api request, it will -# default to `limit_param_default` -limit_param_default = 25 - -# Role used to identify an authenticated user as administrator -#admin_role = admin - -# Whether to automatically create the database tables. -# Default: False -#db_auto_create = False - -# Enable DEBUG log messages from sqlalchemy which prints every database -# query and response. -# Default: False -#sqlalchemy_debug = True - -# ================= Syslog Options ============================ - -# Send logs to syslog (/dev/log) instead of to file specified -# by `log_file` -use_syslog = True - -# Facility to use. If unset defaults to LOG_USER. -#syslog_log_facility = LOG_LOCAL1 - -# ================= SSL Options =============================== - -# Certificate file to use when starting registry server securely -#cert_file = /path/to/certfile - -# Private key file to use when starting registry server securely -#key_file = /path/to/keyfile - -# CA certificate file to use to verify connecting clients -#ca_file = /path/to/cafile - -# ============ Notification System Options ===================== - -# Driver or drivers to handle sending notifications. Set to -# 'messaging' to send notifications to a message queue. -notification_driver = messagingv2 - -# Default publisher_id for outgoing notifications. -# default_publisher_id = image.localhost - -# Messaging driver used for 'messaging' notifications driver -rpc_backend=rabbit - -# Configuration options if sending notifications via rabbitmq -rabbit_host = {{ RABBITMQ_HOST }} -rabbit_port = {{ RABBITMQ_PORT }} -rabbit_use_ssl = false -rabbit_userid = {{ RABBITMQ_USER }} -rabbit_password = {{ RABBITMQ_PASSWORD }} -rabbit_virtual_host = / -rabbit_notification_exchange = glance -rabbit_notification_topic = notifications -rabbit_durable_queues = False - -# Configuration options if sending notifications via Qpid (these are -# the defaults) -qpid_notification_exchange = glance -qpid_notification_topic = notifications -qpid_hostname = localhost -qpid_port = 5672 -qpid_username = -qpid_password = -qpid_sasl_mechanisms = -qpid_reconnect_timeout = 0 -qpid_reconnect_limit = 0 -qpid_reconnect_interval_min = 0 -qpid_reconnect_interval_max = 0 -qpid_reconnect_interval = 0 -qpid_heartbeat = 5 -# Set to 'ssl' to enable SSL -qpid_protocol = tcp -qpid_tcp_nodelay = True - - -# ================= Database Options ========================== - -[database] -# The file name to use with SQLite (string value) -#sqlite_db = glance.sqlite - -# If True, SQLite uses synchronous mode (boolean value) -#sqlite_synchronous = True - -# The backend to use for db (string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend = sqlalchemy - -# The SQLAlchemy connection string used to connect to the -# database (string value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection = -connection=postgresql://{{ GLANCE_DB_USER }}:{{ GLANCE_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/glance - -# The SQL mode to be used for MySQL sessions. 
This option, -# including the default, overrides any server-set SQL mode. To -# use whatever SQL mode is set by the server configuration, -# set this to no value. Example: mysql_sql_mode= (string -# value) -#mysql_sql_mode = TRADITIONAL - -# Timeout before idle sql connections are reaped (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout = 3600 - -# Minimum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size = - -# Maximum db connection retries during startup. (setting -1 -# implies an infinite retry count) (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries = 10 - -# Interval between retries of opening a sql connection -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval = 10 - -# If set, use this value for max_overflow with sqlalchemy -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow = - -# Verbosity of SQL debugging information. 0=None, -# 100=Everything (integer value) -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug = 0 - -# Add python stack traces to SQL as comment strings (boolean -# value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -# (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout = - -# Enable the experimental use of database reconnect on -# connection lost (boolean value) -#use_db_reconnect = False - -# seconds between db connection retries (integer value) -#db_retry_interval = 1 - -# Whether to increase interval between db connection retries, -# up to db_max_retry_interval (boolean value) -#db_inc_retry_interval = True - -# max seconds between db connection retries, if -# db_inc_retry_interval is enabled (integer value) -#db_max_retry_interval = 10 - -# maximum db connection retries before error is raised. -# (setting -1 implies an infinite retry count) (integer value) -#db_max_retries = 20 - -[keystone_authtoken] -auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 -identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357 -admin_tenant_name = service -admin_user = {{ GLANCE_SERVICE_USER }} -admin_password = {{ GLANCE_SERVICE_PASSWORD }} - -[paste_deploy] -# Name of the paste configuration file that defines the available pipelines -#config_file = glance-registry-paste.ini - -# Partial name of a pipeline in your paste configuration file with the -# service name removed. For example, if your paste section name is -# [pipeline:glance-registry-keystone], you would configure the flavor below -# as 'keystone'. -flavor=keystone - -[profiler] -# If False fully disable profiling feature. -#enabled = False - -# If False doesn't trace SQL requests. 
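This [profiler] section works together with the osprofiler filter enabled in both paste files above, where hmac_keys = SECRET_KEY supplies a key shared across services so trace data can be authenticated. A sketch of that shared-key idea using only the standard library (osprofiler's actual wire format differs):

    import hashlib
    import hmac

    def sign_trace(payload, key=b'SECRET_KEY'):
        # Any service holding the same hmac_keys value can verify
        # that a trace really came from a trusted peer.
        return hmac.new(key, payload, hashlib.sha1).hexdigest()

    print(sign_trace(b'trace-id:request-id'))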
-#trace_sqlalchemy = False diff --git a/openstack/usr/share/openstack/glance/glance-scrubber.conf b/openstack/usr/share/openstack/glance/glance-scrubber.conf deleted file mode 100644 index cdbfda71..00000000 --- a/openstack/usr/share/openstack/glance/glance-scrubber.conf +++ /dev/null @@ -1,108 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -#verbose = False - -# Show debugging output in logs (sets DEBUG log level output) -#debug = False - -# Log to this file. Make sure you do not set the same log file for both the API -# and registry servers! -# -# If `log_file` is omitted and `use_syslog` is false, then log messages are -# sent to stdout as a fallback. -# log_file = /var/log/glance/scrubber.log - -# Send logs to syslog (/dev/log) instead of to file specified by `log_file` -use_syslog = True - -# Should we run our own loop or rely on cron/scheduler to run us -daemon = False - -# Loop time between checking for new items to schedule for delete -wakeup_time = 300 - -# Directory that the scrubber will use to remind itself of what to delete -# Make sure this is also set in glance-api.conf -scrubber_datadir = /var/lib/glance/scrubber - -# Only one server in your deployment should be designated the cleanup host -cleanup_scrubber = False - -# pending_delete items older than this time are candidates for cleanup -cleanup_scrubber_time = 86400 - -# Address to find the registry server for cleanups -registry_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} - -# Port the registry server is listening on -registry_port = 9191 - -# Auth settings if using Keystone -# auth_url = http://127.0.0.1:5000/v2.0/ -# admin_tenant_name = %SERVICE_TENANT_NAME% -# admin_user = %SERVICE_USER% -# admin_password = %SERVICE_PASSWORD% - -# Directory to use for lock files. Default to a temp directory -# (string value). This setting needs to be the same for both -# glance-scrubber and glance-api. -#lock_path= - -# API to use for accessing data. Default value points to sqlalchemy -# package, it is also possible to use: glance.db.registry.api -#data_api = glance.db.sqlalchemy.api - -# ================= Security Options ========================== - -# AES key for encrypting store 'location' metadata, including -# -- if used -- Swift or S3 credentials -# Should be set to a random string of length 16, 24 or 32 bytes -#metadata_encryption_key = <16, 24 or 32 char registry metadata key> - -# ================= Database Options ===============+========== - -[database] - -# The SQLAlchemy connection string used to connect to the -# database (string value) -#connection=sqlite:////glance/openstack/common/db/$sqlite_db - -# The SQLAlchemy connection string used to connect to the -# slave database (string value) -#slave_connection= - -# timeout before idle sql connections are reaped (integer -# value) -#idle_timeout=3600 - -# Minimum number of SQL connections to keep open in a pool -# (integer value) -#min_pool_size=1 - -# Maximum number of SQL connections to keep open in a pool -# (integer value) -#max_pool_size= - -# maximum db connection retries during startup. (setting -1 -# implies an infinite retry count) (integer value) -#max_retries=10 - -# interval between retries of opening a sql connection -# (integer value) -#retry_interval=10 - -# If set, use this value for max_overflow with sqlalchemy -# (integer value) -#max_overflow= - -# Verbosity of SQL debugging information. 
0=None, -# 100=Everything (integer value) -#connection_debug=0 - -# Add python stack traces to SQL as comment strings (boolean -# value) -#connection_trace=false - -# If set, use this value for pool_timeout with sqlalchemy -# (integer value) -#pool_timeout= diff --git a/openstack/usr/share/openstack/glance/logging.conf b/openstack/usr/share/openstack/glance/logging.conf deleted file mode 100644 index 7e7f31f0..00000000 --- a/openstack/usr/share/openstack/glance/logging.conf +++ /dev/null @@ -1,54 +0,0 @@ -[loggers] -keys=root,api,registry,combined - -[formatters] -keys=normal,normal_with_name,debug - -[handlers] -keys=production,file,devel - -[logger_root] -level=NOTSET -handlers=devel - -[logger_api] -level=DEBUG -handlers=devel -qualname=glance-api - -[logger_registry] -level=DEBUG -handlers=devel -qualname=glance-registry - -[logger_combined] -level=DEBUG -handlers=devel -qualname=glance-combined - -[handler_production] -class=handlers.SysLogHandler -level=ERROR -formatter=normal_with_name -args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) - -[handler_file] -class=FileHandler -level=DEBUG -formatter=normal_with_name -args=('glance.log', 'w') - -[handler_devel] -class=StreamHandler -level=NOTSET -formatter=debug -args=(sys.stdout,) - -[formatter_normal] -format=%(asctime)s %(levelname)s %(message)s - -[formatter_normal_with_name] -format=(%(name)s): %(asctime)s %(levelname)s %(message)s - -[formatter_debug] -format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s diff --git a/openstack/usr/share/openstack/glance/policy.json b/openstack/usr/share/openstack/glance/policy.json deleted file mode 100644 index 325f00b2..00000000 --- a/openstack/usr/share/openstack/glance/policy.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "context_is_admin": "role:admin", - "default": "", - - "add_image": "", - "delete_image": "", - "get_image": "", - "get_images": "", - "modify_image": "", - "publicize_image": "role:admin", - "copy_from": "", - - "download_image": "", - "upload_image": "", - - "delete_image_location": "", - "get_image_location": "", - "set_image_location": "", - - "add_member": "", - "delete_member": "", - "get_member": "", - "get_members": "", - "modify_member": "", - - "manage_image_cache": "role:admin", - - "get_task": "", - "get_tasks": "", - "add_task": "", - "modify_task": "", - - "get_metadef_namespace": "", - "get_metadef_namespaces":"", - "modify_metadef_namespace":"", - "add_metadef_namespace":"", - - "get_metadef_object":"", - "get_metadef_objects":"", - "modify_metadef_object":"", - "add_metadef_object":"", - - "list_metadef_resource_types":"", - "get_metadef_resource_type":"", - "add_metadef_resource_type_association":"", - - "get_metadef_property":"", - "get_metadef_properties":"", - "modify_metadef_property":"", - "add_metadef_property":"" - -} diff --git a/openstack/usr/share/openstack/glance/schema-image.json b/openstack/usr/share/openstack/glance/schema-image.json deleted file mode 100644 index 5aafd6b3..00000000 --- a/openstack/usr/share/openstack/glance/schema-image.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "kernel_id": { - "type": "string", - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image." 
- }, - "ramdisk_id": { - "type": "string", - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image." - }, - "instance_uuid": { - "type": "string", - "description": "ID of instance used to create this image." - }, - "architecture": { - "description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html", - "type": "string" - }, - "os_distro": { - "description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html", - "type": "string" - }, - "os_version": { - "description": "Operating system version as specified by the distributor", - "type": "string" - } -} diff --git a/openstack/usr/share/openstack/horizon.yml b/openstack/usr/share/openstack/horizon.yml deleted file mode 100644 index 14cea5c5..00000000 --- a/openstack/usr/share/openstack/horizon.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -- hosts: localhost - tasks: - -# Setup apache, this may end up in apache.yml - - name: Create the apache user. - user: - name: apache - comment: Apache Server - shell: /sbin/nologin - home: /var/www - - - file: - path: /usr/sbin/suexec - group: apache - mode: 4750 - -# Setup horizon - - name: Create the horizon user. - user: - name: horizon - comment: Openstack Horizon User - shell: /sbin/nologin - home: /var/lib/horizon - - - name: Create the /var folders for horizon - file: - path: "{{ item }}" - state: directory - owner: horizon - group: horizon - with_items: - - /var/lib/horizon - - - name: Link horizon apache configuration - file: - src: /etc/horizon/apache-horizon.conf - dest: /etc/httpd/conf.d/apache-horizon.conf - state: link - - - name: Enable and start apache services needed by horizon - service: - name: "{{ item }}" - enabled: yes - state: started - with_items: - - apache-httpd.service diff --git a/openstack/usr/share/openstack/hosts b/openstack/usr/share/openstack/hosts deleted file mode 100644 index 5b97818d..00000000 --- a/openstack/usr/share/openstack/hosts +++ /dev/null @@ -1 +0,0 @@ -localhost ansible_connection=local diff --git a/openstack/usr/share/openstack/ironic.yml b/openstack/usr/share/openstack/ironic.yml deleted file mode 100644 index db0a8aa8..00000000 --- a/openstack/usr/share/openstack/ironic.yml +++ /dev/null @@ -1,104 +0,0 @@ ---- -- hosts: localhost - vars_files: - - "/etc/openstack/ironic.conf" - tasks: - - name: Create the ironic user - user: - name: ironic - comment: Openstack Ironic Daemons - shell: /sbin/nologin - home: /var/lib/ironic - - - name: Create the /var folders for Ironic - file: - path: "{{ item }}" - state: directory - owner: ironic - group: ironic - with_items: - - /var/run/ironic - - /var/lock/ironic - - /var/log/ironic - - /var/lib/ironic - - - file: path=/etc/ironic state=directory - - name: Add the configuration needed for ironic in /etc/ironic using templates - template: - src: /usr/share/openstack/ironic/{{ item }} - dest: /etc/ironic/{{ item }} - with_lines: - - cd /usr/share/openstack/ironic && find -type f - - - name: Create Ironic service user in service tenant - keystone_user: - user: "{{ IRONIC_SERVICE_USER }}" - password: "{{ IRONIC_SERVICE_PASSWORD }}" - tenant: service - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - - - name: Assign admin role to Ironic service user in the service tenant - keystone_user: - role: admin 
- user: "{{ IRONIC_SERVICE_USER }}" - tenant: service - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - - - name: Add Ironic endpoint - keystone_service: - name: ironic - type: baremetal - description: Openstack Ironic Service - publicurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:6385' - internalurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:6385' - adminurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:6385' - region: 'regionOne' - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - - - name: Create postgresql user for Ironic - postgresql_user: - name: "{{ IRONIC_DB_USER }}" - login_host: "{{ CONTROLLER_HOST_ADDRESS }}" - password: "{{ IRONIC_DB_PASSWORD }}" - sudo: yes - sudo_user: ironic - - - name: Create database for Ironic services - postgresql_db: - name: ironic - owner: "{{ IRONIC_DB_USER }}" - login_host: "{{ CONTROLLER_HOST_ADDRESS }}" - sudo: yes - sudo_user: ironic - - - name: Initiate Ironic database - # Use 'upgrade' instead of 'create_schema' to make the operation - # idempotent - shell: | - ironic-dbsync \ - --config-file /etc/ironic/ironic.conf upgrade - sudo: yes - sudo_user: ironic - - - name: Enable and start openstack-ironic services - service: - name: "{{ item }}" - enabled: yes - state: started - with_items: - - openstack-ironic-conductor.service - - openstack-ironic-api.service - - - name: Set owner and group for the tftp root directory - file: - path: "/srv/tftp_root/" - state: directory - owner: ironic - group: ironic - recurse: yes - - - name: Enable and start tftp-hpa - service: - name: tftp-hpa.socket - enabled: yes - state: started diff --git a/openstack/usr/share/openstack/ironic/ironic.conf b/openstack/usr/share/openstack/ironic/ironic.conf deleted file mode 100644 index 75c62b8e..00000000 --- a/openstack/usr/share/openstack/ironic/ironic.conf +++ /dev/null @@ -1,1247 +0,0 @@ -[DEFAULT] - -# -# Options defined in oslo.messaging -# - -# Use durable queues in amqp. (boolean value) -# Deprecated group/name - [DEFAULT]/rabbit_durable_queues -#amqp_durable_queues=false - -# Auto-delete queues in amqp. (boolean value) -#amqp_auto_delete=false - -# Size of RPC connection pool. (integer value) -#rpc_conn_pool_size=30 - -# Qpid broker hostname. (string value) -#qpid_hostname=localhost - -# Qpid broker port. (integer value) -#qpid_port=5672 - -# Qpid HA cluster host:port pairs. (list value) -#qpid_hosts=$qpid_hostname:$qpid_port - -# Username for Qpid connection. (string value) -#qpid_username= - -# Password for Qpid connection. (string value) -#qpid_password= - -# Space separated list of SASL mechanisms to use for auth. -# (string value) -#qpid_sasl_mechanisms= - -# Seconds between connection keepalive heartbeats. (integer -# value) -#qpid_heartbeat=60 - -# Transport to use, either 'tcp' or 'ssl'. (string value) -#qpid_protocol=tcp - -# Whether to disable the Nagle algorithm. (boolean value) -#qpid_tcp_nodelay=true - -# The number of prefetched messages held by receiver. (integer -# value) -#qpid_receiver_capacity=1 - -# The qpid topology version to use. Version 1 is what was -# originally used by impl_qpid. Version 2 includes some -# backwards-incompatible changes that allow broker federation -# to work. Users should update to version 2 when they are -# able to take everything down, as it requires a clean break. -# (integer value) -#qpid_topology_version=1 - -# SSL version to use (valid only if SSL enabled). valid values -# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some -# distributions. (string value) -#kombu_ssl_version= - -# SSL key file (valid only if SSL enabled). 
(string value) -#kombu_ssl_keyfile= - -# SSL cert file (valid only if SSL enabled). (string value) -#kombu_ssl_certfile= - -# SSL certification authority file (valid only if SSL -# enabled). (string value) -#kombu_ssl_ca_certs= - -# How long to wait before reconnecting in response to an AMQP -# consumer cancel notification. (floating point value) -#kombu_reconnect_delay=1.0 - -# The RabbitMQ broker address where a single node is used. -# (string value) -rabbit_host={{ RABBITMQ_HOST }} - -# The RabbitMQ broker port where a single node is used. -# (integer value) -rabbit_port={{ RABBITMQ_PORT }} - -# RabbitMQ HA cluster host:port pairs. (list value) -#rabbit_hosts=$rabbit_host:$rabbit_port - -# Connect over SSL for RabbitMQ. (boolean value) -#rabbit_use_ssl=false - -# The RabbitMQ userid. (string value) -rabbit_userid={{ RABBITMQ_USER }} - -# The RabbitMQ password. (string value) -rabbit_password={{ RABBITMQ_PASSWORD }} - -# the RabbitMQ login method (string value) -#rabbit_login_method=AMQPLAIN - -# The RabbitMQ virtual host. (string value) -#rabbit_virtual_host=/ - -# How frequently to retry connecting with RabbitMQ. (integer -# value) -#rabbit_retry_interval=1 - -# How long to backoff for between retries when connecting to -# RabbitMQ. (integer value) -#rabbit_retry_backoff=2 - -# Maximum number of RabbitMQ connection retries. Default is 0 -# (infinite retry count). (integer value) -#rabbit_max_retries=0 - -# Use HA queues in RabbitMQ (x-ha-policy: all). If you change -# this option, you must wipe the RabbitMQ database. (boolean -# value) -#rabbit_ha_queues=false - -# If passed, use a fake RabbitMQ provider. (boolean value) -#fake_rabbit=false - -# ZeroMQ bind address. Should be a wildcard (*), an ethernet -# interface, or IP. The "host" option should point or resolve -# to this address. (string value) -#rpc_zmq_bind_address=* - -# MatchMaker driver. (string value) -#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost - -# ZeroMQ receiver listening port. (integer value) -#rpc_zmq_port=9501 - -# Number of ZeroMQ contexts, defaults to 1. (integer value) -#rpc_zmq_contexts=1 - -# Maximum number of ingress messages to locally buffer per -# topic. Default is unlimited. (integer value) -#rpc_zmq_topic_backlog= - -# Directory for holding IPC sockets. (string value) -#rpc_zmq_ipc_dir=/var/run/openstack - -# Name of this node. Must be a valid hostname, FQDN, or IP -# address. Must match "host" option, if running Nova. (string -# value) -#rpc_zmq_host=ironic - -# Seconds to wait before a cast expires (TTL). Only supported -# by impl_zmq. (integer value) -#rpc_cast_timeout=30 - -# Heartbeat frequency. (integer value) -#matchmaker_heartbeat_freq=300 - -# Heartbeat time-to-live. (integer value) -#matchmaker_heartbeat_ttl=600 - -# Size of RPC greenthread pool. (integer value) -#rpc_thread_pool_size=64 - -# Driver or drivers to handle sending notifications. (multi -# valued) -#notification_driver= - -# AMQP topic used for OpenStack notifications. (list value) -# Deprecated group/name - [rpc_notifier2]/topics -#notification_topics=notifications - -# Seconds to wait for a response from a call. (integer value) -#rpc_response_timeout=60 - -# A URL representing the messaging driver to use and its full -# configuration. If not set, we fall back to the rpc_backend -# option and driver specific configuration. (string value) -#transport_url= - -# The messaging driver to use, defaults to rabbit. Other -# drivers include qpid and zmq. 
(string value) -#rpc_backend=rabbit - -# The default exchange under which topics are scoped. May be -# overridden by an exchange name specified in the -# transport_url option. (string value) -#control_exchange=openstack - - -# -# Options defined in ironic.netconf -# - -# IP address of this host. (string value) -my_ip={{ MANAGEMENT_INTERFACE_IP_ADDRESS }} - -# Use IPv6. (boolean value) -#use_ipv6=false - - -# -# Options defined in ironic.api.app -# - -# Method to use for authentication: noauth or keystone. -# (string value) -#auth_strategy=keystone - - -# -# Options defined in ironic.common.driver_factory -# - -# Specify the list of drivers to load during service -# initialization. Missing drivers, or drivers which fail to -# initialize, will prevent the conductor service from -# starting. The option default is a recommended set of -# production-oriented drivers. A complete list of drivers -# present on your system may be found by enumerating the -# "ironic.drivers" entrypoint. An example may be found in the -# developer documentation online. (list value) -enabled_drivers=pxe_ipmitool,pxe_ssh - - -# -# Options defined in ironic.common.exception -# - -# Make exception message format errors fatal. (boolean value) -#fatal_exception_format_errors=false - - -# -# Options defined in ironic.common.hash_ring -# - -# Exponent to determine number of hash partitions to use when -# distributing load across conductors. Larger values will -# result in more even distribution of load and less load when -# rebalancing the ring, but more memory usage. Number of -# partitions per conductor is (2^hash_partition_exponent). -# This determines the granularity of rebalancing: given 10 -# hosts, and an exponent of the 2, there are 40 partitions in -# the ring.A few thousand partitions should make rebalancing -# smooth in most cases. The default is suitable for up to a -# few hundred conductors. Too many partitions has a CPU -# impact. (integer value) -#hash_partition_exponent=5 - -# [Experimental Feature] Number of hosts to map onto each hash -# partition. Setting this to more than one will cause -# additional conductor services to prepare deployment -# environments and potentially allow the Ironic cluster to -# recover more quickly if a conductor instance is terminated. -# (integer value) -#hash_distribution_replicas=1 - - -# -# Options defined in ironic.common.images -# - -# Force backing images to raw format. (boolean value) -#force_raw_images=true - -# Path to isolinux binary file. (string value) -#isolinux_bin=/usr/lib/syslinux/isolinux.bin - -# Template file for isolinux configuration file. (string -# value) -#isolinux_config_template=$pybasedir/common/isolinux_config.template - - -# -# Options defined in ironic.common.paths -# - -# Directory where the ironic python module is installed. -# (string value) -#pybasedir=/usr/lib/python/site-packages/ironic - -# Directory where ironic binaries are installed. (string -# value) -#bindir=$pybasedir/bin - -# Top-level directory for maintaining ironic's state. (string -# value) -#state_path=$pybasedir - - -# -# Options defined in ironic.common.policy -# - -# JSON file representing policy. (string value) -#policy_file=policy.json - -# Rule checked when requested rule is not found. (string -# value) -#policy_default_rule=default - - -# -# Options defined in ironic.common.service -# - -# Seconds between running periodic tasks. (integer value) -#periodic_interval=60 - -# Name of this node. This can be an opaque identifier. 
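The hash_partition_exponent comment above is easiest to check with a small worked example; the sketch below simply restates its arithmetic (2^exponent partitions per conductor):

    # Partitions in the ring: conductors * 2**hash_partition_exponent.
    def ring_partitions(num_conductors, hash_partition_exponent=5):
        return num_conductors * 2 ** hash_partition_exponent

    assert ring_partitions(10, 2) == 40   # the example from the comment
    assert ring_partitions(10) == 320     # with the default exponent of 5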
It is -# not necessarily a hostname, FQDN, or IP address. However, -# the node name must be valid within an AMQP key, and if using -# ZeroMQ, a valid hostname, FQDN, or IP address. (string -# value) -#host=ironic - - -# -# Options defined in ironic.common.utils -# - -# Path to the rootwrap configuration file to use for running -# commands as root. (string value) -#rootwrap_config=/etc/ironic/rootwrap.conf - -# Explicitly specify the temporary working directory. (string -# value) -#tempdir= - - -# -# Options defined in ironic.drivers.modules.image_cache -# - -# Run image downloads and raw format conversions in parallel. -# (boolean value) -#parallel_image_downloads=false - - -# -# Options defined in ironic.openstack.common.eventlet_backdoor -# - -# Enable eventlet backdoor. Acceptable values are 0, , -# and :, where 0 results in listening on a random -# tcp port number; results in listening on the -# specified port number (and not enabling backdoor if that -# port is in use); and : results in listening on -# the smallest unused port number within the specified range -# of port numbers. The chosen port is displayed in the -# service's log file. (string value) -#backdoor_port= - - -# -# Options defined in ironic.openstack.common.lockutils -# - -# Enables or disables inter-process locks. (boolean value) -#disable_process_locking=false - -# Directory to use for lock files. (string value) -#lock_path= - - -# -# Options defined in ironic.openstack.common.log -# - -# Print debugging output (set logging level to DEBUG instead -# of default WARNING level). (boolean value) -#debug=false - -# Print more verbose output (set logging level to INFO instead -# of default WARNING level). (boolean value) -#verbose=false - -# Log output to standard error. (boolean value) -#use_stderr=true - -# Format string to use for log messages with context. (string -# value) -#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages without context. -# (string value) -#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Data to append to log format when level is DEBUG. (string -# value) -#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format. -# (string value) -#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s - -# List of logger=LEVEL pairs. (list value) -#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN - -# Enables or disables publication of error events. (boolean -# value) -#publish_errors=false - -# Enables or disables fatal status of deprecations. (boolean -# value) -#fatal_deprecations=false - -# The format for an instance that is passed with the log -# message. (string value) -#instance_format="[instance: %(uuid)s] " - -# The format for an instance UUID that is passed with the log -# message. (string value) -#instance_uuid_format="[instance: %(uuid)s] " - -# The name of a logging configuration file. This file is -# appended to any existing logging configuration files. For -# details about logging configuration files, see the Python -# logging module documentation. 
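The logging_* format strings above are ordinary %-style Python logging formats. The sketch below feeds the context-less default to the standard library; the oslo-specific %(instance)s and %(user_identity)s placeholders are omitted since plain logging does not supply them:

    import logging

    fmt = ('%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
           '%(name)s [-] %(message)s')
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S'))
    log = logging.getLogger('ironic')
    log.addHandler(handler)
    log.warning("formatted like the default string above")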
(string value) -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append= - -# DEPRECATED. A logging.Formatter log message format string -# which may use any of the available logging.LogRecord -# attributes. This option is deprecated. Please use -# logging_context_format_string and -# logging_default_format_string instead. (string value) -#log_format= - -# Format string for %%(asctime)s in log records. Default: -# %(default)s . (string value) -#log_date_format=%Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to output to. If no default is -# set, logging will go to stdout. (string value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file= - -# (Optional) The base directory used for relative --log-file -# paths. (string value) -# Deprecated group/name - [DEFAULT]/logdir -#log_dir= - -# Use syslog for logging. Existing syslog format is DEPRECATED -# during I, and will change in J to honor RFC5424. (boolean -# value) -use_syslog=True - -# (Optional) Enables or disables syslog rfc5424 format for -# logging. If enabled, prefixes the MSG part of the syslog -# message with APP-NAME (RFC5424). The format without the APP- -# NAME is deprecated in I, and will be removed in J. (boolean -# value) -#use_syslog_rfc_format=false - -# Syslog facility to receive log lines. (string value) -#syslog_log_facility=LOG_USER - - -# -# Options defined in ironic.openstack.common.periodic_task -# - -# Some periodic tasks can be run in a separate process. Should -# we run them here? (boolean value) -#run_external_periodic_tasks=true - - -[agent] - -# -# Options defined in ironic.drivers.modules.agent -# - -# Additional append parameters for baremetal PXE boot. (string -# value) -#agent_pxe_append_params=nofb nomodeset vga=normal - -# Template file for PXE configuration. (string value) -#agent_pxe_config_template=$pybasedir/drivers/modules/agent_config.template - -# Neutron bootfile DHCP parameter. (string value) -#agent_pxe_bootfile_name=pxelinux.0 - -# Maximum interval (in seconds) for agent heartbeats. (integer -# value) -#heartbeat_timeout=300 - - -# -# Options defined in ironic.drivers.modules.agent_client -# - -# API version to use for communicating with the ramdisk agent. -# (string value) -#agent_api_version=v1 - - -[api] - -# -# Options defined in ironic.api -# - -# The listen IP for the Ironic API server. (string value) -#host_ip=0.0.0.0 - -# The port for the Ironic API server. (integer value) -#port=6385 - -# The maximum number of items returned in a single response -# from a collection resource. (integer value) -#max_limit=1000 - - -[conductor] - -# -# Options defined in ironic.conductor.manager -# - -# URL of Ironic API service. If not set ironic can get the -# current value from the keystone service catalog. (string -# value) -api_url=http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:6385 - -# Seconds between conductor heart beats. (integer value) -#heartbeat_interval=10 - -# Maximum time (in seconds) since the last check-in of a -# conductor. (integer value) -#heartbeat_timeout=60 - -# Interval between syncing the node power state to the -# database, in seconds. (integer value) -#sync_power_state_interval=60 - -# Interval between checks of provision timeouts, in seconds. -# (integer value) -#check_provision_state_interval=60 - -# Timeout (seconds) for waiting callback from deploy ramdisk. -# 0 - unlimited. 
(integer value) -#deploy_callback_timeout=1800 - -# During sync_power_state, should the hardware power state be -# set to the state recorded in the database (True) or should -# the database be updated based on the hardware state (False). -# (boolean value) -#force_power_state_during_sync=true - -# During sync_power_state failures, limit the number of times -# Ironic should try syncing the hardware node power state with -# the node power state in DB (integer value) -#power_state_sync_max_retries=3 - -# Maximum number of worker threads that can be started -# simultaneously by a periodic task. Should be less than RPC -# thread pool size. (integer value) -#periodic_max_workers=8 - -# The size of the workers greenthread pool. (integer value) -#workers_pool_size=100 - -# Number of attempts to grab a node lock. (integer value) -#node_locked_retry_attempts=3 - -# Seconds to sleep between node lock attempts. (integer value) -#node_locked_retry_interval=1 - -# Enable sending sensor data message via the notification bus -# (boolean value) -#send_sensor_data=false - -# Seconds between conductor sending sensor data message to -# ceilometer via the notification bus. (integer value) -#send_sensor_data_interval=600 - -# List of comma separated metric types which need to be sent -# to Ceilometer. The default value, "ALL", is a special value -# meaning send all the sensor data. (list value) -#send_sensor_data_types=ALL - -# When conductors join or leave the cluster, existing -# conductors may need to update any persistent local state as -# nodes are moved around the cluster. This option controls how -# often, in seconds, each conductor will check for nodes that -# it should "take over". Set it to a negative value to disable -# the check entirely. (integer value) -#sync_local_state_interval=180 - - -[console] - -# -# Options defined in ironic.drivers.modules.console_utils -# - -# Path to serial console terminal program (string value) -#terminal=shellinaboxd - -# Directory containing the terminal SSL cert(PEM) for serial -# console access (string value) -#terminal_cert_dir= - -# Directory for holding terminal pid files. If not specified, -# the temporary directory will be used. (string value) -#terminal_pid_dir= - -# Time interval (in seconds) for checking the status of -# console subprocess. (integer value) -#subprocess_checking_interval=1 - -# Time (in seconds) to wait for the console subprocess to -# start. (integer value) -#subprocess_timeout=10 - - -[database] - -# -# Options defined in oslo.db -# - -# The file name to use with SQLite. (string value) -#sqlite_db=oslo.sqlite - -# If True, SQLite uses synchronous mode. (boolean value) -#sqlite_synchronous=true - -# The back end to use for the database. (string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend=sqlalchemy - -# The SQLAlchemy connection string to use to connect to the -# database. (string value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -connection=postgresql://{{ IRONIC_DB_USER}}:{{ IRONIC_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/ironic - -# The SQLAlchemy connection string to use to connect to the -# slave database. (string value) -#slave_connection= - -# The SQL mode to be used for MySQL sessions. This option, -# including the default, overrides any server-set SQL mode. To -# use whatever SQL mode is set by the server configuration, -# set this to no value. 
Example: mysql_sql_mode= (string -# value) -#mysql_sql_mode=TRADITIONAL - -# Timeout before idle SQL connections are reaped. (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout=3600 - -# Minimum number of SQL connections to keep open in a pool. -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size=1 - -# Maximum number of SQL connections to keep open in a pool. -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size= - -# Maximum db connection retries during startup. Set to -1 to -# specify an infinite retry count. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries=10 - -# Interval between retries of opening a SQL connection. -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval=10 - -# If set, use this value for max_overflow with SQLAlchemy. -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow= - -# Verbosity of SQL debugging information: 0=None, -# 100=Everything. (integer value) -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug=0 - -# Add Python stack traces to SQL as comment strings. (boolean -# value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace=false - -# If set, use this value for pool_timeout with SQLAlchemy. -# (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout= - -# Enable the experimental use of database reconnect on -# connection lost. (boolean value) -#use_db_reconnect=false - -# Seconds between database connection retries. (integer value) -#db_retry_interval=1 - -# If True, increases the interval between database connection -# retries up to db_max_retry_interval. (boolean value) -#db_inc_retry_interval=true - -# If db_inc_retry_interval is set, the maximum seconds between -# database connection retries. (integer value) -#db_max_retry_interval=10 - -# Maximum database connection retries before error is raised. -# Set to -1 to specify an infinite retry count. (integer -# value) -#db_max_retries=20 - - -# -# Options defined in ironic.db.sqlalchemy.models -# - -# MySQL engine to use. (string value) -#mysql_engine=InnoDB - - -[dhcp] - -# -# Options defined in ironic.common.dhcp_factory -# - -# DHCP provider to use. "neutron" uses Neutron, and "none" -# uses a no-op provider. (string value) -#dhcp_provider=neutron - - -[disk_partitioner] - -# -# Options defined in ironic.common.disk_partitioner -# - -# After Ironic has completed creating the partition table, it -# continues to check for activity on the attached iSCSI device -# status at this interval prior to copying the image to the -# node, in seconds (integer value) -#check_device_interval=1 - -# The maximum number of times to check that the device is not -# accessed by another process. If the device is still busy -# after that, the disk partitioning will be treated as having -# failed. 
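Taken together, the db_retry_interval, db_inc_retry_interval, db_max_retry_interval and db_max_retries options above describe a capped back-off schedule for reconnecting. The sketch below only illustrates that schedule; the doubling step is an assumption for illustration, not oslo.db's actual code:

    def retry_intervals(retry_interval=1, inc_retry_interval=True,
                        max_retry_interval=10, max_retries=20):
        # Yield the wait before each reconnect attempt.
        interval = retry_interval
        for _ in range(max_retries):
            yield interval
            if inc_retry_interval:
                interval = min(interval * 2, max_retry_interval)

    print(list(retry_intervals()))   # 1, 2, 4, 8, 10, 10, ... (20 entries)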
(integer value) -#check_device_max_retries=20 - - -[glance] - -# -# Options defined in ironic.common.glance_service.v2.image_service -# - -# A list of URL schemes that can be downloaded directly via -# the direct_url. Currently supported schemes: [file]. (list -# value) -#allowed_direct_url_schemes= - -# The secret token given to Swift to allow temporary URL -# downloads. Required for temporary URLs. (string value) -#swift_temp_url_key= - -# The length of time in seconds that the temporary URL will be -# valid for. Defaults to 20 minutes. If some deploys get a 401 -# response code when trying to download from the temporary -# URL, try raising this duration. (integer value) -#swift_temp_url_duration=1200 - -# The "endpoint" (scheme, hostname, optional port) for the -# Swift URL of the form -# "endpoint_url/api_version/account/container/object_id". Do -# not include trailing "/". For example, use -# "https://swift.example.com". Required for temporary URLs. -# (string value) -#swift_endpoint_url= - -# The Swift API version to create a temporary URL for. -# Defaults to "v1". Swift temporary URL format: -# "endpoint_url/api_version/account/container/object_id" -# (string value) -#swift_api_version=v1 - -# The account that Glance uses to communicate with Swift. The -# format is "AUTH_uuid". "uuid" is the UUID for the account -# configured in the glance-api.conf. Required for temporary -# URLs. For example: -# "AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30". Swift temporary -# URL format: -# "endpoint_url/api_version/account/container/object_id" -# (string value) -#swift_account= - -# The Swift container Glance is configured to store its images -# in. Defaults to "glance", which is the default in glance- -# api.conf. Swift temporary URL format: -# "endpoint_url/api_version/account/container/object_id" -# (string value) -#swift_container=glance - - -# -# Options defined in ironic.common.image_service -# - -# Default glance hostname or IP address. (string value) -glance_host={{ CONTROLLER_HOST_ADDRESS }} - -# Default glance port. (integer value) -#glance_port=9292 - -# Default protocol to use when connecting to glance. Set to -# https for SSL. (string value) -#glance_protocol=http - -# A list of the glance api servers available to ironic. Prefix -# with https:// for SSL-based glance API servers. Format is -# [hostname|IP]:port. (string value) -#glance_api_servers= - -# Allow to perform insecure SSL (https) requests to glance. -# (boolean value) -#glance_api_insecure=false - -# Number of retries when downloading an image from glance. -# (integer value) -#glance_num_retries=0 - -# Default protocol to use when connecting to glance. Set to -# https for SSL. (string value) -#auth_strategy=keystone - - -[ilo] - -# -# Options defined in ironic.drivers.modules.ilo.common -# - -# Timeout (in seconds) for iLO operations (integer value) -#client_timeout=60 - -# Port to be used for iLO operations (integer value) -#client_port=443 - -# The Swift iLO container to store data. (string value) -#swift_ilo_container=ironic_ilo_container - -# Amount of time in seconds for Swift objects to auto-expire. 
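The swift_temp_url_* options above follow Swift's standard temporary-URL scheme: an HMAC-SHA1 over the method, the expiry time and the object path, keyed with swift_temp_url_key. A sketch with placeholder values (the endpoint, account and object path here are hypothetical):

    import hmac
    import time
    from hashlib import sha1

    key = b'SWIFT_TEMP_URL_KEY'                   # swift_temp_url_key
    endpoint = 'https://swift.example.com'        # swift_endpoint_url
    path = '/v1/AUTH_account/glance/IMAGE_UUID'   # hypothetical object
    expires = int(time.time()) + 1200             # swift_temp_url_duration

    payload = 'GET\n{0}\n{1}'.format(expires, path).encode()
    sig = hmac.new(key, payload, sha1).hexdigest()
    url = '{0}{1}?temp_url_sig={2}&temp_url_expires={3}'.format(
        endpoint, path, sig, expires)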
-# (integer value) -#swift_object_expiry_timeout=900 - - -# -# Options defined in ironic.drivers.modules.ilo.power -# - -# Number of times a power operation needs to be retried -# (integer value) -#power_retry=6 - -# Amount of time in seconds to wait in between power -# operations (integer value) -#power_wait=2 - - -[ipmi] - -# -# Options defined in ironic.drivers.modules.ipminative -# - -# Maximum time in seconds to retry IPMI operations. (integer -# value) -#retry_timeout=60 - -# Minimum time, in seconds, between IPMI operations sent to a -# server. There is a risk with some hardware that setting this -# too low may cause the BMC to crash. Recommended setting is 5 -# seconds. (integer value) -#min_command_interval=5 - - -[keystone_authtoken] - -# -# Options defined in keystonemiddleware.auth_token -# - -# Prefix to prepend at the beginning of the path. Deprecated, -# use identity_uri. (string value) -#auth_admin_prefix= - -# Host providing the admin Identity API endpoint. Deprecated, -# use identity_uri. (string value) -#auth_host=127.0.0.1 - -# Port of the admin Identity API endpoint. Deprecated, use -# identity_uri. (integer value) -#auth_port=35357 - -# Protocol of the admin Identity API endpoint (http or https). -# Deprecated, use identity_uri. (string value) -#auth_protocol=https - -# Complete public Identity API endpoint (string value) -auth_uri=http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 - -# Complete admin Identity API endpoint. This should specify -# the unversioned root endpoint e.g. https://localhost:35357/ -# (string value) -identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357 - -# API version of the admin Identity API endpoint (string -# value) -#auth_version= - -# Do not handle authorization requests within the middleware, -# but delegate the authorization decision to downstream WSGI -# components (boolean value) -#delay_auth_decision=false - -# Request timeout value for communicating with Identity API -# server. (boolean value) -#http_connect_timeout= - -# How many times are we trying to reconnect when communicating -# with Identity API Server. (integer value) -#http_request_max_retries=3 - -# This option is deprecated and may be removed in a future -# release. Single shared secret with the Keystone -# configuration used for bootstrapping a Keystone -# installation, or otherwise bypassing the normal -# authentication process. This option should not be used, use -# `admin_user` and `admin_password` instead. (string value) -#admin_token= - -# Keystone account username (string value) -admin_user={{ IRONIC_SERVICE_USER }} - -# Keystone account password (string value) -admin_password={{ IRONIC_SERVICE_PASSWORD }} - -# Keystone service account tenant name to validate user tokens -# (string value) -admin_tenant_name=service - -# Env key for the swift cache (string value) -#cache= - -# Required if Keystone server requires client certificate -# (string value) -#certfile= - -# Required if Keystone server requires client certificate -# (string value) -#keyfile= - -# A PEM encoded Certificate Authority to use when verifying -# HTTPs connections. Defaults to system CAs. (string value) -#cafile= - -# Verify HTTPS connections. (boolean value) -#insecure=false - -# Directory used to cache files related to PKI tokens (string -# value) -#signing_dir= - -# Optionally specify a list of memcached server(s) to use for -# caching. If left undefined, tokens will instead be cached -# in-process. 
(list value) -# Deprecated group/name - [DEFAULT]/memcache_servers -#memcached_servers= - -# In order to prevent excessive effort spent validating -# tokens, the middleware caches previously-seen tokens for a -# configurable duration (in seconds). Set to -1 to disable -# caching completely. (integer value) -#token_cache_time=300 - -# Determines the frequency at which the list of revoked tokens -# is retrieved from the Identity service (in seconds). A high -# number of revocation events combined with a low cache -# duration may significantly reduce performance. (integer -# value) -#revocation_cache_time=10 - -# (optional) if defined, indicate whether token data should be -# authenticated or authenticated and encrypted. Acceptable -# values are MAC or ENCRYPT. If MAC, token data is -# authenticated (with HMAC) in the cache. If ENCRYPT, token -# data is encrypted and authenticated in the cache. If the -# value is not one of these options or empty, auth_token will -# raise an exception on initialization. (string value) -#memcache_security_strategy= - -# (optional, mandatory if memcache_security_strategy is -# defined) this string is used for key derivation. (string -# value) -#memcache_secret_key= - -# (optional) number of seconds memcached server is considered -# dead before it is tried again. (integer value) -#memcache_pool_dead_retry=300 - -# (optional) max total number of open connections to every -# memcached server. (integer value) -#memcache_pool_maxsize=10 - -# (optional) socket timeout in seconds for communicating with -# a memcache server. (integer value) -#memcache_pool_socket_timeout=3 - -# (optional) number of seconds a connection to memcached is -# held unused in the pool before it is closed. (integer value) -#memcache_pool_unused_timeout=60 - -# (optional) number of seconds that an operation will wait to -# get a memcache client connection from the pool. (integer -# value) -#memcache_pool_conn_get_timeout=10 - -# (optional) use the advanced (eventlet safe) memcache client -# pool. The advanced pool will only work under python 2.x. -# (boolean value) -#memcache_use_advanced_pool=false - -# (optional) indicate whether to set the X-Service-Catalog -# header. If False, middleware will not ask for service -# catalog on token validation and will not set the X-Service- -# Catalog header. (boolean value) -#include_service_catalog=true - -# Used to control the use and type of token binding. Can be -# set to: "disabled" to not check token binding. "permissive" -# (default) to validate binding information if the bind type -# is of a form known to the server and ignore it if not. -# "strict" like "permissive" but if the bind type is unknown -# the token will be rejected. "required" any form of token -# binding is needed to be allowed. Finally the name of a -# binding method that must be present in tokens. (string -# value) -#enforce_token_bind=permissive - -# If true, the revocation list will be checked for cached -# tokens. This requires that PKI tokens are configured on the -# Keystone server. (boolean value) -#check_revocations_for_cached=false - -# Hash algorithms to use for hashing PKI tokens. This may be a -# single algorithm or multiple. The algorithms are those -# supported by Python standard hashlib.new(). The hashes will -# be tried in the order given, so put the preferred one first -# for performance. The result of the first hash will be stored -# in the cache. 
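The MAC value of memcache_security_strategy above means "store the token data alongside an HMAC and reject tampered cache entries on read". The following is only an illustrative sketch of that idea, not keystonemiddleware's implementation (which additionally derives the key from memcache_secret_key):

    import hmac
    from hashlib import sha256

    def protect(secret, data):
        mac = hmac.new(secret, data, sha256).hexdigest().encode()
        return mac + b';' + data

    def verify(secret, blob):
        mac, data = blob.split(b';', 1)
        expected = hmac.new(secret, data, sha256).hexdigest().encode()
        if not hmac.compare_digest(mac, expected):
            raise ValueError('cached token failed authentication')
        return data

    blob = protect(b'memcache_secret_key', b'serialised token data')
    assert verify(b'memcache_secret_key', blob) == b'serialised token data'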
This will typically be set to multiple values -# only while migrating from a less secure algorithm to a more -# secure one. Once all the old tokens are expired this option -# should be set to a single value for better performance. -# (list value) -#hash_algorithms=md5 - - -[matchmaker_redis] - -# -# Options defined in oslo.messaging -# - -# Host to locate redis. (string value) -#host=127.0.0.1 - -# Use this port to connect to redis host. (integer value) -#port=6379 - -# Password for Redis server (optional). (string value) -#password= - - -[matchmaker_ring] - -# -# Options defined in oslo.messaging -# - -# Matchmaker ring file (JSON). (string value) -# Deprecated group/name - [DEFAULT]/matchmaker_ringfile -#ringfile=/etc/oslo/matchmaker_ring.json - - -[neutron] - -# -# Options defined in ironic.dhcp.neutron -# - -# URL for connecting to neutron. (string value) -url=http://{{ CONTROLLER_HOST_ADDRESS }}:9696 - -# Timeout value for connecting to neutron in seconds. (integer -# value) -#url_timeout=30 - -# Default authentication strategy to use when connecting to -# neutron. Can be either "keystone" or "noauth". Running -# neutron in noauth mode (related to but not affected by this -# setting) is insecure and should only be used for testing. -# (string value) -#auth_strategy=keystone - - -[pxe] - -# -# Options defined in ironic.drivers.modules.iscsi_deploy -# - -# Additional append parameters for baremetal PXE boot. (string -# value) -#pxe_append_params=nofb nomodeset vga=normal - -# Default file system format for ephemeral partition, if one -# is created. (string value) -#default_ephemeral_format=ext4 - -# Directory where images are stored on disk. (string value) -#images_path=/var/lib/ironic/images/ - -# Directory where master instance images are stored on disk. -# (string value) -#instance_master_path=/var/lib/ironic/master_images - -# Maximum size (in MiB) of cache for master images, including -# those in use. (integer value) -#image_cache_size=20480 - -# Maximum TTL (in minutes) for old master images in cache. -# (integer value) -#image_cache_ttl=10080 - -# The disk devices to scan while doing the deploy. (string -# value) -#disk_devices=cciss/c0d0,sda,hda,vda - - -# -# Options defined in ironic.drivers.modules.pxe -# - -# Template file for PXE configuration. (string value) -#pxe_config_template=$pybasedir/drivers/modules/pxe_config.template - -# Template file for PXE configuration for UEFI boot loader. -# (string value) -#uefi_pxe_config_template=$pybasedir/drivers/modules/elilo_efi_pxe_config.template - -# IP address of Ironic compute node's tftp server. (string -# value) -#tftp_server=$my_ip - -# Ironic compute node's tftp root path. (string value) -tftp_root=/srv/tftp_root/ - -# Directory where master tftp images are stored on disk. -# (string value) -tftp_master_path=/srv/tftp_root/master_images - -# Bootfile DHCP parameter. (string value) -#pxe_bootfile_name=pxelinux.0 - -# Bootfile DHCP parameter for UEFI boot mode. (string value) -#uefi_pxe_bootfile_name=elilo.efi - -# Ironic compute node's HTTP server URL. Example: -# http://192.1.2.3:8080 (string value) -#http_url= - -# Ironic compute node's HTTP root path. (string value) -#http_root=/httpboot - -# Enable iPXE boot. (boolean value) -#ipxe_enabled=false - -# The path to the main iPXE script file. 
(string value) -#ipxe_boot_script=$pybasedir/drivers/modules/boot.ipxe - - -[seamicro] - -# -# Options defined in ironic.drivers.modules.seamicro -# - -# Maximum retries for SeaMicro operations (integer value) -#max_retry=3 - -# Seconds to wait for power action to be completed (integer -# value) -#action_timeout=10 - - -[snmp] - -# -# Options defined in ironic.drivers.modules.snmp -# - -# Seconds to wait for power action to be completed (integer -# value) -#power_timeout=10 - - -[ssh] - -# -# Options defined in ironic.drivers.modules.ssh -# - -# libvirt uri (string value) -#libvirt_uri=qemu:///system - - -[swift] - -# -# Options defined in ironic.common.swift -# - -# Maximum number of times to retry a Swift request, before -# failing. (integer value) -#swift_max_retries=2 - - diff --git a/openstack/usr/share/openstack/ironic/policy.json b/openstack/usr/share/openstack/ironic/policy.json deleted file mode 100644 index 94ac3a5b..00000000 --- a/openstack/usr/share/openstack/ironic/policy.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "admin": "role:admin or role:administrator", - "admin_api": "is_admin:True", - "default": "rule:admin_api" -} diff --git a/openstack/usr/share/openstack/iscsi.yml b/openstack/usr/share/openstack/iscsi.yml deleted file mode 100644 index b80377ae..00000000 --- a/openstack/usr/share/openstack/iscsi.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- hosts: localhost - tasks: - - name: Update kernel module dependencies - command: depmod -a - - - name: generate InitiatorName for iscsi - shell: iscsi-iname - register: initiator_name - - - lineinfile: - dest: /etc/iscsi/initiatorname.iscsi - regexp: '^InitiatorName=$' - line: 'InitiatorName={{ initiator_name.stdout }}' - backrefs: yes diff --git a/openstack/usr/share/openstack/keystone.yml b/openstack/usr/share/openstack/keystone.yml deleted file mode 100644 index 330d74d0..00000000 --- a/openstack/usr/share/openstack/keystone.yml +++ /dev/null @@ -1,143 +0,0 @@ ---- -- hosts: localhost - vars_files: - - "/etc/openstack/keystone.conf" - tasks: - - # RabbitMQ configuration, this may end up in a different playbook - - name: Create rabbitmq user - user: - name: rabbitmq - comment: Rabbitmq server daemon - shell: /sbin/nologin - home: /var/lib/rabbitmq - - - name: Create the rabbitmq directories - file: - path: "{{ item }}" - state: directory - owner: rabbitmq - group: rabbitmq - with_items: - - /var/run/rabbitmq - - /var/log/rabbitmq - - /etc/rabbitmq - - - name: Add the configuration needed for rabbitmq in /etc/rabbitmq using templates - template: - src: /usr/share/openstack/rabbitmq/{{ item }} - dest: /etc/rabbitmq/{{ item }} - owner: rabbitmq - group: rabbitmq - mode: 0644 - with_items: - - rabbitmq.config - - rabbitmq-env.conf - - - name: Enable and start rabbitmq services - service: - name: "{{ item }}" - enabled: yes - state: started - with_items: - - rabbitmq-server - - # Keystone configuration - - name: Create the keystone user. 
- user: - name: keystone - comment: Openstack Keystone Daemons - shell: /sbin/nologin - home: /var/lib/keystone - - - name: Create the /var folders for keystone - file: - path: "{{ item }}" - state: directory - owner: keystone - group: keystone - with_items: - - /var/run/keystone - - /var/lock/keystone - - /var/log/keystone - - /var/lib/keystone - - - name: Create /etc/keystone directory - file: - path: /etc/keystone - state: directory - - - name: Add the configuration needed for keystone in /etc using templates - template: - src: /usr/share/openstack/keystone/{{ item }} - dest: /etc/keystone/{{ item }} - with_lines: - - cd /usr/share/openstack/keystone && find -type f - - - name: Create postgresql user for keystone - postgresql_user: - name: "{{ KEYSTONE_DB_USER }}" - password: "{{ KEYSTONE_DB_PASSWORD }}" - sudo: yes - sudo_user: keystone - - - name: Create database for keystone services - postgresql_db: - name: keystone - owner: "{{ KEYSTONE_DB_USER }}" - sudo: yes - sudo_user: keystone - - - name: Initiatie keystone database - keystone_manage: - action: dbsync - sudo: yes - sudo_user: keystone - - - name: Enable and start openstack-keystone service - service: - name: openstack-keystone.service - enabled: yes - state: started - - - name: Create admin tenant - keystone_user: - tenant: admin - tenant_description: Admin Tenant - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0 - - - name: Create admin user for the admin tenant - keystone_user: - user: admin - tenant: admin - password: "{{ KEYSTONE_ADMIN_PASSWORD }}" - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0 - - - name: Create admin role for admin user in the admin tenant - keystone_user: - role: admin - user: admin - tenant: admin - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0 - - - name: Create service tenant - keystone_user: - tenant: service - tenant_description: Service Tenant - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0 - - - name: Add keystone endpoint - keystone_service: - name: keystone - type: identity - description: Keystone Identity Service - publicurl: http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 - internalurl: http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 - adminurl: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0 - region: regionOne - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0 diff --git a/openstack/usr/share/openstack/keystone/keystone-paste.ini b/openstack/usr/share/openstack/keystone/keystone-paste.ini deleted file mode 100644 index 46f994c3..00000000 --- a/openstack/usr/share/openstack/keystone/keystone-paste.ini +++ /dev/null @@ -1,121 +0,0 @@ -# Keystone PasteDeploy configuration file. 
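For orientation, a task like "Create service tenant" in the keystone.yml playbook above amounts to a single call against Keystone's v2.0 admin API, authenticated with the temporary admin token. A stdlib-only Python 2 sketch; the host and token values are placeholders standing in for the template variables:

    import json
    import urllib2

    endpoint = 'http://controller:35357/v2.0'   # CONTROLLER_HOST_ADDRESS
    token = 'TEMPORARY_ADMIN_TOKEN'             # KEYSTONE_TEMPORARY_ADMIN_TOKEN
    tenant = {'tenant': {'name': 'service',
                         'description': 'Service Tenant',
                         'enabled': True}}

    request = urllib2.Request(endpoint + '/tenants', json.dumps(tenant),
                              {'X-Auth-Token': token,
                               'Content-Type': 'application/json'})
    print(urllib2.urlopen(request).read())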
- -[filter:debug] -paste.filter_factory = keystone.common.wsgi:Debug.factory - -[filter:build_auth_context] -paste.filter_factory = keystone.middleware:AuthContextMiddleware.factory - -[filter:token_auth] -paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory - -[filter:admin_token_auth] -paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory - -[filter:xml_body] -paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory - -[filter:xml_body_v2] -paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV2.factory - -[filter:xml_body_v3] -paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV3.factory - -[filter:json_body] -paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory - -[filter:user_crud_extension] -paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory - -[filter:crud_extension] -paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory - -[filter:ec2_extension] -paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory - -[filter:ec2_extension_v3] -paste.filter_factory = keystone.contrib.ec2:Ec2ExtensionV3.factory - -[filter:federation_extension] -paste.filter_factory = keystone.contrib.federation.routers:FederationExtension.factory - -[filter:oauth1_extension] -paste.filter_factory = keystone.contrib.oauth1.routers:OAuth1Extension.factory - -[filter:s3_extension] -paste.filter_factory = keystone.contrib.s3:S3Extension.factory - -[filter:endpoint_filter_extension] -paste.filter_factory = keystone.contrib.endpoint_filter.routers:EndpointFilterExtension.factory - -[filter:endpoint_policy_extension] -paste.filter_factory = keystone.contrib.endpoint_policy.routers:EndpointPolicyExtension.factory - -[filter:simple_cert_extension] -paste.filter_factory = keystone.contrib.simple_cert:SimpleCertExtension.factory - -[filter:revoke_extension] -paste.filter_factory = keystone.contrib.revoke.routers:RevokeExtension.factory - -[filter:url_normalize] -paste.filter_factory = keystone.middleware:NormalizingFilter.factory - -[filter:sizelimit] -paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory - -[filter:stats_monitoring] -paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory - -[filter:stats_reporting] -paste.filter_factory = keystone.contrib.stats:StatsExtension.factory - -[filter:access_log] -paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory - -[app:public_service] -paste.app_factory = keystone.service:public_app_factory - -[app:service_v3] -paste.app_factory = keystone.service:v3_app_factory - -[app:admin_service] -paste.app_factory = keystone.service:admin_app_factory - -[pipeline:public_api] -# The last item in this pipeline must be public_service or an equivalent -# application. It cannot be a filter. -pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension user_crud_extension public_service - -[pipeline:admin_api] -# The last item in this pipeline must be admin_service or an equivalent -# application. It cannot be a filter. -pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension s3_extension crud_extension admin_service - -[pipeline:api_v3] -# The last item in this pipeline must be service_v3 or an equivalent -# application. It cannot be a filter. 
-pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v3 json_body ec2_extension_v3 s3_extension simple_cert_extension revoke_extension service_v3 - -[app:public_version_service] -paste.app_factory = keystone.service:public_version_app_factory - -[app:admin_version_service] -paste.app_factory = keystone.service:admin_version_app_factory - -[pipeline:public_version_api] -pipeline = sizelimit url_normalize xml_body public_version_service - -[pipeline:admin_version_api] -pipeline = sizelimit url_normalize xml_body admin_version_service - -[composite:main] -use = egg:Paste#urlmap -/v2.0 = public_api -/v3 = api_v3 -/ = public_version_api - -[composite:admin] -use = egg:Paste#urlmap -/v2.0 = admin_api -/v3 = api_v3 -/ = admin_version_api diff --git a/openstack/usr/share/openstack/keystone/keystone.conf b/openstack/usr/share/openstack/keystone/keystone.conf deleted file mode 100644 index 4e04c81b..00000000 --- a/openstack/usr/share/openstack/keystone/keystone.conf +++ /dev/null @@ -1,1588 +0,0 @@ -[DEFAULT] - -# -# Options defined in keystone -# - -# A "shared secret" that can be used to bootstrap Keystone. -# This "token" does not represent a user, and carries no -# explicit authorization. To disable in production (highly -# recommended), remove AdminTokenAuthMiddleware from your -# paste application pipelines (for example, in keystone- -# paste.ini). (string value) -admin_token={{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }} - -# The IP address of the network interface for the public -# service to listen on. (string value) -# Deprecated group/name - [DEFAULT]/bind_host -#public_bind_host=0.0.0.0 - -# The IP address of the network interface for the admin -# service to listen on. (string value) -# Deprecated group/name - [DEFAULT]/bind_host -#admin_bind_host=0.0.0.0 - -# (Deprecated) The port which the OpenStack Compute service -# listens on. This option was only used for string replacement -# in the templated catalog backend. Templated catalogs should -# replace the "$(compute_port)s" substitution with the static -# port of the compute service. As of Juno, this option is -# deprecated and will be removed in the L release. (integer -# value) -#compute_port=8774 - -# The port number which the admin service listens on. (integer -# value) -admin_port=35357 - -# The port number which the public service listens on. -# (integer value) -public_port=5000 - -# The base public endpoint URL for Keystone that is advertised -# to clients (NOTE: this does NOT affect how Keystone listens -# for connections). Defaults to the base host URL of the -# request. E.g. a request to http://server:5000/v2.0/users -# will default to http://server:5000. You should only need to -# set this value if the base URL contains a path (e.g. -# /prefix/v2.0) or the endpoint should be found on a different -# server. (string value) -#public_endpoint= - -# The base admin endpoint URL for Keystone that is advertised -# to clients (NOTE: this does NOT affect how Keystone listens -# for connections). Defaults to the base host URL of the -# request. E.g. a request to http://server:35357/v2.0/users -# will default to http://server:35357. You should only need to -# set this value if the base URL contains a path (e.g. -# /prefix/v2.0) or the endpoint should be found on a different -# server. (string value) -#admin_endpoint= - -# The number of worker processes to serve the public WSGI -# application. Defaults to number of CPUs (minimum of 2). 
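The pipeline comments above hinge on the PasteDeploy convention that filters wrap the next component, while only the final entry is a complete application. A minimal sketch of the filter-factory shape used by every [filter:...] section in this file:

    def factory(global_conf, **local_conf):
        '''PasteDeploy filter factory: returns a wrapper for the next app.'''
        def _filter(app):
            def middleware(environ, start_response):
                # Inspect or modify the request, then delegate onwards;
                # a filter never terminates the pipeline itself.
                return app(environ, start_response)
            return middleware
        return _filter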
-# (integer value) -#public_workers= - -# The number of worker processes to serve the admin WSGI -# application. Defaults to number of CPUs (minimum of 2). -# (integer value) -#admin_workers= - -# Enforced by optional sizelimit middleware -# (keystone.middleware:RequestBodySizeLimiter). (integer -# value) -#max_request_body_size=114688 - -# Limit the sizes of user & project ID/names. (integer value) -#max_param_size=64 - -# Similar to max_param_size, but provides an exception for -# token values. (integer value) -#max_token_size=8192 - -# During a SQL upgrade member_role_id will be used to create a -# new role that will replace records in the assignment table -# with explicit role grants. After migration, the -# member_role_id will be used in the API add_user_to_project. -# (string value) -#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab - -# During a SQL upgrade member_role_name will be used to create -# a new role that will replace records in the assignment table -# with explicit role grants. After migration, member_role_name -# will be ignored. (string value) -#member_role_name=_member_ - -# The value passed as the keyword "rounds" to passlib's -# encrypt method. (integer value) -#crypt_strength=40000 - -# Set this to true if you want to enable TCP_KEEPALIVE on -# server sockets, i.e. sockets used by the Keystone wsgi -# server for client connections. (boolean value) -#tcp_keepalive=false - -# Sets the value of TCP_KEEPIDLE in seconds for each server -# socket. Only applies if tcp_keepalive is true. Not supported -# on OS X. (integer value) -#tcp_keepidle=600 - -# The maximum number of entities that will be returned in a -# collection, with no limit set by default. This global limit -# may be then overridden for a specific driver, by specifying -# a list_limit in the appropriate section (e.g. [assignment]). -# (integer value) -#list_limit= - -# Set this to false if you want to enable the ability for -# user, group and project entities to be moved between domains -# by updating their domain_id. Allowing such movement is not -# recommended if the scope of a domain admin is being -# restricted by use of an appropriate policy file (see -# policy.v3cloudsample as an example). (boolean value) -#domain_id_immutable=true - -# If set to true, strict password length checking is performed -# for password manipulation. If a password exceeds the maximum -# length, the operation will fail with an HTTP 403 Forbidden -# error. If set to false, passwords are automatically -# truncated to the maximum length. (boolean value) -#strict_password_check=false - - -# -# Options defined in oslo.messaging -# - -# Use durable queues in amqp. (boolean value) -# Deprecated group/name - [DEFAULT]/rabbit_durable_queues -#amqp_durable_queues=false - -# Auto-delete queues in amqp. (boolean value) -#amqp_auto_delete=false - -# Size of RPC connection pool. (integer value) -#rpc_conn_pool_size=30 - -# Qpid broker hostname. (string value) -#qpid_hostname=localhost - -# Qpid broker port. (integer value) -#qpid_port=5672 - -# Qpid HA cluster host:port pairs. (list value) -#qpid_hosts=$qpid_hostname:$qpid_port - -# Username for Qpid connection. (string value) -#qpid_username= - -# Password for Qpid connection. (string value) -#qpid_password= - -# Space separated list of SASL mechanisms to use for auth. -# (string value) -#qpid_sasl_mechanisms= - -# Seconds between connection keepalive heartbeats. (integer -# value) -#qpid_heartbeat=60 - -# Transport to use, either 'tcp' or 'ssl'. 
(string value) -#qpid_protocol=tcp - -# Whether to disable the Nagle algorithm. (boolean value) -#qpid_tcp_nodelay=true - -# The number of prefetched messages held by receiver. (integer -# value) -#qpid_receiver_capacity=1 - -# The qpid topology version to use. Version 1 is what was -# originally used by impl_qpid. Version 2 includes some -# backwards-incompatible changes that allow broker federation -# to work. Users should update to version 2 when they are -# able to take everything down, as it requires a clean break. -# (integer value) -#qpid_topology_version=1 - -# SSL version to use (valid only if SSL enabled). valid values -# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some -# distributions. (string value) -#kombu_ssl_version= - -# SSL key file (valid only if SSL enabled). (string value) -#kombu_ssl_keyfile= - -# SSL cert file (valid only if SSL enabled). (string value) -#kombu_ssl_certfile= - -# SSL certification authority file (valid only if SSL -# enabled). (string value) -#kombu_ssl_ca_certs= - -# How long to wait before reconnecting in response to an AMQP -# consumer cancel notification. (floating point value) -#kombu_reconnect_delay=1.0 - -# The RabbitMQ broker address where a single node is used. -# (string value) -rabbit_host={{ RABBITMQ_HOST }} - -# The RabbitMQ broker port where a single node is used. -# (integer value) -rabbit_port={{ RABBITMQ_PORT }} - -# RabbitMQ HA cluster host:port pairs. (list value) -rabbit_hosts=$rabbit_host:$rabbit_port - -# Connect over SSL for RabbitMQ. (boolean value) -rabbit_use_ssl=false - -# The RabbitMQ userid. (string value) -rabbit_userid={{ RABBITMQ_USER }} - -# The RabbitMQ password. (string value) -rabbit_password={{ RABBITMQ_PASSWORD }} - -# the RabbitMQ login method (string value) -#rabbit_login_method=AMQPLAIN - -# The RabbitMQ virtual host. (string value) -#rabbit_virtual_host=/ - -# How frequently to retry connecting with RabbitMQ. (integer -# value) -#rabbit_retry_interval=1 - -# How long to backoff for between retries when connecting to -# RabbitMQ. (integer value) -#rabbit_retry_backoff=2 - -# Maximum number of RabbitMQ connection retries. Default is 0 -# (infinite retry count). (integer value) -#rabbit_max_retries=0 - -# Use HA queues in RabbitMQ (x-ha-policy: all). If you change -# this option, you must wipe the RabbitMQ database. (boolean -# value) -#rabbit_ha_queues=false - -# If passed, use a fake RabbitMQ provider. (boolean value) -#fake_rabbit=false - -# ZeroMQ bind address. Should be a wildcard (*), an ethernet -# interface, or IP. The "host" option should point or resolve -# to this address. (string value) -#rpc_zmq_bind_address=* - -# MatchMaker driver. (string value) -#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost - -# ZeroMQ receiver listening port. (integer value) -#rpc_zmq_port=9501 - -# Number of ZeroMQ contexts, defaults to 1. (integer value) -#rpc_zmq_contexts=1 - -# Maximum number of ingress messages to locally buffer per -# topic. Default is unlimited. (integer value) -#rpc_zmq_topic_backlog= - -# Directory for holding IPC sockets. (string value) -#rpc_zmq_ipc_dir=/var/run/openstack - -# Name of this node. Must be a valid hostname, FQDN, or IP -# address. Must match "host" option, if running Nova. (string -# value) -#rpc_zmq_host=keystone - -# Seconds to wait before a cast expires (TTL). Only supported -# by impl_zmq. (integer value) -#rpc_cast_timeout=30 - -# Heartbeat frequency. (integer value) -#matchmaker_heartbeat_freq=300 - -# Heartbeat time-to-live. 
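The rabbit_* settings above can equivalently be expressed as the single transport_url option mentioned earlier; a sketch of that URL form, assuming the default virtual host "/" and placeholder credentials:

    rabbit = {'user': 'openstack', 'password': 'secret',
              'host': 'rabbit.example.com', 'port': 5672}   # placeholders
    transport_url = 'rabbit://{user}:{password}@{host}:{port}/'.format(**rabbit)
    print(transport_url)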
(integer value) -#matchmaker_heartbeat_ttl=600 - -# Size of RPC greenthread pool. (integer value) -#rpc_thread_pool_size=64 - -# Driver or drivers to handle sending notifications. (multi -# valued) -#notification_driver= - -# AMQP topic used for OpenStack notifications. (list value) -# Deprecated group/name - [rpc_notifier2]/topics -#notification_topics=notifications - -# Seconds to wait for a response from a call. (integer value) -#rpc_response_timeout=60 - -# A URL representing the messaging driver to use and its full -# configuration. If not set, we fall back to the rpc_backend -# option and driver specific configuration. (string value) -#transport_url= - -# The messaging driver to use, defaults to rabbit. Other -# drivers include qpid and zmq. (string value) -rpc_backend=rabbit - -# The default exchange under which topics are scoped. May be -# overridden by an exchange name specified in the -# transport_url option. (string value) -#control_exchange=keystone - - -# -# Options defined in keystone.notifications -# - -# Default publisher_id for outgoing notifications (string -# value) -#default_publisher_id= - - -# -# Options defined in keystone.openstack.common.eventlet_backdoor -# - -# Enable eventlet backdoor. Acceptable values are 0, , -# and :, where 0 results in listening on a random -# tcp port number; results in listening on the -# specified port number (and not enabling backdoor if that -# port is in use); and : results in listening on -# the smallest unused port number within the specified range -# of port numbers. The chosen port is displayed in the -# service's log file. (string value) -#backdoor_port= - - -# -# Options defined in keystone.openstack.common.log -# - -# Print debugging output (set logging level to DEBUG instead -# of default WARNING level). (boolean value) -#debug=false - -# Print more verbose output (set logging level to INFO instead -# of default WARNING level). (boolean value) -#verbose=false - -# Log output to standard error. (boolean value) -#use_stderr=true - -# Format string to use for log messages with context. (string -# value) -#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages without context. -# (string value) -#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Data to append to log format when level is DEBUG. (string -# value) -#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format. -# (string value) -#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s - -# List of logger=LEVEL pairs. (list value) -#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN - -# Enables or disables publication of error events. (boolean -# value) -#publish_errors=false - -# Enables or disables fatal status of deprecations. (boolean -# value) -#fatal_deprecations=false - -# The format for an instance that is passed with the log -# message. (string value) -#instance_format="[instance: %(uuid)s] " - -# The format for an instance UUID that is passed with the log -# message. 
(string value) -#instance_uuid_format="[instance: %(uuid)s] " - -# The name of a logging configuration file. This file is -# appended to any existing logging configuration files. For -# details about logging configuration files, see the Python -# logging module documentation. (string value) -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append= - -# DEPRECATED. A logging.Formatter log message format string -# which may use any of the available logging.LogRecord -# attributes. This option is deprecated. Please use -# logging_context_format_string and -# logging_default_format_string instead. (string value) -#log_format= - -# Format string for %%(asctime)s in log records. Default: -# %(default)s . (string value) -#log_date_format=%Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to output to. If no default is -# set, logging will go to stdout. (string value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file= - -# (Optional) The base directory used for relative --log-file -# paths. (string value) -# Deprecated group/name - [DEFAULT]/logdir -#log_dir= - -# Use syslog for logging. Existing syslog format is DEPRECATED -# during I, and will change in J to honor RFC5424. (boolean -# value) -use_syslog=True - -# (Optional) Enables or disables syslog rfc5424 format for -# logging. If enabled, prefixes the MSG part of the syslog -# message with APP-NAME (RFC5424). The format without the APP- -# NAME is deprecated in I, and will be removed in J. (boolean -# value) -#use_syslog_rfc_format=false - -# Syslog facility to receive log lines. (string value) -#syslog_log_facility=LOG_USER - - -# -# Options defined in keystone.openstack.common.policy -# - -# The JSON file that defines policies. (string value) -#policy_file=policy.json - -# Default rule. Enforced when a requested rule is not found. -# (string value) -#policy_default_rule=default - - -[assignment] - -# -# Options defined in keystone -# - -# Assignment backend driver. (string value) -#driver= - -# Toggle for assignment caching. This has no effect unless -# global caching is enabled. (boolean value) -#caching=true - -# TTL (in seconds) to cache assignment data. This has no -# effect unless global caching is enabled. (integer value) -#cache_time= - -# Maximum number of entities that will be returned in an -# assignment collection. (integer value) -#list_limit= - - -[auth] - -# -# Options defined in keystone -# - -# Default auth methods. (list value) -#methods=external,password,token - -# The password auth plugin module. (string value) -#password=keystone.auth.plugins.password.Password - -# The token auth plugin module. (string value) -#token=keystone.auth.plugins.token.Token - -# The external (REMOTE_USER) auth plugin module. (string -# value) -#external=keystone.auth.plugins.external.DefaultDomain - - -[cache] - -# -# Options defined in keystone -# - -# Prefix for building the configuration dictionary for the -# cache region. This should not need to be changed unless -# there is another dogpile.cache region with the same -# configuration name. (string value) -#config_prefix=cache.keystone - -# Default TTL, in seconds, for any cached item in the -# dogpile.cache region. This applies to any cached method that -# doesn't have an explicit cache expiration time defined for -# it. (integer value) -#expiration_time=600 - -# Dogpile.cache backend module. It is recommended that -# Memcache with pooling (keystone.cache.memcache_pool) or -# Redis (dogpile.cache.redis) be used in production -# deployments. 
-
-# Arguments supplied to the backend module. Specify this
-# option once per argument to be passed to the dogpile.cache
-# backend. Example format: "<argname>:<value>". (multi valued)
-#backend_argument=
-
-# Proxy classes to import that will affect the way the
-# dogpile.cache backend functions. See the dogpile.cache
-# documentation on changing-backend-behavior. (list value)
-#proxies=
-
-# Global toggle for all caching using the should_cache_fn
-# mechanism. (boolean value)
-#enabled=false
-
-# Extra debugging from the cache backend (cache keys,
-# get/set/delete/etc calls). This is only really useful if you
-# need to see the specific cache-backend get/set/delete calls
-# with the keys/values. Typically this should be left set to
-# false. (boolean value)
-#debug_cache_backend=false
-
-# Memcache servers in the format of "host:port".
-# (dogpile.cache.memcache and keystone.cache.memcache_pool
-# backends only) (list value)
-#memcache_servers=localhost:11211
-
-# Number of seconds memcached server is considered dead before
-# it is tried again. (dogpile.cache.memcache and
-# keystone.cache.memcache_pool backends only) (integer value)
-#memcache_dead_retry=300
-
-# Timeout in seconds for every call to a server.
-# (dogpile.cache.memcache and keystone.cache.memcache_pool
-# backends only) (integer value)
-#memcache_socket_timeout=3
-
-# Max total number of open connections to every memcached
-# server. (keystone.cache.memcache_pool backend only) (integer
-# value)
-#memcache_pool_maxsize=10
-
-# Number of seconds a connection to memcached is held unused
-# in the pool before it is closed.
-# (keystone.cache.memcache_pool backend only) (integer value)
-#memcache_pool_unused_timeout=60
-
-# Number of seconds that an operation will wait to get a
-# memcache client connection. (integer value)
-#memcache_pool_connection_get_timeout=10
-
-
-[catalog]
-
-#
-# Options defined in keystone
-#
-
-# Catalog template file name for use with the template catalog
-# backend. (string value)
-#template_file=default_catalog.templates
-
-# Catalog backend driver. (string value)
-#driver=keystone.catalog.backends.sql.Catalog
-
-# Toggle for catalog caching. This has no effect unless global
-# caching is enabled. (boolean value)
-#caching=true
-
-# Time to cache catalog data (in seconds). This has no effect
-# unless global and catalog caching are enabled. (integer
-# value)
-#cache_time=
-
-# Maximum number of entities that will be returned in a
-# catalog collection. (integer value)
-#list_limit=
-
-# (Deprecated) List of possible substitutions for use in
-# formatting endpoints. Use caution when modifying this list.
-# It will give users with permission to create endpoints the
-# ability to see those values in your configuration file. This
-# option will be removed in Juno. (list value)
-#endpoint_substitution_whitelist=tenant_id,user_id,public_bind_host,admin_bind_host,compute_host,compute_port,admin_port,public_port,public_endpoint,admin_endpoint
-
-
-[credential]
-
-#
-# Options defined in keystone
-#
-
-# Credential backend driver. (string value)
-#driver=keystone.credential.backends.sql.Credential
-
-
-[database]
-
-#
-# Options defined in oslo.db
-#
-
-# The file name to use with SQLite. (string value)
-#sqlite_db=oslo.sqlite
-
-# If True, SQLite uses synchronous mode. (boolean value)
-#sqlite_synchronous=true
-
-# The back end to use for the database.
(string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend=sqlalchemy - -# The SQLAlchemy connection string to use to connect to the -# database. (string value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection= -connection=postgresql://{{ KEYSTONE_DB_USER }}:{{ KEYSTONE_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/keystone - -# The SQLAlchemy connection string to use to connect to the -# slave database. (string value) -#slave_connection= - -# The SQL mode to be used for MySQL sessions. This option, -# including the default, overrides any server-set SQL mode. To -# use whatever SQL mode is set by the server configuration, -# set this to no value. Example: mysql_sql_mode= (string -# value) -#mysql_sql_mode=TRADITIONAL - -# Timeout before idle SQL connections are reaped. (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout=3600 - -# Minimum number of SQL connections to keep open in a pool. -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size=1 - -# Maximum number of SQL connections to keep open in a pool. -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size= - -# Maximum db connection retries during startup. Set to -1 to -# specify an infinite retry count. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries=10 - -# Interval between retries of opening a SQL connection. -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval=10 - -# If set, use this value for max_overflow with SQLAlchemy. -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow= - -# Verbosity of SQL debugging information: 0=None, -# 100=Everything. (integer value) -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug=0 - -# Add Python stack traces to SQL as comment strings. (boolean -# value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace=false - -# If set, use this value for pool_timeout with SQLAlchemy. -# (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout= - -# Enable the experimental use of database reconnect on -# connection lost. (boolean value) -#use_db_reconnect=false - -# Seconds between database connection retries. (integer value) -#db_retry_interval=1 - -# If True, increases the interval between database connection -# retries up to db_max_retry_interval. (boolean value) -#db_inc_retry_interval=true - -# If db_inc_retry_interval is set, the maximum seconds between -# database connection retries. (integer value) -#db_max_retry_interval=10 - -# Maximum database connection retries before error is raised. -# Set to -1 to specify an infinite retry count. (integer -# value) -#db_max_retries=20 - - -[ec2] - -# -# Options defined in keystone -# - -# EC2Credential backend driver. 
(string value) -#driver=keystone.contrib.ec2.backends.kvs.Ec2 - - -[endpoint_filter] - -# -# Options defined in keystone -# - -# Endpoint Filter backend driver (string value) -#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter - -# Toggle to return all active endpoints if no filter exists. -# (boolean value) -#return_all_endpoints_if_no_filter=true - - -[endpoint_policy] - -# -# Options defined in keystone -# - -# Endpoint policy backend driver (string value) -#driver=keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy - - -[federation] - -# -# Options defined in keystone -# - -# Federation backend driver. (string value) -#driver=keystone.contrib.federation.backends.sql.Federation - -# Value to be used when filtering assertion parameters from -# the environment. (string value) -#assertion_prefix= - - -[identity] - -# -# Options defined in keystone -# - -# This references the domain to use for all Identity API v2 -# requests (which are not aware of domains). A domain with -# this ID will be created for you by keystone-manage db_sync -# in migration 008. The domain referenced by this ID cannot be -# deleted on the v3 API, to prevent accidentally breaking the -# v2 API. There is nothing special about this domain, other -# than the fact that it must exist to order to maintain -# support for your v2 clients. (string value) -#default_domain_id=default - -# A subset (or all) of domains can have their own identity -# driver, each with their own partial configuration file in a -# domain configuration directory. Only values specific to the -# domain need to be placed in the domain specific -# configuration file. This feature is disabled by default; set -# to true to enable. (boolean value) -#domain_specific_drivers_enabled=false - -# Path for Keystone to locate the domain specific identity -# configuration files if domain_specific_drivers_enabled is -# set to true. (string value) -#domain_config_dir=/etc/keystone/domains - -# Identity backend driver. (string value) -#driver=keystone.identity.backends.sql.Identity - -# Maximum supported length for user passwords; decrease to -# improve performance. (integer value) -#max_password_length=4096 - -# Maximum number of entities that will be returned in an -# identity collection. (integer value) -#list_limit= - - -[identity_mapping] - -# -# Options defined in keystone -# - -# Keystone Identity Mapping backend driver. (string value) -#driver=keystone.identity.mapping_backends.sql.Mapping - -# Public ID generator for user and group entities. The -# Keystone identity mapper only supports generators that -# produce no more than 64 characters. (string value) -#generator=keystone.identity.id_generators.sha256.Generator - -# The format of user and group IDs changed in Juno for -# backends that do not generate UUIDs (e.g. LDAP), with -# keystone providing a hash mapping to the underlying -# attribute in LDAP. By default this mapping is disabled, -# which ensures that existing IDs will not change. Even when -# the mapping is enabled by using domain specific drivers, any -# users and groups from the default domain being handled by -# LDAP will still not be mapped to ensure their IDs remain -# backward compatible. Setting this value to False will enable -# the mapping for even the default LDAP driver. It is only -# safe to do this if you do not already have assignments for -# users and groups from the default LDAP domain, and it is -# acceptable for Keystone to provide the different IDs to -# clients than it did previously. 
Typically this means that -# the only time you can set this value to False is when -# configuring a fresh installation. (boolean value) -#backward_compatible_ids=true - - -[kvs] - -# -# Options defined in keystone -# - -# Extra dogpile.cache backend modules to register with the -# dogpile.cache library. (list value) -#backends= - -# Prefix for building the configuration dictionary for the KVS -# region. This should not need to be changed unless there is -# another dogpile.cache region with the same configuration -# name. (string value) -#config_prefix=keystone.kvs - -# Toggle to disable using a key-mangling function to ensure -# fixed length keys. This is toggle-able for debugging -# purposes, it is highly recommended to always leave this set -# to true. (boolean value) -#enable_key_mangler=true - -# Default lock timeout for distributed locking. (integer -# value) -#default_lock_timeout=5 - - -[ldap] - -# -# Options defined in keystone -# - -# URL for connecting to the LDAP server. (string value) -#url=ldap://localhost - -# User BindDN to query the LDAP server. (string value) -#user= - -# Password for the BindDN to query the LDAP server. (string -# value) -#password= - -# LDAP server suffix (string value) -#suffix=cn=example,cn=com - -# If true, will add a dummy member to groups. This is required -# if the objectclass for groups requires the "member" -# attribute. (boolean value) -#use_dumb_member=false - -# DN of the "dummy member" to use when "use_dumb_member" is -# enabled. (string value) -#dumb_member=cn=dumb,dc=nonexistent - -# Delete subtrees using the subtree delete control. Only -# enable this option if your LDAP server supports subtree -# deletion. (boolean value) -#allow_subtree_delete=false - -# The LDAP scope for queries, this can be either "one" -# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree). -# (string value) -#query_scope=one - -# Maximum results per page; a value of zero ("0") disables -# paging. (integer value) -#page_size=0 - -# The LDAP dereferencing option for queries. This can be -# either "never", "searching", "always", "finding" or -# "default". The "default" option falls back to using default -# dereferencing configured by your ldap.conf. (string value) -#alias_dereferencing=default - -# Sets the LDAP debugging level for LDAP calls. A value of 0 -# means that debugging is not enabled. This value is a -# bitmask, consult your LDAP documentation for possible -# values. (integer value) -#debug_level= - -# Override the system's default referral chasing behavior for -# queries. (boolean value) -#chase_referrals= - -# Search base for users. (string value) -#user_tree_dn= - -# LDAP search filter for users. (string value) -#user_filter= - -# LDAP objectclass for users. (string value) -#user_objectclass=inetOrgPerson - -# LDAP attribute mapped to user id. WARNING: must not be a -# multivalued attribute. (string value) -#user_id_attribute=cn - -# LDAP attribute mapped to user name. (string value) -#user_name_attribute=sn - -# LDAP attribute mapped to user email. (string value) -#user_mail_attribute=mail - -# LDAP attribute mapped to password. (string value) -#user_pass_attribute=userPassword - -# LDAP attribute mapped to user enabled flag. (string value) -#user_enabled_attribute=enabled - -# Invert the meaning of the boolean enabled values. Some LDAP -# servers use a boolean lock attribute where "true" means an -# account is disabled. Setting "user_enabled_invert = true" -# will allow these lock attributes to be used. 
This setting -# will have no effect if "user_enabled_mask" or -# "user_enabled_emulation" settings are in use. (boolean -# value) -#user_enabled_invert=false - -# Bitmask integer to indicate the bit that the enabled value -# is stored in if the LDAP server represents "enabled" as a -# bit on an integer rather than a boolean. A value of "0" -# indicates the mask is not used. If this is not set to "0" -# the typical value is "2". This is typically used when -# "user_enabled_attribute = userAccountControl". (integer -# value) -#user_enabled_mask=0 - -# Default value to enable users. This should match an -# appropriate int value if the LDAP server uses non-boolean -# (bitmask) values to indicate if a user is enabled or -# disabled. If this is not set to "True" the typical value is -# "512". This is typically used when "user_enabled_attribute = -# userAccountControl". (string value) -#user_enabled_default=True - -# List of attributes stripped off the user on update. (list -# value) -#user_attribute_ignore=default_project_id,tenants - -# LDAP attribute mapped to default_project_id for users. -# (string value) -#user_default_project_id_attribute= - -# Allow user creation in LDAP backend. (boolean value) -#user_allow_create=true - -# Allow user updates in LDAP backend. (boolean value) -#user_allow_update=true - -# Allow user deletion in LDAP backend. (boolean value) -#user_allow_delete=true - -# If true, Keystone uses an alternative method to determine if -# a user is enabled or not by checking if they are a member of -# the "user_enabled_emulation_dn" group. (boolean value) -#user_enabled_emulation=false - -# DN of the group entry to hold enabled users when using -# enabled emulation. (string value) -#user_enabled_emulation_dn= - -# List of additional LDAP attributes used for mapping -# additional attribute mappings for users. Attribute mapping -# format is :, where ldap_attr is the -# attribute in the LDAP entry and user_attr is the Identity -# API attribute. (list value) -#user_additional_attribute_mapping= - -# Search base for projects (string value) -# Deprecated group/name - [ldap]/tenant_tree_dn -#project_tree_dn= - -# LDAP search filter for projects. (string value) -# Deprecated group/name - [ldap]/tenant_filter -#project_filter= - -# LDAP objectclass for projects. (string value) -# Deprecated group/name - [ldap]/tenant_objectclass -#project_objectclass=groupOfNames - -# LDAP attribute mapped to project id. (string value) -# Deprecated group/name - [ldap]/tenant_id_attribute -#project_id_attribute=cn - -# LDAP attribute mapped to project membership for user. -# (string value) -# Deprecated group/name - [ldap]/tenant_member_attribute -#project_member_attribute=member - -# LDAP attribute mapped to project name. (string value) -# Deprecated group/name - [ldap]/tenant_name_attribute -#project_name_attribute=ou - -# LDAP attribute mapped to project description. (string value) -# Deprecated group/name - [ldap]/tenant_desc_attribute -#project_desc_attribute=description - -# LDAP attribute mapped to project enabled. (string value) -# Deprecated group/name - [ldap]/tenant_enabled_attribute -#project_enabled_attribute=enabled - -# LDAP attribute mapped to project domain_id. (string value) -# Deprecated group/name - [ldap]/tenant_domain_id_attribute -#project_domain_id_attribute=businessCategory - -# List of attributes stripped off the project on update. (list -# value) -# Deprecated group/name - [ldap]/tenant_attribute_ignore -#project_attribute_ignore= - -# Allow project creation in LDAP backend. 
(boolean value) -# Deprecated group/name - [ldap]/tenant_allow_create -#project_allow_create=true - -# Allow project update in LDAP backend. (boolean value) -# Deprecated group/name - [ldap]/tenant_allow_update -#project_allow_update=true - -# Allow project deletion in LDAP backend. (boolean value) -# Deprecated group/name - [ldap]/tenant_allow_delete -#project_allow_delete=true - -# If true, Keystone uses an alternative method to determine if -# a project is enabled or not by checking if they are a member -# of the "project_enabled_emulation_dn" group. (boolean value) -# Deprecated group/name - [ldap]/tenant_enabled_emulation -#project_enabled_emulation=false - -# DN of the group entry to hold enabled projects when using -# enabled emulation. (string value) -# Deprecated group/name - [ldap]/tenant_enabled_emulation_dn -#project_enabled_emulation_dn= - -# Additional attribute mappings for projects. Attribute -# mapping format is :, where ldap_attr -# is the attribute in the LDAP entry and user_attr is the -# Identity API attribute. (list value) -# Deprecated group/name - [ldap]/tenant_additional_attribute_mapping -#project_additional_attribute_mapping= - -# Search base for roles. (string value) -#role_tree_dn= - -# LDAP search filter for roles. (string value) -#role_filter= - -# LDAP objectclass for roles. (string value) -#role_objectclass=organizationalRole - -# LDAP attribute mapped to role id. (string value) -#role_id_attribute=cn - -# LDAP attribute mapped to role name. (string value) -#role_name_attribute=ou - -# LDAP attribute mapped to role membership. (string value) -#role_member_attribute=roleOccupant - -# List of attributes stripped off the role on update. (list -# value) -#role_attribute_ignore= - -# Allow role creation in LDAP backend. (boolean value) -#role_allow_create=true - -# Allow role update in LDAP backend. (boolean value) -#role_allow_update=true - -# Allow role deletion in LDAP backend. (boolean value) -#role_allow_delete=true - -# Additional attribute mappings for roles. Attribute mapping -# format is :, where ldap_attr is the -# attribute in the LDAP entry and user_attr is the Identity -# API attribute. (list value) -#role_additional_attribute_mapping= - -# Search base for groups. (string value) -#group_tree_dn= - -# LDAP search filter for groups. (string value) -#group_filter= - -# LDAP objectclass for groups. (string value) -#group_objectclass=groupOfNames - -# LDAP attribute mapped to group id. (string value) -#group_id_attribute=cn - -# LDAP attribute mapped to group name. (string value) -#group_name_attribute=ou - -# LDAP attribute mapped to show group membership. (string -# value) -#group_member_attribute=member - -# LDAP attribute mapped to group description. (string value) -#group_desc_attribute=description - -# List of attributes stripped off the group on update. (list -# value) -#group_attribute_ignore= - -# Allow group creation in LDAP backend. (boolean value) -#group_allow_create=true - -# Allow group update in LDAP backend. (boolean value) -#group_allow_update=true - -# Allow group deletion in LDAP backend. (boolean value) -#group_allow_delete=true - -# Additional attribute mappings for groups. Attribute mapping -# format is :, where ldap_attr is the -# attribute in the LDAP entry and user_attr is the Identity -# API attribute. (list value) -#group_additional_attribute_mapping= - -# CA certificate file path for communicating with LDAP -# servers. (string value) -#tls_cacertfile= - -# CA certificate directory path for communicating with LDAP -# servers. 
(string value) -#tls_cacertdir= - -# Enable TLS for communicating with LDAP servers. (boolean -# value) -#use_tls=false - -# Valid options for tls_req_cert are demand, never, and allow. -# (string value) -#tls_req_cert=demand - -# Enable LDAP connection pooling. (boolean value) -#use_pool=false - -# Connection pool size. (integer value) -#pool_size=10 - -# Maximum count of reconnect trials. (integer value) -#pool_retry_max=3 - -# Time span in seconds to wait between two reconnect trials. -# (floating point value) -#pool_retry_delay=0.1 - -# Connector timeout in seconds. Value -1 indicates indefinite -# wait for response. (integer value) -#pool_connection_timeout=-1 - -# Connection lifetime in seconds. (integer value) -#pool_connection_lifetime=600 - -# Enable LDAP connection pooling for end user authentication. -# If use_pool is disabled, then this setting is meaningless -# and is not used at all. (boolean value) -#use_auth_pool=false - -# End user auth connection pool size. (integer value) -#auth_pool_size=100 - -# End user auth connection lifetime in seconds. (integer -# value) -#auth_pool_connection_lifetime=60 - - -[matchmaker_redis] - -# -# Options defined in oslo.messaging -# - -# Host to locate redis. (string value) -#host=127.0.0.1 - -# Use this port to connect to redis host. (integer value) -#port=6379 - -# Password for Redis server (optional). (string value) -#password= - - -[matchmaker_ring] - -# -# Options defined in oslo.messaging -# - -# Matchmaker ring file (JSON). (string value) -# Deprecated group/name - [DEFAULT]/matchmaker_ringfile -#ringfile=/etc/oslo/matchmaker_ring.json - - -[memcache] - -# -# Options defined in keystone -# - -# Memcache servers in the format of "host:port". (list value) -#servers=localhost:11211 - -# Number of seconds memcached server is considered dead before -# it is tried again. This is used by the key value store -# system (e.g. token pooled memcached persistence backend). -# (integer value) -#dead_retry=300 - -# Timeout in seconds for every call to a server. This is used -# by the key value store system (e.g. token pooled memcached -# persistence backend). (integer value) -#socket_timeout=3 - -# Max total number of open connections to every memcached -# server. This is used by the key value store system (e.g. -# token pooled memcached persistence backend). (integer value) -#pool_maxsize=10 - -# Number of seconds a connection to memcached is held unused -# in the pool before it is closed. This is used by the key -# value store system (e.g. token pooled memcached persistence -# backend). (integer value) -#pool_unused_timeout=60 - -# Number of seconds that an operation will wait to get a -# memcache client connection. This is used by the key value -# store system (e.g. token pooled memcached persistence -# backend). (integer value) -#pool_connection_get_timeout=10 - - -[oauth1] - -# -# Options defined in keystone -# - -# Credential backend driver. (string value) -#driver=keystone.contrib.oauth1.backends.sql.OAuth1 - -# Duration (in seconds) for the OAuth Request Token. (integer -# value) -#request_token_duration=28800 - -# Duration (in seconds) for the OAuth Access Token. (integer -# value) -#access_token_duration=86400 - - -[os_inherit] - -# -# Options defined in keystone -# - -# role-assignment inheritance to projects from owning domain -# can be optionally enabled. (boolean value) -#enabled=false - - -[paste_deploy] - -# -# Options defined in keystone -# - -# Name of the paste configuration file that defines the -# available pipelines. 
(string value) -#config_file=keystone-paste.ini - - -[policy] - -# -# Options defined in keystone -# - -# Policy backend driver. (string value) -#driver=keystone.policy.backends.sql.Policy - -# Maximum number of entities that will be returned in a policy -# collection. (integer value) -#list_limit= - - -[revoke] - -# -# Options defined in keystone -# - -# An implementation of the backend for persisting revocation -# events. (string value) -#driver=keystone.contrib.revoke.backends.kvs.Revoke - -# This value (calculated in seconds) is added to token -# expiration before a revocation event may be removed from the -# backend. (integer value) -#expiration_buffer=1800 - -# Toggle for revocation event caching. This has no effect -# unless global caching is enabled. (boolean value) -#caching=true - - -[saml] - -# -# Options defined in keystone -# - -# Default TTL, in seconds, for any generated SAML assertion -# created by Keystone. (integer value) -#assertion_expiration_time=3600 - -# Binary to be called for XML signing. Install the appropriate -# package, specify absolute path or adjust your PATH -# environment variable if the binary cannot be found. (string -# value) -#xmlsec1_binary=xmlsec1 - -# Path of the certfile for SAML signing. For non-production -# environments, you may be interested in using `keystone- -# manage pki_setup` to generate self-signed certificates. -# Note, the path cannot contain a comma. (string value) -#certfile=/etc/keystone/ssl/certs/signing_cert.pem - -# Path of the keyfile for SAML signing. Note, the path cannot -# contain a comma. (string value) -#keyfile=/etc/keystone/ssl/private/signing_key.pem - -# Entity ID value for unique Identity Provider identification. -# Usually FQDN is set with a suffix. A value is required to -# generate IDP Metadata. For example: -# https://keystone.example.com/v3/OS-FEDERATION/saml2/idp -# (string value) -#idp_entity_id= - -# Identity Provider Single-Sign-On service value, required in -# the Identity Provider's metadata. A value is required to -# generate IDP Metadata. For example: -# https://keystone.example.com/v3/OS-FEDERATION/saml2/sso -# (string value) -#idp_sso_endpoint= - -# Language used by the organization. (string value) -#idp_lang=en - -# Organization name the installation belongs to. (string -# value) -#idp_organization_name= - -# Organization name to be displayed. (string value) -#idp_organization_display_name= - -# URL of the organization. (string value) -#idp_organization_url= - -# Company of contact person. (string value) -#idp_contact_company= - -# Given name of contact person (string value) -#idp_contact_name= - -# Surname of contact person. (string value) -#idp_contact_surname= - -# Email address of contact person. (string value) -#idp_contact_email= - -# Telephone number of contact person. (string value) -#idp_contact_telephone= - -# Contact type. Allowed values are: technical, support, -# administrative billing, and other (string value) -#idp_contact_type=other - -# Path to the Identity Provider Metadata file. This file -# should be generated with the keystone-manage -# saml_idp_metadata command. (string value) -#idp_metadata_path=/etc/keystone/saml2_idp_metadata.xml - - -[signing] - -# -# Options defined in keystone -# - -# Deprecated in favor of provider in the [token] section. -# (string value) -#token_format= - -# Path of the certfile for token signing. For non-production -# environments, you may be interested in using `keystone- -# manage pki_setup` to generate self-signed certificates. 
-# (string value) -#certfile=/etc/keystone/ssl/certs/signing_cert.pem - -# Path of the keyfile for token signing. (string value) -#keyfile=/etc/keystone/ssl/private/signing_key.pem - -# Path of the CA for token signing. (string value) -#ca_certs=/etc/keystone/ssl/certs/ca.pem - -# Path of the CA key for token signing. (string value) -#ca_key=/etc/keystone/ssl/private/cakey.pem - -# Key size (in bits) for token signing cert (auto generated -# certificate). (integer value) -#key_size=2048 - -# Days the token signing cert is valid for (auto generated -# certificate). (integer value) -#valid_days=3650 - -# Certificate subject (auto generated certificate) for token -# signing. (string value) -#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com - - -[ssl] - -# -# Options defined in keystone -# - -# Toggle for SSL support on the Keystone eventlet servers. -# (boolean value) -#enable=false - -# Path of the certfile for SSL. For non-production -# environments, you may be interested in using `keystone- -# manage ssl_setup` to generate self-signed certificates. -# (string value) -#certfile=/etc/keystone/ssl/certs/keystone.pem - -# Path of the keyfile for SSL. (string value) -#keyfile=/etc/keystone/ssl/private/keystonekey.pem - -# Path of the ca cert file for SSL. (string value) -#ca_certs=/etc/keystone/ssl/certs/ca.pem - -# Path of the CA key file for SSL. (string value) -#ca_key=/etc/keystone/ssl/private/cakey.pem - -# Require client certificate. (boolean value) -#cert_required=false - -# SSL key length (in bits) (auto generated certificate). -# (integer value) -#key_size=1024 - -# Days the certificate is valid for once signed (auto -# generated certificate). (integer value) -#valid_days=3650 - -# SSL certificate subject (auto generated certificate). -# (string value) -#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost - - -[stats] - -# -# Options defined in keystone -# - -# Stats backend driver. (string value) -#driver=keystone.contrib.stats.backends.kvs.Stats - - -[token] - -# -# Options defined in keystone -# - -# External auth mechanisms that should add bind information to -# token, e.g., kerberos,x509. (list value) -#bind= - -# Enforcement policy on tokens presented to Keystone with bind -# information. One of disabled, permissive, strict, required -# or a specifically required bind mode, e.g., kerberos or x509 -# to require binding to that authentication. (string value) -#enforce_token_bind=permissive - -# Amount of time a token should remain valid (in seconds). -# (integer value) -#expiration=3600 - -# Controls the token construction, validation, and revocation -# operations. Core providers are -# "keystone.token.providers.[pkiz|pki|uuid].Provider". The -# default provider is pkiz. (string value) -provider=keystone.token.providers.uuid.Provider - -# Token persistence backend driver. (string value) -driver=keystone.token.backends.sql.Token - -# Toggle for token system caching. This has no effect unless -# global caching is enabled. (boolean value) -#caching=true - -# Time to cache the revocation list and the revocation events -# if revoke extension is enabled (in seconds). This has no -# effect unless global and token caching are enabled. (integer -# value) -#revocation_cache_time=3600 - -# Time to cache tokens (in seconds). This has no effect unless -# global and token caching are enabled. (integer value) -#cache_time= - -# Revoke token by token identifier. Setting revoke_by_id to -# true enables various forms of enumerating tokens, e.g. `list -# tokens for user`. 
These enumerations are processed to -# determine the list of tokens to revoke. Only disable if you -# are switching to using the Revoke extension with a backend -# other than KVS, which stores events in memory. (boolean -# value) -#revoke_by_id=true - -# The hash algorithm to use for PKI tokens. This can be set to -# any algorithm that hashlib supports. WARNING: Before -# changing this value, the auth_token middleware must be -# configured with the hash_algorithms, otherwise token -# revocation will not be processed correctly. (string value) -#hash_algorithm=md5 - - -[trust] - -# -# Options defined in keystone -# - -# Delegation and impersonation features can be optionally -# disabled. (boolean value) -#enabled=true - -# Trust backend driver. (string value) -#driver=keystone.trust.backends.sql.Trust - - diff --git a/openstack/usr/share/openstack/keystone/logging.conf b/openstack/usr/share/openstack/keystone/logging.conf deleted file mode 100644 index 6cb8c425..00000000 --- a/openstack/usr/share/openstack/keystone/logging.conf +++ /dev/null @@ -1,65 +0,0 @@ -[loggers] -keys=root,access - -[handlers] -keys=production,file,access_file,devel - -[formatters] -keys=minimal,normal,debug - - -########### -# Loggers # -########### - -[logger_root] -level=WARNING -handlers=file - -[logger_access] -level=INFO -qualname=access -handlers=access_file - - -################ -# Log Handlers # -################ - -[handler_production] -class=handlers.SysLogHandler -level=ERROR -formatter=normal -args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) - -[handler_file] -class=handlers.WatchedFileHandler -level=WARNING -formatter=normal -args=('error.log',) - -[handler_access_file] -class=handlers.WatchedFileHandler -level=INFO -formatter=minimal -args=('access.log',) - -[handler_devel] -class=StreamHandler -level=NOTSET -formatter=debug -args=(sys.stdout,) - - -################## -# Log Formatters # -################## - -[formatter_minimal] -format=%(message)s - -[formatter_normal] -format=(%(name)s): %(asctime)s %(levelname)s %(message)s - -[formatter_debug] -format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s diff --git a/openstack/usr/share/openstack/keystone/policy.json b/openstack/usr/share/openstack/keystone/policy.json deleted file mode 100644 index af65205e..00000000 --- a/openstack/usr/share/openstack/keystone/policy.json +++ /dev/null @@ -1,171 +0,0 @@ -{ - "admin_required": "role:admin or is_admin:1", - "service_role": "role:service", - "service_or_admin": "rule:admin_required or rule:service_role", - "owner" : "user_id:%(user_id)s", - "admin_or_owner": "rule:admin_required or rule:owner", - - "default": "rule:admin_required", - - "identity:get_region": "", - "identity:list_regions": "", - "identity:create_region": "rule:admin_required", - "identity:update_region": "rule:admin_required", - "identity:delete_region": "rule:admin_required", - - "identity:get_service": "rule:admin_required", - "identity:list_services": "rule:admin_required", - "identity:create_service": "rule:admin_required", - "identity:update_service": "rule:admin_required", - "identity:delete_service": "rule:admin_required", - - "identity:get_endpoint": "rule:admin_required", - "identity:list_endpoints": "rule:admin_required", - "identity:create_endpoint": "rule:admin_required", - "identity:update_endpoint": "rule:admin_required", - "identity:delete_endpoint": "rule:admin_required", - - "identity:get_domain": "rule:admin_required", - "identity:list_domains": 
"rule:admin_required", - "identity:create_domain": "rule:admin_required", - "identity:update_domain": "rule:admin_required", - "identity:delete_domain": "rule:admin_required", - - "identity:get_project": "rule:admin_required", - "identity:list_projects": "rule:admin_required", - "identity:list_user_projects": "rule:admin_or_owner", - "identity:create_project": "rule:admin_required", - "identity:update_project": "rule:admin_required", - "identity:delete_project": "rule:admin_required", - - "identity:get_user": "rule:admin_required", - "identity:list_users": "rule:admin_required", - "identity:create_user": "rule:admin_required", - "identity:update_user": "rule:admin_required", - "identity:delete_user": "rule:admin_required", - "identity:change_password": "rule:admin_or_owner", - - "identity:get_group": "rule:admin_required", - "identity:list_groups": "rule:admin_required", - "identity:list_groups_for_user": "rule:admin_or_owner", - "identity:create_group": "rule:admin_required", - "identity:update_group": "rule:admin_required", - "identity:delete_group": "rule:admin_required", - "identity:list_users_in_group": "rule:admin_required", - "identity:remove_user_from_group": "rule:admin_required", - "identity:check_user_in_group": "rule:admin_required", - "identity:add_user_to_group": "rule:admin_required", - - "identity:get_credential": "rule:admin_required", - "identity:list_credentials": "rule:admin_required", - "identity:create_credential": "rule:admin_required", - "identity:update_credential": "rule:admin_required", - "identity:delete_credential": "rule:admin_required", - - "identity:ec2_get_credential": "rule:admin_or_owner", - "identity:ec2_list_credentials": "rule:admin_or_owner", - "identity:ec2_create_credential": "rule:admin_or_owner", - "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", - - "identity:get_role": "rule:admin_required", - "identity:list_roles": "rule:admin_required", - "identity:create_role": "rule:admin_required", - "identity:update_role": "rule:admin_required", - "identity:delete_role": "rule:admin_required", - - "identity:check_grant": "rule:admin_required", - "identity:list_grants": "rule:admin_required", - "identity:create_grant": "rule:admin_required", - "identity:revoke_grant": "rule:admin_required", - - "identity:list_role_assignments": "rule:admin_required", - - "identity:get_policy": "rule:admin_required", - "identity:list_policies": "rule:admin_required", - "identity:create_policy": "rule:admin_required", - "identity:update_policy": "rule:admin_required", - "identity:delete_policy": "rule:admin_required", - - "identity:check_token": "rule:admin_required", - "identity:validate_token": "rule:service_or_admin", - "identity:validate_token_head": "rule:service_or_admin", - "identity:revocation_list": "rule:service_or_admin", - "identity:revoke_token": "rule:admin_or_owner", - - "identity:create_trust": "user_id:%(trust.trustor_user_id)s", - "identity:get_trust": "rule:admin_or_owner", - "identity:list_trusts": "", - "identity:list_roles_for_trust": "", - "identity:check_role_for_trust": "", - "identity:get_role_for_trust": "", - "identity:delete_trust": "", - - "identity:create_consumer": "rule:admin_required", - "identity:get_consumer": "rule:admin_required", - "identity:list_consumers": "rule:admin_required", - "identity:delete_consumer": "rule:admin_required", - "identity:update_consumer": "rule:admin_required", - - "identity:authorize_request_token": "rule:admin_required", - 
"identity:list_access_token_roles": "rule:admin_required", - "identity:get_access_token_role": "rule:admin_required", - "identity:list_access_tokens": "rule:admin_required", - "identity:get_access_token": "rule:admin_required", - "identity:delete_access_token": "rule:admin_required", - - "identity:list_projects_for_endpoint": "rule:admin_required", - "identity:add_endpoint_to_project": "rule:admin_required", - "identity:check_endpoint_in_project": "rule:admin_required", - "identity:list_endpoints_for_project": "rule:admin_required", - "identity:remove_endpoint_from_project": "rule:admin_required", - - "identity:create_endpoint_group": "rule:admin_required", - "identity:list_endpoint_groups": "rule:admin_required", - "identity:get_endpoint_group": "rule:admin_required", - "identity:update_endpoint_group": "rule:admin_required", - "identity:delete_endpoint_group": "rule:admin_required", - "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", - "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", - "identity:list_endpoint_groups_for_project": "rule:admin_required", - "identity:add_endpoint_group_to_project": "rule:admin_required", - "identity:remove_endpoint_group_from_project": "rule:admin_required", - - "identity:create_identity_provider": "rule:admin_required", - "identity:list_identity_providers": "rule:admin_required", - "identity:get_identity_providers": "rule:admin_required", - "identity:update_identity_provider": "rule:admin_required", - "identity:delete_identity_provider": "rule:admin_required", - - "identity:create_protocol": "rule:admin_required", - "identity:update_protocol": "rule:admin_required", - "identity:get_protocol": "rule:admin_required", - "identity:list_protocols": "rule:admin_required", - "identity:delete_protocol": "rule:admin_required", - - "identity:create_mapping": "rule:admin_required", - "identity:get_mapping": "rule:admin_required", - "identity:list_mappings": "rule:admin_required", - "identity:delete_mapping": "rule:admin_required", - "identity:update_mapping": "rule:admin_required", - - "identity:get_auth_catalog": "", - "identity:get_auth_projects": "", - "identity:get_auth_domains": "", - - "identity:list_projects_for_groups": "", - "identity:list_domains_for_groups": "", - - "identity:list_revoke_events": "", - - "identity:create_policy_association_for_endpoint": "rule:admin_required", - "identity:check_policy_association_for_endpoint": "rule:admin_required", - "identity:delete_policy_association_for_endpoint": "rule:admin_required", - "identity:create_policy_association_for_service": "rule:admin_required", - "identity:check_policy_association_for_service": "rule:admin_required", - "identity:delete_policy_association_for_service": "rule:admin_required", - "identity:create_policy_association_for_region_and_service": "rule:admin_required", - "identity:check_policy_association_for_region_and_service": "rule:admin_required", - "identity:delete_policy_association_for_region_and_service": "rule:admin_required", - "identity:get_policy_for_endpoint": "rule:admin_required", - "identity:list_endpoints_for_policy": "rule:admin_required" -} diff --git a/openstack/usr/share/openstack/network.yml b/openstack/usr/share/openstack/network.yml deleted file mode 100644 index f99f7f1a..00000000 --- a/openstack/usr/share/openstack/network.yml +++ /dev/null @@ -1,67 +0,0 @@ ---- -- hosts: localhost - vars_files: - - /etc/openstack/network.conf - tasks: -# Create the bridges to use the External network mapped - -# Count number 
of network interfaces (interfaces starting with 'e')
-  - shell: ls /sys/class/net | grep ^e.* | wc -l
-    register: number_interfaces
-
-# Abort if the number of interfaces != 1
-  - fail:
-      msg: Found more than one network interface, or none.
-    when: EXTERNAL_INTERFACE is not defined and number_interfaces.stdout != "1"
-
-  - shell: ls /sys/class/net | grep ^e.*
-    register: interface_name
-    when: EXTERNAL_INTERFACE is not defined
-
-  - set_fact:
-      ETH_INTERFACE: "{{ interface_name.stdout }}"
-    when: EXTERNAL_INTERFACE is not defined
-
-  - set_fact:
-      ETH_INTERFACE: "{{ EXTERNAL_INTERFACE }}"
-    when: EXTERNAL_INTERFACE is defined
-
-  - set_fact:
-      ETH_MAC_ADDRESS: "{{ hostvars['localhost']['ansible_' + ETH_INTERFACE]['macaddress'] }}"
-
-  - name: Create the /run/systemd/network
-    file:
-      path: /run/systemd/network
-      state: directory
-
-  - name: Disable dhcp on the bound physical interface
-    template:
-      src: /usr/share/openstack/extras/00-disable-device.network
-      dest: /run/systemd/network/00-disable-{{ item }}-config.network
-    with_items:
-    - "{{ ETH_INTERFACE }}"
-
-  - name: Disable dhcp on all the internal interfaces
-    template:
-      src: /usr/share/openstack/extras/00-disable-device.network
-      dest: /run/systemd/network/00-disable-{{ item }}-config.network
-    with_items:
-    - ovs-system
-
-  - openvswitch_bridge:
-      bridge: br-ex
-      state: present
-
-  - openvswitch_port:
-      bridge: br-ex
-      port: "{{ ETH_INTERFACE }}"
-      state: present
-
-  - shell: ovs-vsctl set bridge br-ex other-config:hwaddr={{ ETH_MAC_ADDRESS }}
-
-  - name: Enable dhcp on the Open vSwitch device that replaces our external interface
-    template:
-      src: /usr/share/openstack/extras/60-device-dhcp.network
-      dest: /run/systemd/network/60-{{ item }}-dhcp.network
-    with_items:
-    - br-ex
diff --git a/openstack/usr/share/openstack/neutron-config.yml b/openstack/usr/share/openstack/neutron-config.yml
deleted file mode 100644
index 97f4c76e..00000000
--- a/openstack/usr/share/openstack/neutron-config.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-- hosts: localhost
-  vars_files:
-  - "/etc/openstack/neutron.conf"
-  tasks:
-
-  - name: Create the neutron user.
-    user:
-      name: neutron
-      comment: Openstack Neutron Daemons
-      shell: /sbin/nologin
-      home: /var/lib/neutron
-
-  - name: Create the /var folders for neutron
-    file:
-      path: "{{ item }}"
-      state: directory
-      owner: neutron
-      group: neutron
-    with_items:
-    - /var/run/neutron
-    - /var/lock/neutron
-    - /var/log/neutron
-
-  - name: Get service tenant id needed in neutron.conf
-    shell: |
-      keystone \
-        --os-endpoint http://{{ CONTROLLER_HOST_ADDRESS|quote }}:35357/v2.0 \
-        --os-token {{ KEYSTONE_TEMPORARY_ADMIN_TOKEN|quote }} \
-        tenant-get service | grep id | tr -d " " | cut -d"|" -f3
-    register: tenant_service_id
-
-  - set_fact:
-      SERVICE_TENANT_ID: "{{ tenant_service_id.stdout }}"
-
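The grep/cut pipeline above scrapes the CLI's table output. The same lookup can be expressed directly against the keystone API with python-keystoneclient; a hedged sketch, with endpoint and token as placeholders rather than values from this playbook:

    from keystoneclient.v2_0 import client

    keystone = client.Client(
        endpoint='http://controller:35357/v2.0',  # placeholder
        token='TEMPORARY_ADMIN_TOKEN')            # placeholder

    # find() raises NotFound/NoUniqueMatch rather than silently
    # returning whatever a text match happens to hit.
    service_tenant = keystone.tenants.find(name='service')
    print(service_tenant.id)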
-  - name: Create the directories needed for Neutron configuration files.
-    file:
-      path: /etc/{{ item }}
-      state: directory
-    with_lines:
-    - cd /usr/share/openstack && find neutron -type d
-
-  - name: Add configuration needed for neutron using templates
-    template:
-      src: /usr/share/openstack/{{ item }}
-      dest: /etc/{{ item }}
-    with_lines:
-    - cd /usr/share/openstack && find neutron -type f
diff --git a/openstack/usr/share/openstack/neutron-db.yml b/openstack/usr/share/openstack/neutron-db.yml
deleted file mode 100644
index 91dde6fe..00000000
--- a/openstack/usr/share/openstack/neutron-db.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-- hosts: localhost
-  vars_files:
-  - "/etc/openstack/neutron.conf"
-  tasks:
-  - name: Create neutron service user in service tenant
-    keystone_user:
-      user: "{{ NEUTRON_SERVICE_USER }}"
-      password: "{{ NEUTRON_SERVICE_PASSWORD }}"
-      tenant: service
-      token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
-
-  - name: Add admin role to neutron service user in service tenant
-    keystone_user:
-      role: admin
-      user: "{{ NEUTRON_SERVICE_USER }}"
-      tenant: service
-      token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
-
-  - keystone_service:
-      name: neutron
-      type: network
-      description: Openstack Compute Networking
-      publicurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9696
-      internalurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9696
-      adminurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9696
-      region: regionOne
-      token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
-
-  - name: Create postgresql user for neutron
-    postgresql_user:
-      name: "{{ NEUTRON_DB_USER }}"
-      password: "{{ NEUTRON_DB_PASSWORD }}"
-    sudo: yes
-    sudo_user: neutron
-
-  - name: Create database for neutron services
-    postgresql_db:
-      name: neutron
-      owner: "{{ NEUTRON_DB_USER }}"
-    sudo: yes
-    sudo_user: neutron
-
-  - name: Initiate neutron database
-    shell: |
-      neutron-db-manage \
-        --config-file /etc/neutron/neutron.conf \
-        --config-file /etc/neutron/plugins/ml2/ml2_conf.ini \
-        upgrade juno
-    sudo: yes
-    sudo_user: neutron
diff --git a/openstack/usr/share/openstack/neutron/api-paste.ini b/openstack/usr/share/openstack/neutron/api-paste.ini
deleted file mode 100644
index bbcd4152..00000000
--- a/openstack/usr/share/openstack/neutron/api-paste.ini
+++ /dev/null
@@ -1,30 +0,0 @@
-[composite:neutron]
-use = egg:Paste#urlmap
-/: neutronversions
-/v2.0: neutronapi_v2_0
-
-[composite:neutronapi_v2_0]
-use = call:neutron.auth:pipeline_factory
-noauth = request_id catch_errors extensions neutronapiapp_v2_0
-keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0
-
-[filter:request_id]
-paste.filter_factory = neutron.openstack.common.middleware.request_id:RequestIdMiddleware.factory
-
-[filter:catch_errors]
-paste.filter_factory = neutron.openstack.common.middleware.catch_errors:CatchErrorsMiddleware.factory
-
-[filter:keystonecontext]
-paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
-
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-
-[filter:extensions]
-paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
-
-[app:neutronversions]
-paste.app_factory = neutron.api.versions:Versions.factory
-
-[app:neutronapiapp_v2_0]
-paste.app_factory = neutron.api.v2.router:APIRouter.factory
diff --git a/openstack/usr/share/openstack/neutron/dhcp_agent.ini b/openstack/usr/share/openstack/neutron/dhcp_agent.ini
deleted file mode 100644
index c6f9c7f6..00000000
--- a/openstack/usr/share/openstack/neutron/dhcp_agent.ini
+++ /dev/null
@@ -1,89 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG
log level output) -# debug = False -use_syslog = True - -# The DHCP agent will resync its state with Neutron to recover from any -# transient notification or rpc errors. The interval is number of -# seconds between attempts. -# resync_interval = 5 - -# The DHCP agent requires an interface driver be set. Choose the one that best -# matches your plugin. -# interface_driver = - -# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP, -# BigSwitch/Floodlight) -interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver - -# Name of Open vSwitch bridge to use -# ovs_integration_bridge = br-int - -# Use veth for an OVS interface or not. -# Support kernels with limited namespace support -# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. -# ovs_use_veth = False - -# Example of interface_driver option for LinuxBridge -# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver - -# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires -# no additional setup of the DHCP server. -dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq - -# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and -# iproute2 package that supports namespaces). -use_namespaces = True - -# The DHCP server can assist with providing metadata support on isolated -# networks. Setting this value to True will cause the DHCP server to append -# specific host routes to the DHCP request. The metadata service will only -# be activated when the subnet does not contain any router port. The guest -# instance must be configured to request host routes via DHCP (Option 121). -enable_isolated_metadata = True - -# Allows for serving metadata requests coming from a dedicated metadata -# access network whose cidr is 169.254.169.254/16 (or larger prefix), and -# is connected to a Neutron router from which the VMs send metadata -# request. In this case DHCP Option 121 will not be injected in VMs, as -# they will be able to reach 169.254.169.254 through a router. -# This option requires enable_isolated_metadata = True -# enable_metadata_network = False - -# Number of threads to use during sync process. Should not exceed connection -# pool size configured on server. -# num_sync_threads = 4 - -# Location to store DHCP server config files -# dhcp_confs = $state_path/dhcp - -# Domain to use for building the hostnames -# dhcp_domain = openstacklocal - -# Override the default dnsmasq settings with this file -# dnsmasq_config_file = - -# Comma-separated list of DNS servers which will be used by dnsmasq -# as forwarders. -# dnsmasq_dns_servers = - -# Limit number of leases to prevent a denial-of-service. -# dnsmasq_lease_max = 16777216 - -# Location to DHCP lease relay UNIX domain socket -# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay - -# Location of Metadata Proxy UNIX domain socket -# metadata_proxy_socket = $state_path/metadata_proxy - -# dhcp_delete_namespaces, which is false by default, can be set to True if -# namespaces can be deleted cleanly on the host running the dhcp agent. -# Do not enable this until you understand the problem with the Linux iproute -# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and -# you are sure that your version of iproute does not suffer from the problem. -# If True, namespaces will be deleted when a dhcp server is disabled. -# dhcp_delete_namespaces = False - -# Timeout for ovs-vsctl commands. -# If the timeout expires, ovs commands will fail with ALARMCLOCK error. 
-# ovs_vsctl_timeout = 10 diff --git a/openstack/usr/share/openstack/neutron/fwaas_driver.ini b/openstack/usr/share/openstack/neutron/fwaas_driver.ini deleted file mode 100644 index 41f761ab..00000000 --- a/openstack/usr/share/openstack/neutron/fwaas_driver.ini +++ /dev/null @@ -1,3 +0,0 @@ -[fwaas] -#driver = neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver -#enabled = True diff --git a/openstack/usr/share/openstack/neutron/l3_agent.ini b/openstack/usr/share/openstack/neutron/l3_agent.ini deleted file mode 100644 index 000cd997..00000000 --- a/openstack/usr/share/openstack/neutron/l3_agent.ini +++ /dev/null @@ -1,103 +0,0 @@ -[DEFAULT] -# Show debugging output in log (sets DEBUG log level output) -# debug = False -use_syslog = True - -# L3 requires that an interface driver be set. Choose the one that best -# matches your plugin. -# interface_driver = - -# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC) -# that supports L3 agent -interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver - -# Use veth for an OVS interface or not. -# Support kernels with limited namespace support -# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. -# ovs_use_veth = False - -# Example of interface_driver option for LinuxBridge -# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver - -# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and -# iproute2 package that supports namespaces). -use_namespaces = True - -# If use_namespaces is set as False then the agent can only configure one router. - -# This is done by setting the specific router_id. -# router_id = - -# When external_network_bridge is set, each L3 agent can be associated -# with no more than one external network. This value should be set to the UUID -# of that external network. To allow L3 agent support multiple external -# networks, both the external_network_bridge and gateway_external_network_id -# must be left empty. -# gateway_external_network_id = - -# Indicates that this L3 agent should also handle routers that do not have -# an external network gateway configured. This option should be True only -# for a single agent in a Neutron deployment, and may be False for all agents -# if all routers must have an external network gateway -# handle_internal_only_routers = True - -# Name of bridge used for external network traffic. This should be set to -# empty value for the linux bridge. when this parameter is set, each L3 agent -# can be associated with no more than one external network. -external_network_bridge = br-ex - -# TCP Port used by Neutron metadata server -# metadata_port = 9697 - -# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 -# to disable this feature. -# send_arp_for_ha = 3 - -# seconds between re-sync routers' data if needed -# periodic_interval = 40 - -# seconds to start to sync routers' data after -# starting agent -# periodic_fuzzy_delay = 5 - -# enable_metadata_proxy, which is true by default, can be set to False -# if the Nova metadata server is not available -# enable_metadata_proxy = True - -# Location of Metadata Proxy UNIX domain socket -# metadata_proxy_socket = $state_path/metadata_proxy - -# router_delete_namespaces, which is false by default, can be set to True if -# namespaces can be deleted cleanly on the host running the L3 agent. 
-# Do not enable this until you understand the problem with the Linux iproute -# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and -# you are sure that your version of iproute does not suffer from the problem. -# If True, namespaces will be deleted when a router is destroyed. -# router_delete_namespaces = False - -# Timeout for ovs-vsctl commands. -# If the timeout expires, ovs commands will fail with ALARMCLOCK error. -# ovs_vsctl_timeout = 10 - -# The working mode for the agent. Allowed values are: -# - legacy: this preserves the existing behavior where the L3 agent is -# deployed on a centralized networking node to provide L3 services -# like DNAT, and SNAT. Use this mode if you do not want to adopt DVR. -# - dvr: this mode enables DVR functionality, and must be used for an L3 -# agent that runs on a compute host. -# - dvr_snat: this enables centralized SNAT support in conjunction with -# DVR. This mode must be used for an L3 agent running on a centralized -# node (or in single-host deployments, e.g. devstack). -# agent_mode = legacy - -# Location to store keepalived and all HA configurations -# ha_confs_path = $state_path/ha_confs - -# VRRP authentication type AH/PASS -# ha_vrrp_auth_type = PASS - -# VRRP authentication password -# ha_vrrp_auth_password = - -# The advertisement interval in seconds -# ha_vrrp_advert_int = 2 diff --git a/openstack/usr/share/openstack/neutron/lbaas_agent.ini b/openstack/usr/share/openstack/neutron/lbaas_agent.ini deleted file mode 100644 index 68a2759e..00000000 --- a/openstack/usr/share/openstack/neutron/lbaas_agent.ini +++ /dev/null @@ -1,42 +0,0 @@ -[DEFAULT] -# Show debugging output in log (sets DEBUG log level output). -# debug = False - -# The LBaaS agent will resync its state with Neutron to recover from any -# transient notification or rpc errors. The interval is number of -# seconds between attempts. -# periodic_interval = 10 - -# LBaas requires an interface driver be set. Choose the one that best -# matches your plugin. -# interface_driver = - -# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP, -# BigSwitch/Floodlight) -# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver - -# Use veth for an OVS interface or not. -# Support kernels with limited namespace support -# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. -# ovs_use_veth = False - -# Example of interface_driver option for LinuxBridge -# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver - -# The agent requires drivers to manage the loadbalancer. HAProxy is the opensource version. -# Multiple device drivers reflecting different service providers could be specified: -# device_driver = path.to.provider1.driver.Driver -# device_driver = path.to.provider2.driver.Driver -# Default is: -# device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver - -[haproxy] -# Location to store config and state files -# loadbalancer_state_path = $state_path/lbaas - -# The user group -# user_group = nogroup - -# When delete and re-add the same vip, send this many gratuitous ARPs to flush -# the ARP cache in the Router. Set it below or equal to 0 to disable this feature. 
-# send_gratuitous_arp = 3 diff --git a/openstack/usr/share/openstack/neutron/metadata_agent.ini b/openstack/usr/share/openstack/neutron/metadata_agent.ini deleted file mode 100644 index ed238770..00000000 --- a/openstack/usr/share/openstack/neutron/metadata_agent.ini +++ /dev/null @@ -1,60 +0,0 @@ -[DEFAULT] -# Show debugging output in log (sets DEBUG log level output) -# debug = True -use_syslog = True - -# The Neutron user information for accessing the Neutron API. -auth_url = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 -auth_region = regionOne -# Turn off verification of the certificate for ssl -# auth_insecure = False -# Certificate Authority public key (CA cert) file for ssl -# auth_ca_cert = -admin_tenant_name = service -admin_user = {{ NEUTRON_SERVICE_USER }} -admin_password = {{ NEUTRON_SERVICE_PASSWORD }} - -# Network service endpoint type to pull from the keystone catalog -# endpoint_type = adminURL - -# IP address used by Nova metadata server -nova_metadata_ip = {{ CONTROLLER_HOST_ADDRESS }} - -# TCP Port used by Nova metadata server -# nova_metadata_port = 8775 - -# Which protocol to use for requests to Nova metadata server, http or https -# nova_metadata_protocol = http - -# Whether insecure SSL connection should be accepted for Nova metadata server -# requests -# nova_metadata_insecure = False - -# Client certificate for nova api, needed when nova api requires client -# certificates -# nova_client_cert = - -# Private key for nova client certificate -# nova_client_priv_key = - -# When proxying metadata requests, Neutron signs the Instance-ID header with a -# shared secret to prevent spoofing. You may select any string for a secret, -# but it must match here and in the configuration used by the Nova Metadata -# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret -metadata_proxy_shared_secret = {{ METADATA_PROXY_SHARED_SECRET }} - -# Location of Metadata Proxy UNIX domain socket -# metadata_proxy_socket = $state_path/metadata_proxy - -# Number of separate worker processes for metadata server. Defaults to -# half the number of CPU cores -# metadata_workers = - -# Number of backlog requests to configure the metadata server socket with -# metadata_backlog = 4096 - -# URL to connect to the cache backend. -# default_ttl=0 parameter will cause cache entries to never expire. -# Otherwise default_ttl specifies time in seconds a cache entry is valid for. -# No cache is used in case no value is passed. 
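The shared secret above is used for plain HMAC signing. A minimal sketch of the signing step, with a hypothetical helper name (the agent and the Nova metadata server both derive this from the same option):

    import hashlib
    import hmac

    def sign_instance_id(shared_secret, instance_id):
        # A request whose X-Instance-ID-Signature header does not match
        # this digest is rejected, preventing Instance-ID spoofing.
        return hmac.new(shared_secret, instance_id,
                        hashlib.sha256).hexdigest()

    print(sign_instance_id(b'secret', b'instance-uuid'))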
-# cache_url = memory://?default_ttl=5 diff --git a/openstack/usr/share/openstack/neutron/metering_agent.ini b/openstack/usr/share/openstack/neutron/metering_agent.ini deleted file mode 100644 index 88826ce7..00000000 --- a/openstack/usr/share/openstack/neutron/metering_agent.ini +++ /dev/null @@ -1,18 +0,0 @@ -[DEFAULT] -# Show debugging output in log (sets DEBUG log level output) -# debug = True - -# Default driver: -# driver = neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver -# Example of non-default driver -# driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver - -# Interval between two metering measures -# measure_interval = 30 - -# Interval between two metering reports -# report_interval = 300 - -# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver - -# use_namespaces = True diff --git a/openstack/usr/share/openstack/neutron/neutron.conf b/openstack/usr/share/openstack/neutron/neutron.conf deleted file mode 100644 index 51de7464..00000000 --- a/openstack/usr/share/openstack/neutron/neutron.conf +++ /dev/null @@ -1,640 +0,0 @@ -[DEFAULT] -# Print more verbose output (set logging level to INFO instead of default WARNING level). -# verbose = False - -# =========Start Global Config Option for Distributed L3 Router=============== -# Setting the "router_distributed" flag to "True" will default to the creation -# of distributed tenant routers. The admin can override this flag by specifying -# the type of the router on the create request (admin-only attribute). Default -# value is "False" to support legacy mode (centralized) routers. -# -# router_distributed = False -# -# ===========End Global Config Option for Distributed L3 Router=============== - -# Print debugging output (set logging level to DEBUG instead of default WARNING level). -# debug = False - -# Where to store Neutron state files. This directory must be writable by the -# user executing the agent. -state_path = /var/lib/neutron - -# Where to store lock files -lock_path = $state_path/lock - -# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s -# log_date_format = %Y-%m-%d %H:%M:%S - -# use_syslog -> syslog -# log_file and log_dir -> log_dir/log_file -# (not log_file) and log_dir -> log_dir/{binary_name}.log -# use_stderr -> stderr -# (not user_stderr) and (not log_file) -> stdout -# publish_errors -> notification system - -use_syslog = True - -# syslog_log_facility = LOG_USER - -# use_stderr = True -# log_file = -# log_dir = - -# publish_errors = False - -# Address to bind the API server to -# bind_host = 0.0.0.0 - -# Port the bind the API server to -# bind_port = 9696 - -# Path to the extensions. Note that this can be a colon-separated list of -# paths. For example: -# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions -# The __path__ of neutron.extensions is appended to this, so if your -# extensions are in there you don't need to specify them here -# api_extensions_path = - -# (StrOpt) Neutron core plugin entrypoint to be loaded from the -# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the -# plugins included in the neutron source distribution. For compatibility with -# previous versions, the class name of a plugin can be specified instead of its -# entrypoint name. -# -core_plugin = ml2 -# Example: core_plugin = ml2 - -# (ListOpt) List of service plugin entrypoints to be loaded from the -# neutron.service_plugins namespace. 
See setup.cfg for the entrypoint names of -# the plugins included in the neutron source distribution. For compatibility -# with previous versions, the class name of a plugin can be specified instead -# of its entrypoint name. -# -service_plugins = router -# Example: service_plugins = router,firewall,lbaas,vpnaas,metering - -# Paste configuration file -api_paste_config = api-paste.ini - -# The strategy to be used for auth. -# Supported values are 'keystone'(default), 'noauth'. -auth_strategy = keystone - -# Base MAC address. The first 3 octets will remain unchanged. If the -# 4h octet is not 00, it will also be used. The others will be -# randomly generated. -# 3 octet -# base_mac = fa:16:3e:00:00:00 -# 4 octet -# base_mac = fa:16:3e:4f:00:00 - -# DVR Base MAC address. The first 3 octets will remain unchanged. If the -# 4th octet is not 00, it will also be used. The others will be randomly -# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to -# avoid mixing them up with MAC's allocated for tenant ports. -# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00 -# The default is 3 octet -# dvr_base_mac = fa:16:3f:00:00:00 - -# Maximum amount of retries to generate a unique MAC address -# mac_generation_retries = 16 - -# DHCP Lease duration (in seconds). Use -1 to -# tell dnsmasq to use infinite lease times. -# dhcp_lease_duration = 86400 - -# Allow sending resource operation notification to DHCP agent -# dhcp_agent_notification = True - -# Enable or disable bulk create/update/delete operations -# allow_bulk = True -# Enable or disable pagination -# allow_pagination = False -# Enable or disable sorting -# allow_sorting = False -# Enable or disable overlapping IPs for subnets -# Attention: the following parameter MUST be set to False if Neutron is -# being used in conjunction with nova security groups -allow_overlapping_ips = True -# Ensure that configured gateway is on subnet. For IPv6, validate only if -# gateway is not a link local address. Deprecated, to be removed during the -# K release, at which point the check will be mandatory. -# force_gateway_on_subnet = True - -# Default maximum number of items returned in a single response, -# value == infinite and value < 0 means no max limit, and value must -# be greater than 0. If the number of items requested is greater than -# pagination_max_limit, server will just return pagination_max_limit -# of number of items. 
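As an illustration of the base_mac semantics described earlier in this file, a rough sketch (not Neutron's actual implementation): the first three octets are always kept, and any later octet given as 00 is randomised, so a four-octet base keeps its fourth octet too.

    import random

    def generate_mac(base_mac='fa:16:3e:00:00:00'):
        octets = base_mac.split(':')
        tail = [o if o != '00' else '%02x' % random.randint(0, 255)
                for o in octets[3:]]
        return ':'.join(octets[:3] + tail)

    print(generate_mac())                      # e.g. fa:16:3e:5d:a1:07
    print(generate_mac('fa:16:3e:4f:00:00'))   # keeps the 4th octet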
-# pagination_max_limit = -1 - -# Maximum number of DNS nameservers per subnet -# max_dns_nameservers = 5 - -# Maximum number of host routes per subnet -# max_subnet_host_routes = 20 - -# Maximum number of fixed ips per port -# max_fixed_ips_per_port = 5 - -# Maximum number of routes per router -# max_routes = 30 - -# =========== items for agent management extension ============= -# Seconds to regard the agent as down; should be at least twice -# report_interval, to be sure the agent is down for good -# agent_down_time = 75 -# =========== end of items for agent management extension ===== - -# =========== items for agent scheduler extension ============= -# Driver to use for scheduling network to DHCP agent -# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler -# Driver to use for scheduling router to a default L3 agent -# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler -# Driver to use for scheduling a loadbalancer pool to an lbaas agent -# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler - -# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted -# networks to first DHCP agent which sends get_active_networks message to -# neutron server -# network_auto_schedule = True - -# Allow auto scheduling routers to L3 agent. It will schedule non-hosted -# routers to first L3 agent which sends sync_routers message to neutron server -# router_auto_schedule = True - -# Allow automatic rescheduling of routers from dead L3 agents with -# admin_state_up set to True to alive agents. -# allow_automatic_l3agent_failover = False - -# Number of DHCP agents scheduled to host a network. This enables redundant -# DHCP agents for configured networks. -# dhcp_agents_per_network = 1 - -# =========== end of items for agent scheduler extension ===== - -# =========== items for l3 extension ============== -# Enable high availability for virtual routers. -# l3_ha = False -# -# Maximum number of l3 agents which a HA router will be scheduled on. If it -# is set to 0 the router will be scheduled on every agent. -# max_l3_agents_per_router = 3 -# -# Minimum number of l3 agents which a HA router will be scheduled on. The -# default value is 2. -# min_l3_agents_per_router = 2 -# -# CIDR of the administrative network if HA mode is enabled -# l3_ha_net_cidr = 169.254.192.0/18 -# =========== end of items for l3 extension ======= - -# =========== WSGI parameters related to the API server ============== -# Number of separate worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as workers. The parent process manages them. -# api_workers = 0 - -# Number of separate RPC worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as RPC workers. The parent process manages them. -# This feature is experimental until issues are addressed and testing has been -# enabled for various plugins for compatibility. -# rpc_workers = 0 - -# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when -# starting API server. Not supported on OS X. -# tcp_keepidle = 600 - -# Number of seconds to keep retrying to listen -# retry_until_window = 30 - -# Number of backlog requests to configure the socket with. 
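tcp_keepidle and the backlog option just below map directly onto standard socket options. A sketch of what an API server does with them, assuming Linux (TCP_KEEPIDLE is not available on OS X, hence the caveat above); illustrative, not the actual WSGI server code:

    import socket

    def make_api_socket(bind_host='0.0.0.0', bind_port=9696,
                        tcp_keepidle=600, backlog=4096):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        if hasattr(socket, 'TCP_KEEPIDLE'):
            # Seconds of idleness before TCP keepalive probes start.
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                            tcp_keepidle)
        sock.bind((bind_host, bind_port))
        sock.listen(backlog)
        return sock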
-# backlog = 4096 - -# Max header line to accommodate large tokens -# max_header_line = 16384 - -# Enable SSL on the API server -# use_ssl = False - -# Certificate file to use when starting API server securely -# ssl_cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -# ssl_key_file = /path/to/keyfile - -# CA certificate file to use when starting API server securely to -# verify connecting clients. This is an optional parameter only required if -# API clients need to authenticate to the API server using SSL certificates -# signed by a trusted CA -# ssl_ca_file = /path/to/cafile -# ======== end of WSGI parameters related to the API server ========== - - -# ======== neutron nova interactions ========== -# Send notification to nova when port status is active. -notify_nova_on_port_status_changes = True - -# Send notifications to nova when port data (fixed_ips/floatingips) change -# so nova can update it's cache. -notify_nova_on_port_data_changes = True - -# URL for connection to nova (Only supports one nova region currently). -nova_url = http://{{ CONTROLLER_HOST_ADDRESS }}:8774/v2 - -# Name of nova region to use. Useful if keystone manages more than one region -nova_region_name = regionOne - -# Username for connection to nova in admin context -nova_admin_username = {{ NOVA_SERVICE_USER }} - -# The uuid of the admin nova tenant -nova_admin_tenant_id = {{ SERVICE_TENANT_ID }} - -# Password for connection to nova in admin context. -nova_admin_password = {{ NOVA_SERVICE_PASSWORD }} - -# Authorization URL for connection to nova in admin context. -nova_admin_auth_url = http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0 - -# CA file for novaclient to verify server certificates -# nova_ca_certificates_file = - -# Boolean to control ignoring SSL errors on the nova url -# nova_api_insecure = False - -# Number of seconds between sending events to nova if there are any events to send -# send_events_interval = 2 - -# ======== end of neutron nova interactions ========== - -# -# Options defined in oslo.messaging -# - -# Use durable queues in amqp. (boolean value) -# Deprecated group/name - [DEFAULT]/rabbit_durable_queues -#amqp_durable_queues=false - -# Auto-delete queues in amqp. (boolean value) -#amqp_auto_delete=false - -# Size of RPC connection pool. (integer value) -#rpc_conn_pool_size=30 - -# Qpid broker hostname. (string value) -#qpid_hostname=localhost - -# Qpid broker port. (integer value) -#qpid_port=5672 - -# Qpid HA cluster host:port pairs. (list value) -#qpid_hosts=$qpid_hostname:$qpid_port - -# Username for Qpid connection. (string value) -#qpid_username= - -# Password for Qpid connection. (string value) -#qpid_password= - -# Space separated list of SASL mechanisms to use for auth. -# (string value) -#qpid_sasl_mechanisms= - -# Seconds between connection keepalive heartbeats. (integer -# value) -#qpid_heartbeat=60 - -# Transport to use, either 'tcp' or 'ssl'. (string value) -#qpid_protocol=tcp - -# Whether to disable the Nagle algorithm. (boolean value) -#qpid_tcp_nodelay=true - -# The qpid topology version to use. Version 1 is what was -# originally used by impl_qpid. Version 2 includes some -# backwards-incompatible changes that allow broker federation -# to work. Users should update to version 2 when they are -# able to take everything down, as it requires a clean break. -# (integer value) -#qpid_topology_version=1 - -# SSL version to use (valid only if SSL enabled). valid values -# are TLSv1, SSLv23 and SSLv3. 
SSLv2 may be available on some -# distributions. (string value) -#kombu_ssl_version= - -# SSL key file (valid only if SSL enabled). (string value) -#kombu_ssl_keyfile= - -# SSL cert file (valid only if SSL enabled). (string value) -#kombu_ssl_certfile= - -# SSL certification authority file (valid only if SSL -# enabled). (string value) -#kombu_ssl_ca_certs= - -# How long to wait before reconnecting in response to an AMQP -# consumer cancel notification. (floating point value) -#kombu_reconnect_delay=1.0 - -# The RabbitMQ broker address where a single node is used. -# (string value) -rabbit_host={{ RABBITMQ_HOST }} - -# The RabbitMQ broker port where a single node is used. -# (integer value) -rabbit_port={{ RABBITMQ_PORT }} - -# RabbitMQ HA cluster host:port pairs. (list value) -#rabbit_hosts=$rabbit_host:$rabbit_port - -# Connect over SSL for RabbitMQ. (boolean value) -#rabbit_use_ssl=false - -# The RabbitMQ userid. (string value) -rabbit_userid={{ RABBITMQ_USER }} - -# The RabbitMQ password. (string value) -rabbit_password={{ RABBITMQ_PASSWORD }} - -# the RabbitMQ login method (string value) -#rabbit_login_method=AMQPLAIN - -# The RabbitMQ virtual host. (string value) -#rabbit_virtual_host=/ - -# How frequently to retry connecting with RabbitMQ. (integer -# value) -#rabbit_retry_interval=1 - -# How long to backoff for between retries when connecting to -# RabbitMQ. (integer value) -#rabbit_retry_backoff=2 - -# Maximum number of RabbitMQ connection retries. Default is 0 -# (infinite retry count). (integer value) -#rabbit_max_retries=0 - -# Use HA queues in RabbitMQ (x-ha-policy: all). If you change -# this option, you must wipe the RabbitMQ database. (boolean -# value) -#rabbit_ha_queues=false - -# If passed, use a fake RabbitMQ provider. (boolean value) -#fake_rabbit=false - -# ZeroMQ bind address. Should be a wildcard (*), an ethernet -# interface, or IP. The "host" option should point or resolve -# to this address. (string value) -#rpc_zmq_bind_address=* - -# MatchMaker driver. (string value) -#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost - -# ZeroMQ receiver listening port. (integer value) -#rpc_zmq_port=9501 - -# Number of ZeroMQ contexts, defaults to 1. (integer value) -#rpc_zmq_contexts=1 - -# Maximum number of ingress messages to locally buffer per -# topic. Default is unlimited. (integer value) -#rpc_zmq_topic_backlog= - -# Directory for holding IPC sockets. (string value) -#rpc_zmq_ipc_dir=/var/run/openstack - -# Name of this node. Must be a valid hostname, FQDN, or IP -# address. Must match "host" option, if running Nova. (string -# value) -#rpc_zmq_host=oslo - -# Seconds to wait before a cast expires (TTL). Only supported -# by impl_zmq. (integer value) -#rpc_cast_timeout=30 - -# Heartbeat frequency. (integer value) -#matchmaker_heartbeat_freq=300 - -# Heartbeat time-to-live. (integer value) -#matchmaker_heartbeat_ttl=600 - -# Size of RPC greenthread pool. (integer value) -#rpc_thread_pool_size=64 - -# Driver or drivers to handle sending notifications. (multi -# valued) -notification_driver=neutron.openstack.common.notifier.rpc_notifier - -# AMQP topic used for OpenStack notifications. (list value) -# Deprecated group/name - [rpc_notifier2]/topics -#notification_topics=notifications - -# Seconds to wait for a response from a call. (integer value) -#rpc_response_timeout=60 - -# A URL representing the messaging driver to use and its full -# configuration. If not set, we fall back to the rpc_backend -# option and driver specific configuration. 
(string value) -#transport_url= - -# The messaging driver to use, defaults to rabbit. Other -# drivers include qpid and zmq. (string value) -rpc_backend=rabbit - -# The default exchange under which topics are scoped. May be -# overridden by an exchange name specified in the -# transport_url option. (string value) -#control_exchange=openstack - - -[matchmaker_redis] - -# -# Options defined in oslo.messaging -# - -# Host to locate redis. (string value) -#host=127.0.0.1 - -# Use this port to connect to redis host. (integer value) -#port=6379 - -# Password for Redis server (optional). (string value) -#password= - - -[matchmaker_ring] - -# -# Options defined in oslo.messaging -# - -# Matchmaker ring file (JSON). (string value) -# Deprecated group/name - [DEFAULT]/matchmaker_ringfile -#ringfile=/etc/oslo/matchmaker_ring.json - -[quotas] -# Default driver to use for quota checks -# quota_driver = neutron.db.quota_db.DbQuotaDriver - -# Resource name(s) that are supported in quota features -# quota_items = network,subnet,port - -# Default number of resource allowed per tenant. A negative value means -# unlimited. -# default_quota = -1 - -# Number of networks allowed per tenant. A negative value means unlimited. -# quota_network = 10 - -# Number of subnets allowed per tenant. A negative value means unlimited. -# quota_subnet = 10 - -# Number of ports allowed per tenant. A negative value means unlimited. -# quota_port = 50 - -# Number of security groups allowed per tenant. A negative value means -# unlimited. -# quota_security_group = 10 - -# Number of security group rules allowed per tenant. A negative value means -# unlimited. -# quota_security_group_rule = 100 - -# Number of vips allowed per tenant. A negative value means unlimited. -# quota_vip = 10 - -# Number of pools allowed per tenant. A negative value means unlimited. -# quota_pool = 10 - -# Number of pool members allowed per tenant. A negative value means unlimited. -# The default is unlimited because a member is not a real resource consumer -# on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_member = -1 - -# Number of health monitors allowed per tenant. A negative value means -# unlimited. -# The default is unlimited because a health monitor is not a real resource -# consumer on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_health_monitor = -1 - -# Number of routers allowed per tenant. A negative value means unlimited. -# quota_router = 10 - -# Number of floating IPs allowed per tenant. A negative value means unlimited. -# quota_floatingip = 50 - -# Number of firewalls allowed per tenant. A negative value means unlimited. -# quota_firewall = 1 - -# Number of firewall policies allowed per tenant. A negative value means -# unlimited. -# quota_firewall_policy = 1 - -# Number of firewall rules allowed per tenant. A negative value means -# unlimited. -# quota_firewall_rule = 100 - -[agent] -# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real -# root filter facility. 
-# Change to "sudo" to skip the filtering and just run the comand directly -# root_helper = sudo -root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf - -# =========== items for agent management extension ============= -# seconds between nodes reporting state to server; should be less than -# agent_down_time, best if it is half or less than agent_down_time -# report_interval = 30 - -# =========== end of items for agent management extension ===== - -[keystone_authtoken] -auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 -identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357 -admin_tenant_name = service -admin_user = {{ NEUTRON_SERVICE_USER }} -admin_password = {{ NEUTRON_SERVICE_PASSWORD }} - -[database] -# This line MUST be changed to actually run the plugin. -# Example: -# connection = mysql://root:pass@127.0.0.1:3306/neutron -# Replace 127.0.0.1 above with the IP address of the database used by the -# main neutron server. (Leave it as is if the database runs on this host.) -# connection = sqlite:// -# NOTE: In deployment the [database] section and its connection attribute may -# be set in the corresponding core plugin '.ini' file. However, it is suggested -# to put the [database] section and its connection attribute in this -# configuration file. -#connection=sqlite:////var/lib/neutron/neutron.sqlite -connection=postgresql://{{ NEUTRON_DB_USER }}:{{ NEUTRON_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/neutron - -# Database engine for which script will be generated when using offline -# migration -# engine = - -# The SQLAlchemy connection string used to connect to the slave database -# slave_connection = - -# Database reconnection retry times - in event connectivity is lost -# set to -1 implies an infinite retry count -# max_retries = 10 - -# Database reconnection interval in seconds - if the initial connection to the -# database fails -# retry_interval = 10 - -# Minimum number of SQL connections to keep open in a pool -# min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -# max_pool_size = 10 - -# Timeout in seconds before idle sql connections are reaped -# idle_timeout = 3600 - -# If set, use this value for max_overflow with sqlalchemy -# max_overflow = 20 - -# Verbosity of SQL debugging information. 0=None, 100=Everything -# connection_debug = 0 - -# Add python stack traces to SQL as comment strings -# connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -# pool_timeout = 10 - -[service_providers] -# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. -# Must be in form: -# service_provider=::[:default] -# List of allowed service types includes LOADBALANCER, FIREWALL, VPN -# Combination of and must be unique; must also be unique -# This is multiline option, example for default provider: -# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default -# example of non-default provider: -# service_provider=FIREWALL:name2:firewall_driver_path -# --- Reference implementations --- -service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default -service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default -# In order to activate Radware's lbaas driver you need to uncomment the next line. -# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. 
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
-# Uncomment the line below to use the A10 Networks LBaaS driver. Requires 'pip install a10-neutron-lbaas'.
-#service_provider = LOADBALANCER:A10Networks:neutron.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default
-# Uncomment the following line to test the LBaaS v2 API _WITHOUT_ a real backend
-# service_provider = LOADBALANCER:LoggingNoop:neutron.services.loadbalancer.drivers.logging_noop.driver.LoggingNoopLoadBalancerDriver:default
diff --git a/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini b/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini
deleted file mode 100644
index 256f7855..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini
+++ /dev/null
@@ -1,114 +0,0 @@
-# Config file for neutron-proxy-plugin.
-
-[restproxy]
-# All configuration for this plugin is in section '[restproxy]'
-#
-# The following parameters are supported:
-# servers : <host:port>[,<host:port>]* (Error if not set)
-# server_auth : <username:password> (default: no auth)
-# server_ssl : True | False (default: True)
-# ssl_cert_directory : <path> (default: /etc/neutron/plugins/bigswitch/ssl)
-# no_ssl_validation : True | False (default: False)
-# ssl_sticky : True | False (default: True)
-# sync_data : True | False (default: False)
-# auto_sync_on_failure : True | False (default: True)
-# consistency_interval : <integer> (default: 60 seconds)
-# server_timeout : <integer> (default: 10 seconds)
-# neutron_id : <string> (default: neutron-<hostname>)
-# add_meta_server_route : True | False (default: True)
-# thread_pool_size : <integer> (default: 4)
-
-# A comma separated list of BigSwitch or Floodlight servers and port numbers. The plugin proxies the requests to the BigSwitch/Floodlight server, which performs the networking configuration. Note that only one server is needed per deployment, but you may wish to deploy multiple servers to support failover.
-servers=localhost:8080
-
-# The username and password for authenticating against the BigSwitch or Floodlight controller.
-# server_auth=username:password
-
-# Use SSL when connecting to the BigSwitch or Floodlight controller.
-# server_ssl=True
-
-# Directory which contains the ca_certs and host_certs to be used to validate
-# controller certificates.
-# ssl_cert_directory=/etc/neutron/plugins/bigswitch/ssl/
-
-# If a certificate does not exist for a controller, trust and store the first
-# certificate received for that controller and use it to validate future
-# connections to that controller.
-# ssl_sticky=True
-
-# Do not validate the controller certificates for SSL
-# Warning: This will not provide protection against man-in-the-middle attacks
-# no_ssl_validation=False
-
-# Sync data on connect
-# sync_data=False
-
-# If neutron fails to create a resource because the backend controller
-# doesn't know of a dependency, automatically trigger a full data
-# synchronization to the controller.
-# auto_sync_on_failure=True
-
-# Time between verifications that the backend controller
-# database is consistent with Neutron. (0 to disable)
-# consistency_interval = 60
-
-# Maximum number of seconds to wait for proxy request to connect and complete.
-# server_timeout=10
-
-# User defined identifier for this Neutron deployment
-# neutron_id =
-
-# Flag to decide if a route to the metadata server should be injected into the VM
-# add_meta_server_route = True
-
-# Number of threads to use to handle large volumes of port creation requests
-# thread_pool_size = 4
-
-[nova]
-# Specify the VIF_TYPE that will be controlled on the Nova compute instances
-# options: ivs or ovs
-# default: ovs
-# vif_type = ovs
-
-# Overrides for vif types based on nova compute node host IDs
-# Comma separated list of host IDs to fix to a specific VIF type
-# The VIF type is taken from the end of the configuration item
-# node_override_vif_<vif_type>
-# For example, the following would set the VIF type to IVS for
-# host-id1 and host-id2
-# node_override_vif_ivs=host-id1,host-id2
-
-[router]
-# Specify the default router rules installed in newly created tenant routers
-# Specify multiple times for multiple rules
-# Format is <tenant>:<source>:<destination>:<action>
-# Optionally, a comma-separated list of nexthops may be included after <action>
-# Use an * to specify default for all tenants
-# Default is any any allow for all tenants
-# tenant_default_router_rule=*:any:any:permit
-
-[restproxyagent]
-
-# Specify the name of the bridge used on compute nodes
-# for attachment.
-# Default: br-int
-# integration_bridge=br-int
-
-# Change the frequency of polling by the restproxy agent.
-# Value is seconds
-# Default: 5
-# polling_interval=5
-
-# Virtual switch type on the compute node.
-# Options: ovs or ivs
-# Default: ovs
-# virtual_switch_type = ovs
-
-[securitygroup]
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
-# enable_security_group = True
diff --git a/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README b/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README
deleted file mode 100644
index e7e47a27..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README
+++ /dev/null
@@ -1,3 +0,0 @@
-Certificates in this folder will be used to
-verify signatures for any controllers the plugin
-connects to.
diff --git a/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README b/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README
deleted file mode 100644
index 8f5f5e77..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README
+++ /dev/null
@@ -1,6 +0,0 @@
-Certificates in this folder must match the name
-of the controller they should be used to authenticate,
-with a .pem extension.
-
-For example, the certificate for the controller
-"192.168.0.1" should be named "192.168.0.1.pem".
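Taken together, ssl_sticky and the host_certs layout described in the README above amount to trust-on-first-use certificate pinning. A simplified sketch of the idea (illustrative only, not the plugin's actual code):

    import os
    import ssl

    def controller_cert_ok(host, port, cert_dir):
        # First contact: store the controller's certificate under
        # <host>.pem and trust it (the "sticky" behaviour). On later
        # contacts the controller must present the same certificate.
        pem = ssl.get_server_certificate((host, port))
        path = os.path.join(cert_dir, host + '.pem')
        if not os.path.exists(path):
            with open(path, 'w') as f:
                f.write(pem)
            return True
        with open(path) as f:
            return f.read() == pem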
diff --git a/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini b/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini deleted file mode 100644 index 916e9e5d..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini +++ /dev/null @@ -1,29 +0,0 @@ -[switch] -# username = The SSH username to use -# password = The SSH password to use -# address = The address of the host to SSH to -# ostype = Should be NOS, but is unused otherwise -# -# Example: -# username = admin -# password = password -# address = 10.24.84.38 -# ostype = NOS - -[physical_interface] -# physical_interface = The network interface to use when creating a port -# -# Example: -# physical_interface = physnet1 - -[vlans] -# network_vlan_ranges = :nnnn:mmmm -# -# Example: -# network_vlan_ranges = physnet1:1000:2999 - -[linux_bridge] -# physical_interface_mappings = : -# -# Example: -# physical_interface_mappings = physnet1:em1 diff --git a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini b/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini deleted file mode 100644 index d99e8382..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini +++ /dev/null @@ -1,15 +0,0 @@ -[cfg_agent] -# (IntOpt) Interval in seconds for processing of service updates. -# That is when the config agent's process_services() loop executes -# and it lets each service helper to process its service resources. -# rpc_loop_interval = 10 - -# (StrOpt) Period-separated module path to the routing service helper class. -# routing_svc_helper_class = neutron.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper.RoutingServiceHelper - -# (IntOpt) Timeout value in seconds for connecting to a hosting device. -# device_connection_timeout = 30 - -# (IntOpt) The time in seconds until a backlogged hosting device is -# presumed dead or booted to an error state. -# hosting_device_dead_timeout = 300 diff --git a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini b/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini deleted file mode 100644 index 17eae737..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini +++ /dev/null @@ -1,100 +0,0 @@ -[cisco] - -# (StrOpt) A short prefix to prepend to the VLAN number when creating a -# VLAN interface. For example, if an interface is being created for -# VLAN 2001 it will be named 'q-2001' using the default prefix. -# -# vlan_name_prefix = q- -# Example: vlan_name_prefix = vnet- - -# (StrOpt) A short prefix to prepend to the VLAN number when creating a -# provider VLAN interface. For example, if an interface is being created -# for provider VLAN 3003 it will be named 'p-3003' using the default prefix. -# -# provider_vlan_name_prefix = p- -# Example: provider_vlan_name_prefix = PV- - -# (BoolOpt) A flag indicating whether Openstack networking should manage the -# creation and removal of VLAN interfaces for provider networks on the Nexus -# switches. If the flag is set to False then Openstack will not create or -# remove VLAN interfaces for provider networks, and the administrator needs -# to manage these interfaces manually or by external orchestration. -# -# provider_vlan_auto_create = True - -# (BoolOpt) A flag indicating whether Openstack networking should manage -# the adding and removing of provider VLANs from trunk ports on the Nexus -# switches. 
If the flag is set to False then Openstack will not add or -# remove provider VLANs from trunk ports, and the administrator needs to -# manage these operations manually or by external orchestration. -# -# provider_vlan_auto_trunk = True - -# (StrOpt) Period-separated module path to the model class to use for -# the Cisco neutron plugin. -# -# model_class = neutron.plugins.cisco.models.virt_phy_sw_v2.VirtualPhysicalSwitchModelV2 - -# (BoolOpt) A flag to enable Layer 3 support on the Nexus switches. -# Note: This feature is not supported on all models/versions of Cisco -# Nexus switches. To use this feature, all of the Nexus switches in the -# deployment must support it. -# nexus_l3_enable = False - -# (BoolOpt) A flag to enable round robin scheduling of routers for SVI. -# svi_round_robin = False - -# Cisco Nexus Switch configurations. -# Each switch to be managed by Openstack Neutron must be configured here. -# -# N1KV Format. -# [N1KV:] -# username= -# password= -# -# Example: -# [N1KV:2.2.2.2] -# username=admin -# password=mySecretPassword - -[cisco_n1k] - -# (StrOpt) Specify the name of the integration bridge to which the VIFs are -# attached. -# Default value: br-int -# integration_bridge = br-int - -# (StrOpt) Name of the policy profile to be associated with a port when no -# policy profile is specified during port creates. -# Default value: service_profile -# default_policy_profile = service_profile - -# (StrOpt) Name of the policy profile to be associated with a port owned by -# network node (dhcp, router). -# Default value: dhcp_pp -# network_node_policy_profile = dhcp_pp - -# (StrOpt) Name of the network profile to be associated with a network when no -# network profile is specified during network creates. Admin should pre-create -# a network profile with this name. -# Default value: default_network_profile -# default_network_profile = network_pool - -# (IntOpt) Time in seconds for which the plugin polls the VSM for updates in -# policy profiles. -# Default value: 60 -# poll_duration = 60 - -# (BoolOpt) Specify whether tenants are restricted from accessing all the -# policy profiles. -# Default value: False, indicating all tenants can access all policy profiles. -# -# restrict_policy_profiles = False - -# (IntOpt) Number of threads to use to make HTTP requests to the VSM. 
-# Default value: 4 -# http_pool_size = 4 - -# (IntOpt) Timeout duration in seconds for the http request -# Default value: 15 -# http_timeout = 15 diff --git a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini b/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini deleted file mode 100644 index 3ef271d2..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini +++ /dev/null @@ -1,76 +0,0 @@ -[general] -#(IntOpt) Time in seconds between renewed scheduling attempts of non-scheduled routers -# backlog_processing_interval = 10 - -#(StrOpt) Name of the L3 admin tenant -# l3_admin_tenant = L3AdminTenant - -#(StrOpt) Name of management network for hosting device configuration -# management_network = osn_mgmt_nw - -#(StrOpt) Default security group applied on management port -# default_security_group = mgmt_sec_grp - -#(IntOpt) Seconds of no status update until a cfg agent is considered down -# cfg_agent_down_time = 60 - -#(StrOpt) Path to templates for hosting devices -# templates_path = /opt/stack/data/neutron/cisco/templates - -#(StrOpt) Path to config drive files for service VM instances -# service_vm_config_path = /opt/stack/data/neutron/cisco/config_drive - -#(BoolOpt) Ensure that Nova is running before attempting to create any VM -# ensure_nova_running = True - -[hosting_devices] -# Settings coupled to CSR1kv VM devices -# ------------------------------------- -#(StrOpt) Name of Glance image for CSR1kv -# csr1kv_image = csr1kv_openstack_img - -#(StrOpt) UUID of Nova flavor for CSR1kv -# csr1kv_flavor = 621 - -#(StrOpt) Plugging driver for CSR1kv -# csr1kv_plugging_driver = neutron.plugins.cisco.l3.plugging_drivers.n1kv_trunking_driver.N1kvTrunkingPlugDriver - -#(StrOpt) Hosting device driver for CSR1kv -# csr1kv_device_driver = neutron.plugins.cisco.l3.hosting_device_drivers.csr1kv_hd_driver.CSR1kvHostingDeviceDriver - -#(StrOpt) Config agent router service driver for CSR1kv -# csr1kv_cfgagent_router_driver = neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv.csr1kv_routing_driver.CSR1kvRoutingDriver - -#(StrOpt) Configdrive template file for CSR1kv -# csr1kv_configdrive_template = csr1kv_cfg_template - -#(IntOpt) Booting time in seconds before a CSR1kv becomes operational -# csr1kv_booting_time = 420 - -#(StrOpt) Username to use for CSR1kv configurations -# csr1kv_username = stack - -#(StrOpt) Password to use for CSR1kv configurations -# csr1kv_password = cisco - -[n1kv] -# Settings coupled to inter-working with N1kv plugin -# -------------------------------------------------- -#(StrOpt) Name of N1kv port profile for management ports -# management_port_profile = osn_mgmt_pp - -#(StrOpt) Name of N1kv port profile for T1 ports (i.e., ports carrying traffic -# from VXLAN segmented networks). -# t1_port_profile = osn_t1_pp - -#(StrOpt) Name of N1kv port profile for T2 ports (i.e., ports carrying traffic -# from VLAN segmented networks). -# t2_port_profile = osn_t2_pp - -#(StrOpt) Name of N1kv network profile for T1 networks (i.e., trunk networks -# for VXLAN segmented traffic). -# t1_network_profile = osn_t1_np - -#(StrOpt) Name of N1kv network profile for T2 networks (i.e., trunk networks -# for VLAN segmented traffic). 
-# t2_network_profile = osn_t2_np diff --git a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini b/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini deleted file mode 100644 index 0aee17eb..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini +++ /dev/null @@ -1,26 +0,0 @@ -[cisco_csr_ipsec] -# Status check interval in seconds, for VPNaaS IPSec connections used on CSR -# status_check_interval = 60 - -# Cisco CSR management port information for REST access used by VPNaaS -# TODO(pcm): Remove once CSR is integrated in as a Neutron router. -# -# Format is: -# [cisco_csr_rest:] -# rest_mgmt = -# tunnel_ip = -# username = -# password = -# timeout = -# host = -# tunnel_if = -# -# where: -# public IP ----- Public IP address of router used with a VPN service (1:1 with CSR) -# tunnel IP ----- Public IP address of the CSR used for the IPSec tunnel -# mgmt port IP -- IP address of CSR for REST API access -# user ---------- Username for REST management port access to Cisco CSR -# password ------ Password for REST management port access to Cisco CSR -# timeout ------- REST request timeout to Cisco CSR (optional) -# hostname ------ Name of host where CSR is running as a VM -# tunnel I/F ---- CSR port name used for tunnels' IP address diff --git a/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini b/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini deleted file mode 100644 index 0ca9b46f..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini +++ /dev/null @@ -1,41 +0,0 @@ -[heleos] -#configure the ESM management address -#in the first version of this plugin, only one ESM can be specified -#Example: -#esm_mgmt= - -#configure admin username and password -#admin_username= -#admin_password= - -#router image id -#Example: -#router_image=932ce713-e210-3d54-a0a5-518b0b5ee1b0 - -#mgmt shared security zone id -#defines the shared management security zone. Each tenant can have a private one configured through the ESM -#Example: -#mgmt_id=c0bc9b6c-f110-46cf-bb01-733bfe4b5a1a - -#in-band shared security zone id -#defines the shared in-band security zone. Each tenant can have a private one configured through the ESM -#Example: -#inband_id=a6b7999d-3806-4b04-81f6-e0c5c8271afc - -#oob-band shared security zone id -#defines the shared out-of-band security zone. Each tenant can have a private one configured through the ESM -#Example: -#oob_id=e7eda5cc-b977-46cb-9c14-cab43c1b7871 - -#dummy security zone id -#defines the dummy security zone ID. this security zone will be used by the DVAs with no neutron interfaces -#Example: -#dummy_utif_id=d9911310-25fc-4733-a2e0-c0eda024ef08 - -#resource pool id -#define the shared resource pool. Each tenant can have a private one configured through the ESM -#Example -#resource_pool_id= - -#define if the requests have to be executed asynchronously by the plugin or not -#async_requests= diff --git a/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini b/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini deleted file mode 100644 index 5eeec570..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini +++ /dev/null @@ -1,63 +0,0 @@ -[hyperv] -# (StrOpt) Type of network to allocate for tenant networks. The -# default value 'local' is useful only for single-box testing and -# provides no connectivity between hosts. 
You MUST either change this
-# to 'vlan' and configure network_vlan_ranges below or to 'flat'.
-# Set to 'none' to disable creation of tenant networks.
-#
-# tenant_network_type = local
-# Example: tenant_network_type = vlan
-
-# (ListOpt) Comma-separated list of
-# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
-# of VLAN IDs on named physical networks that are available for
-# allocation. All physical networks listed are available for flat and
-# VLAN provider network creation. Specified ranges of VLAN IDs are
-# available for tenant network allocation if tenant_network_type is
-# 'vlan'. If empty, only gre and local networks may be created.
-#
-# network_vlan_ranges =
-# Example: network_vlan_ranges = physnet1:1000:2999
-
-[agent]
-# Agent's polling interval in seconds
-# polling_interval = 2
-
-# (ListOpt) Comma separated list of <physical_network>:<vswitch>
-# where the physical networks can be expressed with wildcards,
-# e.g.: "*:external".
-# The referred external virtual switches need to be already present on
-# the Hyper-V server.
-# If a given physical network name does not match any value in the list,
-# the plugin will look for a virtual switch with the same name.
-#
-# physical_network_vswitch_mappings = *:external
-# Example: physical_network_vswitch_mappings = net1:external1,net2:external2
-
-# (StrOpt) Private virtual switch name used for local networking.
-#
-# local_network_vswitch = private
-# Example: local_network_vswitch = custom_vswitch
-
-# (BoolOpt) Enables metrics collections for switch ports by using Hyper-V's
-# metric APIs. Collected data can be retrieved by other apps and services,
-# e.g.: Ceilometer. Requires Hyper-V / Windows Server 2012 and above.
-#
-# enable_metrics_collection = False
-
-#-----------------------------------------------------------------------------
-# Sample Configurations.
-#----------------------------------------------------------------------------- -# -# Neutron server: -# -# [HYPERV] -# tenant_network_type = vlan -# network_vlan_ranges = default:2000:3999 -# -# Agent running on Hyper-V node: -# -# [AGENT] -# polling_interval = 2 -# physical_network_vswitch_mappings = *:external -# local_network_vswitch = private diff --git a/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini b/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini deleted file mode 100644 index 0fab5070..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini +++ /dev/null @@ -1,50 +0,0 @@ -[sdnve] -# (ListOpt) The IP address of one (or more) SDN-VE controllers -# Default value is: controller_ips = 127.0.0.1 -# Example: controller_ips = 127.0.0.1,127.0.0.2 -# (StrOpt) The integration bridge for OF based implementation -# The default value for integration_bridge is None -# Example: integration_bridge = br-int -# (ListOpt) The interface mapping connecting the integration -# bridge to external network as a list of physical network names and -# interfaces: : -# Example: interface_mappings = default:eth2 -# (BoolOpt) Used to reset the integration bridge, if exists -# The default value for reset_bridge is True -# Example: reset_bridge = False -# (BoolOpt) Used to set the OVS controller as out-of-band -# The default value for out_of_band is True -# Example: out_of_band = False -# -# (BoolOpt) The fake controller for testing purposes -# Default value is: use_fake_controller = False -# (StrOpt) The port number for use with controller -# The default value for the port is 8443 -# Example: port = 8443 -# (StrOpt) The userid for use with controller -# The default value for the userid is admin -# Example: userid = sdnve_user -# (StrOpt) The password for use with controller -# The default value for the password is admin -# Example: password = sdnve_password -# -# (StrOpt) The default type of tenants (and associated resources) -# Available choices are: OVERLAY or OF -# The default value for tenant type is OVERLAY -# Example: default_tenant_type = OVERLAY -# (StrOpt) The string in tenant description that indicates -# Default value for OF tenants: of_signature = SDNVE-OF -# (StrOpt) The string in tenant description that indicates -# Default value for OVERLAY tenants: overlay_signature = SDNVE-OVERLAY - -[sdnve_agent] -# (IntOpt) Agent's polling interval in seconds -# polling_interval = 2 -# (StrOpt) What to use for root helper -# The default value: root_helper = 'sudo' -# (BoolOpt) Whether to use rpc or not -# The default value: rpc = True - -[securitygroup] -# The security group is not supported: -# firewall_driver = neutron.agent.firewall.NoopFirewallDriver diff --git a/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini b/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini deleted file mode 100644 index 94fe9803..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini +++ /dev/null @@ -1,78 +0,0 @@ -[vlans] -# (StrOpt) Type of network to allocate for tenant networks. The -# default value 'local' is useful only for single-box testing and -# provides no connectivity between hosts. You MUST change this to -# 'vlan' and configure network_vlan_ranges below in order for tenant -# networks to provide connectivity between hosts. Set to 'none' to -# disable creation of tenant networks. 
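The network_vlan_ranges value that recurs in these plugin files shares one format. A sketch of how such a value can be parsed (illustrative; each plugin ships its own parser):

    def parse_network_vlan_ranges(value):
        # "physnet1:1000:2999,physnet2": a bare name permits flat/VLAN
        # provider networks; name:min:max additionally opens a VLAN ID
        # range for tenant network allocation.
        ranges = {}
        for entry in value.split(','):
            parts = entry.strip().split(':')
            ranges.setdefault(parts[0], [])
            if len(parts) == 3:
                ranges[parts[0]].append((int(parts[1]), int(parts[2])))
        return ranges

    print(parse_network_vlan_ranges('physnet1:1000:2999,physnet2'))
    # {'physnet1': [(1000, 2999)], 'physnet2': []}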
-# -# tenant_network_type = local -# Example: tenant_network_type = vlan - -# (ListOpt) Comma-separated list of -# [::] tuples enumerating ranges -# of VLAN IDs on named physical networks that are available for -# allocation. All physical networks listed are available for flat and -# VLAN provider network creation. Specified ranges of VLAN IDs are -# available for tenant network allocation if tenant_network_type is -# 'vlan'. If empty, only local networks may be created. -# -# network_vlan_ranges = -# Example: network_vlan_ranges = physnet1:1000:2999 - -[linux_bridge] -# (ListOpt) Comma-separated list of -# : tuples mapping physical -# network names to the agent's node-specific physical network -# interfaces to be used for flat and VLAN networks. All physical -# networks listed in network_vlan_ranges on the server should have -# mappings to appropriate interfaces on each agent. -# -# physical_interface_mappings = -# Example: physical_interface_mappings = physnet1:eth1 - -[vxlan] -# (BoolOpt) enable VXLAN on the agent -# VXLAN support can be enabled when agent is managed by ml2 plugin using -# linuxbridge mechanism driver. Useless if set while using linuxbridge plugin. -# enable_vxlan = False -# -# (IntOpt) use specific TTL for vxlan interface protocol packets -# ttl = -# -# (IntOpt) use specific TOS for vxlan interface protocol packets -# tos = -# -# (StrOpt) multicast group to use for broadcast emulation. -# This group must be the same on all the agents. -# vxlan_group = 224.0.0.1 -# -# (StrOpt) Local IP address to use for VXLAN endpoints (required) -# local_ip = -# -# (BoolOpt) Flag to enable l2population extension. This option should be used -# in conjunction with ml2 plugin l2population mechanism driver (in that case, -# both linuxbridge and l2population mechanism drivers should be loaded). -# It enables plugin to populate VXLAN forwarding table, in order to limit -# the use of broadcast emulation (multicast will be turned off if kernel and -# iproute2 supports unicast flooding - requires 3.11 kernel and iproute2 3.10) -# l2_population = False - -[agent] -# Agent's polling interval in seconds -# polling_interval = 2 - -# (BoolOpt) Enable server RPC compatibility with old (pre-havana) -# agents. -# -# rpc_support_old_agents = False -# Example: rpc_support_old_agents = True - -[securitygroup] -# Firewall driver for realizing neutron security group function -# firewall_driver = neutron.agent.firewall.NoopFirewallDriver -# Example: firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver - -# Controls if neutron security group is enabled or not. -# It should be false when you use nova security group. -# enable_security_group = True diff --git a/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini b/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini deleted file mode 100644 index 2b9bfa5e..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini +++ /dev/null @@ -1,31 +0,0 @@ -# Config file for Metaplugin - -[meta] -# Comma separated list of flavor:neutron_plugin for plugins to load. -# Extension method is searched in the list order and the first one is used. -plugin_list = 'ml2:neutron.plugins.ml2.plugin.Ml2Plugin,nvp:neutron.plugins.vmware.plugin.NsxPluginV2' - -# Comma separated list of flavor:neutron_plugin for L3 service plugins -# to load. -# This is intended for specifying L2 plugins which support L3 functions. -# If you use a router service plugin, set this blank. 
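The metaplugin's flavor lists are comma-separated flavor:plugin pairs. A sketch of the lookup the comments above describe, reusing the plugin paths from plugin_list (illustrative, not the metaplugin's actual loader):

    def parse_flavor_map(value):
        plugins = {}
        for entry in value.strip("'").split(','):
            flavor, _, plugin_path = entry.partition(':')
            plugins[flavor.strip()] = plugin_path.strip()
        return plugins

    flavor_map = parse_flavor_map(
        "'ml2:neutron.plugins.ml2.plugin.Ml2Plugin,"
        "nvp:neutron.plugins.vmware.plugin.NsxPluginV2'")
    # Extension methods are searched in list order; extension_map can
    # pin a specific method to a specific flavor, as with rpc_flavor.
    print(flavor_map['ml2'])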
-l3_plugin_list = - -# Default flavor to use, when flavor:network is not specified at network -# creation. -default_flavor = 'nvp' - -# Default L3 flavor to use, when flavor:router is not specified at router -# creation. -# Ignored if 'l3_plugin_list' is blank. -default_l3_flavor = - -# Comma separated list of supported extension aliases. -supported_extension_aliases = 'provider,binding,agent,dhcp_agent_scheduler' - -# Comma separated list of method:flavor to select specific plugin for a method. -# This has priority over method search order based on 'plugin_list'. -extension_map = 'get_port_stats:nvp' - -# Specifies flavor for plugin to handle 'q-plugin' RPC requests. -rpc_flavor = 'ml2' diff --git a/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini b/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini deleted file mode 100644 index f2e94052..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini +++ /dev/null @@ -1,19 +0,0 @@ - -[midonet] -# MidoNet API server URI -# midonet_uri = http://localhost:8080/midonet-api - -# MidoNet admin username -# username = admin - -# MidoNet admin password -# password = passw0rd - -# ID of the project that MidoNet admin user belongs to -# project_id = 77777777-7777-7777-7777-777777777777 - -# Virtual provider router ID -# provider_router_id = 00112233-0011-0011-0011-001122334455 - -# Path to midonet host uuid file -# midonet_host_uuid_path = /etc/midolman/host_uuid.properties diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini deleted file mode 100644 index b8097ce2..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini +++ /dev/null @@ -1,86 +0,0 @@ -[ml2] -# (ListOpt) List of network type driver entrypoints to be loaded from -# the neutron.ml2.type_drivers namespace. -# -# type_drivers = local,flat,vlan,gre,vxlan -# Example: type_drivers = flat,vlan,gre,vxlan -type_drivers = flat,gre - -# (ListOpt) Ordered list of network_types to allocate as tenant -# networks. The default value 'local' is useful for single-box testing -# but provides no connectivity between hosts. -# -# tenant_network_types = local -# Example: tenant_network_types = vlan,gre,vxlan -tenant_network_types = gre - -# (ListOpt) Ordered list of networking mechanism driver entrypoints -# to be loaded from the neutron.ml2.mechanism_drivers namespace. -# mechanism_drivers = -# Example: mechanism_drivers = openvswitch,mlnx -# Example: mechanism_drivers = arista -# Example: mechanism_drivers = cisco,logger -# Example: mechanism_drivers = openvswitch,brocade -# Example: mechanism_drivers = linuxbridge,brocade -mechanism_drivers = openvswitch - -# (ListOpt) Ordered list of extension driver entrypoints -# to be loaded from the neutron.ml2.extension_drivers namespace. -# extension_drivers = -# Example: extension_drivers = anewextensiondriver - -[ml2_type_flat] -# (ListOpt) List of physical_network names with which flat networks -# can be created. Use * to allow flat networks with arbitrary -# physical_network names. -# -# flat_networks = -# Example:flat_networks = physnet1,physnet2 -# Example:flat_networks = * -flat_networks = External - -[ml2_type_vlan] -# (ListOpt) List of [::] tuples -# specifying physical_network names usable for VLAN provider and -# tenant networks, as well as ranges of VLAN tags on each -# physical_network available for allocation as tenant networks. 
-# -# network_vlan_ranges = -# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 -#network_vlan_ranges = Physnet1:100:200 - -[ml2_type_gre] -# (ListOpt) Comma-separated list of : tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation -tunnel_id_ranges = 1:1000 - -[ml2_type_vxlan] -# (ListOpt) Comma-separated list of : tuples enumerating -# ranges of VXLAN VNI IDs that are available for tenant network allocation. -# -# vni_ranges = - -# (StrOpt) Multicast group for the VXLAN interface. When configured, will -# enable sending all broadcast traffic to this multicast group. When left -# unconfigured, will disable multicast VXLAN mode. -# -# vxlan_group = -# Example: vxlan_group = 239.1.1.1 - -[securitygroup] -# Controls if neutron security group is enabled or not. -# It should be false when you use nova security group. -enable_security_group = True - -# Use ipset to speed-up the iptables security groups. Enabling ipset support -# requires that ipset is installed on L2 agent node. -enable_ipset = True - -firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver - -[ovs] -local_ip = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} -enable_tunneling = True -bridge_mappings=External:br-ex - -[agent] -tunnel_types = gre diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini deleted file mode 100644 index abaf5bc7..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini +++ /dev/null @@ -1,100 +0,0 @@ -# Defines configuration options specific for Arista ML2 Mechanism driver - -[ml2_arista] -# (StrOpt) EOS IP address. This is required field. If not set, all -# communications to Arista EOS will fail -# -# eapi_host = -# Example: eapi_host = 192.168.0.1 -# -# (StrOpt) EOS command API username. This is required field. -# if not set, all communications to Arista EOS will fail. -# -# eapi_username = -# Example: arista_eapi_username = admin -# -# (StrOpt) EOS command API password. This is required field. -# if not set, all communications to Arista EOS will fail. -# -# eapi_password = -# Example: eapi_password = my_password -# -# (StrOpt) Defines if hostnames are sent to Arista EOS as FQDNs -# ("node1.domain.com") or as short names ("node1"). This is -# optional. If not set, a value of "True" is assumed. -# -# use_fqdn = -# Example: use_fqdn = True -# -# (IntOpt) Sync interval in seconds between Neutron plugin and EOS. -# This field defines how often the synchronization is performed. -# This is an optional field. If not set, a value of 180 seconds -# is assumed. -# -# sync_interval = -# Example: sync_interval = 60 -# -# (StrOpt) Defines Region Name that is assigned to this OpenStack Controller. -# This is useful when multiple OpenStack/Neutron controllers are -# managing the same Arista HW clusters. Note that this name must -# match with the region name registered (or known) to keystone -# service. Authentication with Keysotne is performed by EOS. -# This is optional. If not set, a value of "RegionOne" is assumed. -# -# region_name = -# Example: region_name = RegionOne - - -[l3_arista] - -# (StrOpt) primary host IP address. This is required field. If not set, all -# communications to Arista EOS will fail. This is the host where -# primary router is created. -# -# primary_l3_host = -# Example: primary_l3_host = 192.168.10.10 -# -# (StrOpt) Primary host username. This is required field. 
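
Values such as local_ip = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} in the [ovs]
section above are not literal: these files are Jinja2 templates filled in at
deployment time by the Ansible template tasks elsewhere in this series. A
standalone illustration of the same substitution, assuming the jinja2
library and an example address:

    import jinja2

    snippet = ('[ovs]\n'
               'local_ip = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}\n'
               'enable_tunneling = True\n')

    print(jinja2.Template(snippet).render(
        MANAGEMENT_INTERFACE_IP_ADDRESS='10.0.0.3'))  # example value only
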
-# if not set, all communications to Arista EOS will fail. -# -# primary_l3_host_username = -# Example: arista_primary_l3_username = admin -# -# (StrOpt) Primary host password. This is required field. -# if not set, all communications to Arista EOS will fail. -# -# primary_l3_host_password = -# Example: primary_l3_password = my_password -# -# (StrOpt) IP address of the second Arista switch paired as -# MLAG (Multi-chassis Link Aggregation) with the first. -# This is optional field, however, if mlag_config flag is set, -# then this is a required field. If not set, all -# communications to Arista EOS will fail. If mlag_config is set -# to False, then this field is ignored -# -# seconadary_l3_host = -# Example: seconadary_l3_host = 192.168.10.20 -# -# (BoolOpt) Defines if Arista switches are configured in MLAG mode -# If yes, all L3 configuration is pushed to both switches -# automatically. If this flag is set, ensure that secondary_l3_host -# is set to the second switch's IP. -# This flag is Optional. If not set, a value of "False" is assumed. -# -# mlag_config = -# Example: mlag_config = True -# -# (BoolOpt) Defines if the router is created in default VRF or a -# a specific VRF. This is optional. -# If not set, a value of "False" is assumed. -# -# Example: use_vrf = True -# -# (IntOpt) Sync interval in seconds between Neutron plugin and EOS. -# This field defines how often the synchronization is performed. -# This is an optional field. If not set, a value of 180 seconds -# is assumed. -# -# l3_sync_interval = -# Example: l3_sync_interval = 60 diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini deleted file mode 100644 index 67574110..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini +++ /dev/null @@ -1,15 +0,0 @@ -[ml2_brocade] -# username = -# password = -# address = -# ostype = NOS -# osversion = autodetect | n.n.n -# physical_networks = physnet1,physnet2 -# -# Example: -# username = admin -# password = password -# address = 10.24.84.38 -# ostype = NOS -# osversion = 4.1.1 -# physical_networks = physnet1,physnet2 diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini deleted file mode 100644 index 1b69100e..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini +++ /dev/null @@ -1,118 +0,0 @@ -[ml2_cisco] - -# (StrOpt) A short prefix to prepend to the VLAN number when creating a -# VLAN interface. For example, if an interface is being created for -# VLAN 2001 it will be named 'q-2001' using the default prefix. -# -# vlan_name_prefix = q- -# Example: vlan_name_prefix = vnet- - -# (BoolOpt) A flag to enable round robin scheduling of routers for SVI. -# svi_round_robin = False - -# -# (StrOpt) The name of the physical_network managed via the Cisco Nexus Switch. -# This string value must be present in the ml2_conf.ini network_vlan_ranges -# variable. -# -# managed_physical_network = -# Example: managed_physical_network = physnet1 - -# Cisco Nexus Switch configurations. -# Each switch to be managed by Openstack Neutron must be configured here. -# -# Cisco Nexus Switch Format. -# [ml2_mech_cisco_nexus:] -# = (1) -# ssh_port= (2) -# username= (3) -# password= (4) -# -# (1) For each host connected to a port on the switch, specify the hostname -# and the Nexus physical port (interface) it is connected to. 
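
The MLAG options above carry an implicit dependency: mlag_config = True only
makes sense when the second switch's address is also given (and note the
option is spelled "seconadary_l3_host" in this file, apparently an upstream
typo worth remembering when grepping). A hypothetical checker, for
illustration only:

    def check_arista_l3(cfg):
        if not cfg.get('primary_l3_host'):
            raise ValueError('primary_l3_host is required')
        if cfg.get('mlag_config') and not cfg.get('seconadary_l3_host'):
            raise ValueError('mlag_config=True needs seconadary_l3_host')

    check_arista_l3({'primary_l3_host': '192.168.10.10',
                     'mlag_config': True,
                     'seconadary_l3_host': '192.168.10.20'})
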
-# Valid intf_type's are 'ethernet' and 'port-channel'. -# The default setting for is 'ethernet' and need not be -# added to this setting. -# (2) The TCP port for connecting via SSH to manage the switch. This is -# port number 22 unless the switch has been configured otherwise. -# (3) The username for logging into the switch to manage it. -# (4) The password for logging into the switch to manage it. -# -# Example: -# [ml2_mech_cisco_nexus:1.1.1.1] -# compute1=1/1 -# compute2=ethernet:1/2 -# compute3=port-channel:1 -# ssh_port=22 -# username=admin -# password=mySecretPassword - -[ml2_cisco_apic] - -# Hostname:port list of APIC controllers -# apic_hosts = 1.1.1.1:80, 1.1.1.2:8080, 1.1.1.3:80 - -# Username for the APIC controller -# apic_username = user - -# Password for the APIC controller -# apic_password = password - -# Whether use SSl for connecting to the APIC controller or not -# apic_use_ssl = True - -# How to map names to APIC: use_uuid or use_name -# apic_name_mapping = use_name - -# Names for APIC objects used by Neutron -# Note: When deploying multiple clouds against one APIC, -# these names must be unique between the clouds. -# apic_vmm_domain = openstack -# apic_vlan_ns_name = openstack_ns -# apic_node_profile = openstack_profile -# apic_entity_profile = openstack_entity -# apic_function_profile = openstack_function -# apic_app_profile_name = openstack_app -# Agent timers for State reporting and topology discovery -# apic_sync_interval = 30 -# apic_agent_report_interval = 30 -# apic_agent_poll_interval = 2 - -# Specify your network topology. -# This section indicates how your compute nodes are connected to the fabric's -# switches and ports. The format is as follows: -# -# [apic_switch:] -# , = -# -# You can have multiple sections, one for each switch in your fabric that is -# participating in Openstack. e.g. -# -# [apic_switch:17] -# ubuntu,ubuntu1 = 1/10 -# ubuntu2,ubuntu3 = 1/11 -# -# [apic_switch:18] -# ubuntu5,ubuntu6 = 1/1 -# ubuntu7,ubuntu8 = 1/2 - -# Describe external connectivity. -# In this section you can specify the external network configuration in order -# for the plugin to be able to teach the fabric how to route the internal -# traffic to the outside world. The external connectivity configuration -# format is as follows: -# -# [apic_external_network:] -# switch = -# port = -# encap = -# cidr_exposed = -# gateway_ip = -# -# An example follows: -# [apic_external_network:network_ext] -# switch=203 -# port=1/34 -# encap=vlan-100 -# cidr_exposed=10.10.40.2/16 -# gateway_ip=10.10.40.1 diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini deleted file mode 100644 index 6ee4a4e0..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini +++ /dev/null @@ -1,52 +0,0 @@ -# Defines Configuration options for FSL SDN OS Mechanism Driver -# Cloud Resource Discovery (CRD) authorization credentials -[ml2_fslsdn] -#(StrOpt) User name for authentication to CRD. -# e.g.: user12 -# -# crd_user_name = - -#(StrOpt) Password for authentication to CRD. -# e.g.: secret -# -# crd_password = - -#(StrOpt) Tenant name for CRD service. -# e.g.: service -# -# crd_tenant_name = - -#(StrOpt) CRD auth URL. -# e.g.: http://127.0.0.1:5000/v2.0/ -# -# crd_auth_url = - -#(StrOpt) URL for connecting to CRD Service. 
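
Both the Cisco Nexus and APIC blocks above use a convention worth calling
out: the identifier lives in the section name itself, for example
[ml2_mech_cisco_nexus:1.1.1.1] or [apic_switch:17]. A minimal reader for
that pattern, illustrative only, using Python's configparser on a sample
string:

    import configparser, textwrap

    SAMPLE = textwrap.dedent('''\
        [ml2_mech_cisco_nexus:1.1.1.1]
        compute1 = 1/1
        compute2 = ethernet:1/2
        ssh_port = 22
        ''')

    cfg = configparser.ConfigParser()
    cfg.read_string(SAMPLE)
    for section in cfg.sections():
        prefix, _, switch_ip = section.partition(':')
        if prefix == 'ml2_mech_cisco_nexus':
            print(switch_ip, dict(cfg[section]))
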
-# e.g.: http://127.0.0.1:9797 -# -# crd_url= - -#(IntOpt) Timeout value for connecting to CRD service -# in seconds, e.g.: 30 -# -# crd_url_timeout= - -#(StrOpt) Region name for connecting to CRD in -# admin context, e.g.: RegionOne -# -# crd_region_name= - -#(BoolOpt)If set, ignore any SSL validation issues (boolean value) -# e.g.: False -# -# crd_api_insecure= - -#(StrOpt)Authorization strategy for connecting to CRD in admin -# context, e.g.: keystone -# -# crd_auth_strategy= - -#(StrOpt)Location of CA certificates file to use for CRD client -# requests. -# -# crd_ca_certificates_file= diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini deleted file mode 100644 index 46139aed..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini +++ /dev/null @@ -1,4 +0,0 @@ -[eswitch] -# (StrOpt) Type of Network Interface to allocate for VM: -# mlnx_direct or hostdev according to libvirt terminology -# vnic_type = mlnx_direct diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini deleted file mode 100644 index dbbfcbd2..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini +++ /dev/null @@ -1,28 +0,0 @@ -# Defines configuration options specific to the Tail-f NCS Mechanism Driver - -[ml2_ncs] -# (StrOpt) Tail-f NCS HTTP endpoint for REST access to the OpenStack -# subtree. -# If this is not set then no HTTP requests will be made. -# -# url = -# Example: url = http://ncs/api/running/services/openstack - -# (StrOpt) Username for HTTP basic authentication to NCS. -# This is an optional parameter. If unspecified then no authentication is used. -# -# username = -# Example: username = admin - -# (StrOpt) Password for HTTP basic authentication to NCS. -# This is an optional parameter. If unspecified then no authentication is used. -# -# password = -# Example: password = admin - -# (IntOpt) Timeout in seconds to wait for NCS HTTP request completion. -# This is an optional parameter, default value is 10 seconds. -# -# timeout = -# Example: timeout = 15 - diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini deleted file mode 100644 index 9e88c1bb..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini +++ /dev/null @@ -1,30 +0,0 @@ -# Configuration for the OpenDaylight MechanismDriver - -[ml2_odl] -# (StrOpt) OpenDaylight REST URL -# If this is not set then no HTTP requests will be made. -# -# url = -# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron - -# (StrOpt) Username for HTTP basic authentication to ODL. -# -# username = -# Example: username = admin - -# (StrOpt) Password for HTTP basic authentication to ODL. -# -# password = -# Example: password = admin - -# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion. -# This is an optional parameter, default value is 10 seconds. -# -# timeout = 10 -# Example: timeout = 15 - -# (IntOpt) Timeout in minutes to wait for a Tomcat session timeout. -# This is an optional parameter, default value is 30 minutes. 
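
Operationally, the NCS and ODL mechanism drivers described above reduce to
REST calls with optional HTTP basic auth and a request timeout. A sketch of
that interaction, assuming the requests library (the URL is the example
value from the ODL section; nothing is called here, so the snippet stays
network-free):

    import requests

    def rest_get(url, username=None, password=None, timeout=10):
        auth = (username, password) if username else None
        resp = requests.get(url, auth=auth, timeout=timeout)
        resp.raise_for_status()
        return resp.json()

    # rest_get('http://192.168.56.1:8080/controller/nb/v2/neutron',
    #          username='admin', password='admin', timeout=15)
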
-# -# session_timeout = 30 -# Example: session_timeout = 60 diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini deleted file mode 100644 index 4a94b987..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini +++ /dev/null @@ -1,13 +0,0 @@ -# Defines configuration options specific to the OpenFlow Agent Mechanism Driver - -[ovs] -# Please refer to configuration options to the OpenvSwitch - -[agent] -# (IntOpt) Number of seconds to retry acquiring an Open vSwitch datapath. -# This is an optional parameter, default value is 60 seconds. -# -# get_datapath_retry_times = -# Example: get_datapath_retry_times = 30 - -# Please refer to configuration options to the OpenvSwitch else the above. diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini deleted file mode 100644 index 9566f54c..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini +++ /dev/null @@ -1,31 +0,0 @@ -# Defines configuration options for SRIOV NIC Switch MechanismDriver -# and Agent - -[ml2_sriov] -# (ListOpt) Comma-separated list of -# supported Vendor PCI Devices, in format vendor_id:product_id -# -# supported_pci_vendor_devs = 15b3:1004, 8086:10c9 -# Example: supported_pci_vendor_devs = 15b3:1004 -# -# (BoolOpt) Requires running SRIOV neutron agent for port binding -# agent_required = True - -[sriov_nic] -# (ListOpt) Comma-separated list of : -# tuples mapping physical network names to the agent's node-specific -# physical network device interfaces of SR-IOV physical function to be used -# for VLAN networks. All physical networks listed in network_vlan_ranges on -# the server should have mappings to appropriate interfaces on each agent. -# -# physical_device_mappings = -# Example: physical_device_mappings = physnet1:eth1 -# -# (ListOpt) Comma-separated list of : -# tuples, mapping network_device to the agent's node-specific list of virtual -# functions that should not be used for virtual networking. -# vfs_to_exclude is a semicolon-separated list of virtual -# functions to exclude from network_device. The network_device in the -# mapping should appear in the physical_device_mappings list. -# exclude_devices = -# Example: exclude_devices = eth1:0000:07:00.2; 0000:07:00.3 diff --git a/openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini b/openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini deleted file mode 100644 index b1225111..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini +++ /dev/null @@ -1,79 +0,0 @@ -[mlnx] -# (StrOpt) Type of network to allocate for tenant networks. The -# default value is 'vlan' You MUST configure network_vlan_ranges below -# in order for tenant networks to provide connectivity between hosts. -# Set to 'none' to disable creation of tenant networks. -# -# tenant_network_type = vlan -# Example: tenant_network_type = vlan - -# (ListOpt) Comma-separated list of -# [::] tuples enumerating ranges -# of VLAN IDs on named physical networks that are available for -# allocation. All physical networks listed are available for flat and -# VLAN provider network creation. Specified ranges of VLAN IDs are -# available for tenant network allocation if tenant_network_type is -# 'vlan'. If empty, only local networks may be created. 
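
The exclude_devices format above is easy to misread because PCI addresses
contain colons themselves: in "eth1:0000:07:00.2; 0000:07:00.3" only the
first colon separates the device name from its semicolon-separated list of
virtual functions. A hypothetical parser, for illustration:

    def parse_exclude_devices(value):
        exclude = {}
        for entry in value.split(','):
            device, _, vfs = entry.strip().partition(':')
            exclude[device] = [vf.strip() for vf in vfs.split(';') if vf.strip()]
        return exclude

    print(parse_exclude_devices('eth1:0000:07:00.2; 0000:07:00.3'))
    # {'eth1': ['0000:07:00.2', '0000:07:00.3']}
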
-# -# network_vlan_ranges = -# Example: network_vlan_ranges = default:1:100 - -# (ListOpt) Comma-separated list of -# : tuples mapping physical -# network names to physical network types. All physical -# networks listed in network_vlan_ranges should have -# mappings to appropriate physical network type. -# Type of the physical network can be either eth (Ethernet) or -# ib (InfiniBand). If empty, physical network eth type is assumed. -# -# physical_network_type_mappings = -# Example: physical_network_type_mappings = default:eth - -# (StrOpt) Type of the physical network, can be either 'eth' or 'ib' -# The default value is 'eth' -# physical_network_type = eth - -[eswitch] -# (ListOpt) Comma-separated list of -# : tuples mapping physical -# network names to the agent's node-specific physical network -# interfaces to be used for flat and VLAN networks. All physical -# networks listed in network_vlan_ranges on the server should have -# mappings to appropriate interfaces on each agent. -# -# physical_interface_mappings = -# Example: physical_interface_mappings = default:eth2 - -# (StrOpt) Type of Network Interface to allocate for VM: -# direct or hosdev according to libvirt terminology -# vnic_type = mlnx_direct - -# (StrOpt) Eswitch daemon end point connection url -# daemon_endpoint = 'tcp://127.0.0.1:60001' - -# The number of milliseconds the agent will wait for -# response on request to daemon -# request_timeout = 3000 - -# The number of retries the agent will send request -# to daemon before giving up -# retries = 3 - -# The backoff rate multiplier for waiting period between retries -# on request to daemon, i.e. value of 2 will double -# the request timeout each retry -# backoff_rate = 2 - -[agent] -# Agent's polling interval in seconds -# polling_interval = 2 - -# (BoolOpt) Enable server RPC compatibility with old (pre-havana) -# agents. -# -# rpc_support_old_agents = False - -[securitygroup] -# Controls if neutron security group is enabled or not. -# It should be false when you use nova security group. -# enable_security_group = True diff --git a/openstack/usr/share/openstack/neutron/plugins/nec/nec.ini b/openstack/usr/share/openstack/neutron/plugins/nec/nec.ini deleted file mode 100644 index aa4171da..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/nec/nec.ini +++ /dev/null @@ -1,60 +0,0 @@ -# Sample Configurations - -[ovs] -# Do not change this parameter unless you have a good reason to. -# This is the name of the OVS integration bridge. There is one per hypervisor. -# The integration bridge acts as a virtual "patch port". All VM VIFs are -# attached to this bridge and then "patched" according to their network -# connectivity. -# integration_bridge = br-int - -[agent] -# Agent's polling interval in seconds -# polling_interval = 2 - -[securitygroup] -# Firewall driver for realizing neutron security group function -firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver - -# Controls if neutron security group is enabled or not. -# It should be false when you use nova security group. -# enable_security_group = True - -[ofc] -# Specify OpenFlow Controller Host, Port and Driver to connect. -# host = 127.0.0.1 -# port = 8888 - -# Base URL of OpenFlow Controller REST API. -# It is prepended to a path of each API request. -# path_prefix = - -# Drivers are in neutron/plugins/nec/drivers/ . -# driver = trema - -# PacketFilter is available when it's enabled in this configuration -# and supported by the driver. 
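
The [eswitch] request_timeout, retries and backoff_rate options above
combine into a simple retry loop: with backoff_rate = 2 the timeout window
doubles on every retry. A generic stand-in (the real agent talks to the
eswitch daemon over daemon_endpoint; func here is any callable taking a
timeout in seconds):

    import time

    def call_with_backoff(func, timeout_ms=3000, retries=3, backoff_rate=2):
        timeout = timeout_ms / 1000.0
        for attempt in range(retries + 1):
            try:
                return func(timeout)
            except Exception:
                if attempt == retries:
                    raise            # retries exhausted, give up
                time.sleep(timeout)  # wait out the current window
                timeout *= backoff_rate
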
-# enable_packet_filter = true - -# Use SSL to connect -# use_ssl = false - -# Key file -# key_file = - -# Certificate file -# cert_file = - -# Disable SSL certificate verification -# insecure_ssl = false - -# Maximum attempts per OFC API request. NEC plugin retries -# API request to OFC when OFC returns ServiceUnavailable (503). -# The value must be greater than 0. -# api_max_attempts = 3 - -[provider] -# Default router provider to use. -# default_router_provider = l3-agent -# List of enabled router providers. -# router_providers = l3-agent,openflow diff --git a/openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini b/openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini deleted file mode 100644 index aad37bd5..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini +++ /dev/null @@ -1,41 +0,0 @@ -# Please fill in the correct data for all the keys below and uncomment key-value pairs -[restproxy] -# (StrOpt) Default Network partition in which VSD will -# orchestrate network resources using openstack -# -#default_net_partition_name = - -# (StrOpt) Nuage provided uri for initial authorization to -# access VSD -# -#auth_resource = /auth - -# (StrOpt) IP Address and Port of VSD -# -#server = ip:port - -# (StrOpt) Organization name in which VSD will orchestrate -# network resources using openstack -# -#organization = org - -# (StrOpt) Username and password of VSD for authentication -# -#serverauth = uname:pass - -# (BoolOpt) Boolean for SSL connection with VSD server -# -#serverssl = True - -# (StrOpt) Nuage provided base uri to reach out to VSD -# -#base_uri = /base - -[syncmanager] -# (BoolOpt) Boolean to enable sync between openstack and VSD -# -#enable_sync = False - -# (IntOpt) Sync interval in seconds between openstack and VSD -# -#sync_interval = 0 \ No newline at end of file diff --git a/openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini b/openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini deleted file mode 100644 index a1c05d97..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini +++ /dev/null @@ -1,35 +0,0 @@ -[nvsd] -# Configure the NVSD controller. The plugin proxies the api calls using -# to NVSD controller which implements the required functionality. - -# IP address of NVSD controller api server -# nvsd_ip = - -# Port number of NVSD controller api server -# nvsd_port = 8082 - -# Authentication credentials to access the api server -# nvsd_user = -# nvsd_passwd = - -# API request timeout in seconds -# request_timeout = - -# Maximum number of retry attempts to login to the NVSD controller -# Specify 0 to retry until success (default) -# nvsd_retries = 0 - -[securitygroup] -# Specify firewall_driver option, if neutron security groups are disabled, -# then NoopFirewallDriver otherwise OVSHybridIptablesFirewallDriver. -# firewall_driver = neutron.agent.firewall.NoopFirewallDriver - -# Controls if neutron security group is enabled or not. -# It should be false when you use nova security group. 
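
For the [ofc] options above (host, port, path_prefix, use_ssl), the driver
effectively prepends path_prefix to every API path when building request
URLs. A hypothetical illustration of that assembly, reusing the file's
example host and port:

    def ofc_url(host='127.0.0.1', port=8888, path_prefix='',
                path='/networks', use_ssl=False):
        scheme = 'https' if use_ssl else 'http'
        return '%s://%s:%d%s%s' % (scheme, host, port, path_prefix, path)

    print(ofc_url())  # http://127.0.0.1:8888/networks
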
-# enable_security_group = True - -[agent] -# root_helper = sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf - -[database] -# connection = mysql://root:@127.0.0.1/?charset=utf8 diff --git a/openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini b/openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini deleted file mode 100644 index 629f1fc4..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini +++ /dev/null @@ -1,26 +0,0 @@ -# OpenContrail is an Apache 2.0-licensed project that is built using -# standards-based protocols and provides all the necessary components for -# network virtualization–SDN controller, virtual router, analytics engine, -# and published northbound APIs -# For more information visit: http://opencontrail.org - -# Opencontrail plugin specific configuration -[CONTRAIL] -# (StrOpt) IP address to connect to opencontrail controller. -# Uncomment this line for specifying the IP address of the opencontrail -# Api-Server. -# Default value is local host(127.0.0.1). -# api_server_ip='127.0.0.1' - -# (IntOpt) port to connect to opencontrail controller. -# Uncomment this line for the specifying the Port of the opencontrail -# Api-Server. -# Default value is 8082 -# api_server_port=8082 - -# (DictOpt) enable opencontrail extensions -# Opencontrail in future would support extension such as ipam, policy, -# these extensions can be configured as shown below. Plugin will then -# load the specified extensions. -# Default value is None, it wont load any extension -# contrail_extensions=ipam:,policy: diff --git a/openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini b/openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini deleted file mode 100644 index 9c8e6b58..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini +++ /dev/null @@ -1,190 +0,0 @@ -[ovs] -# (StrOpt) Type of network to allocate for tenant networks. The -# default value 'local' is useful only for single-box testing and -# provides no connectivity between hosts. You MUST either change this -# to 'vlan' and configure network_vlan_ranges below or change this to -# 'gre' or 'vxlan' and configure tunnel_id_ranges below in order for -# tenant networks to provide connectivity between hosts. Set to 'none' -# to disable creation of tenant networks. -# -# tenant_network_type = local -# Example: tenant_network_type = gre -# Example: tenant_network_type = vxlan - -# (ListOpt) Comma-separated list of -# [::] tuples enumerating ranges -# of VLAN IDs on named physical networks that are available for -# allocation. All physical networks listed are available for flat and -# VLAN provider network creation. Specified ranges of VLAN IDs are -# available for tenant network allocation if tenant_network_type is -# 'vlan'. If empty, only gre, vxlan and local networks may be created. -# -# network_vlan_ranges = -# Example: network_vlan_ranges = physnet1:1000:2999 - -# (BoolOpt) Set to True in the server and the agents to enable support -# for GRE or VXLAN networks. Requires kernel support for OVS patch ports and -# GRE or VXLAN tunneling. -# -# WARNING: This option will be deprecated in the Icehouse release, at which -# point setting tunnel_type below will be required to enable -# tunneling. -# -# enable_tunneling = False - -# (StrOpt) The type of tunnel network, if any, supported by the plugin. If -# this is set, it will cause tunneling to be enabled. 
If this is not set and -# the option enable_tunneling is set, this will default to 'gre'. -# -# tunnel_type = -# Example: tunnel_type = gre -# Example: tunnel_type = vxlan - -# (ListOpt) Comma-separated list of : tuples -# enumerating ranges of GRE or VXLAN tunnel IDs that are available for -# tenant network allocation if tenant_network_type is 'gre' or 'vxlan'. -# -# tunnel_id_ranges = -# Example: tunnel_id_ranges = 1:1000 - -# Do not change this parameter unless you have a good reason to. -# This is the name of the OVS integration bridge. There is one per hypervisor. -# The integration bridge acts as a virtual "patch bay". All VM VIFs are -# attached to this bridge and then "patched" according to their network -# connectivity. -# -# integration_bridge = br-int - -# Only used for the agent if tunnel_id_ranges (above) is not empty for -# the server. In most cases, the default value should be fine. -# -# tunnel_bridge = br-tun - -# Peer patch port in integration bridge for tunnel bridge -# int_peer_patch_port = patch-tun - -# Peer patch port in tunnel bridge for integration bridge -# tun_peer_patch_port = patch-int - -# Uncomment this line for the agent if tunnel_id_ranges (above) is not -# empty for the server. Set local-ip to be the local IP address of -# this hypervisor. -# -# local_ip = - -# (ListOpt) Comma-separated list of : tuples -# mapping physical network names to the agent's node-specific OVS -# bridge names to be used for flat and VLAN networks. The length of -# bridge names should be no more than 11. Each bridge must -# exist, and should have a physical network interface configured as a -# port. All physical networks listed in network_vlan_ranges on the -# server should have mappings to appropriate bridges on each agent. -# -# bridge_mappings = -# Example: bridge_mappings = physnet1:br-eth1 - -# (BoolOpt) Use veths instead of patch ports to interconnect the integration -# bridge to physical networks. Support kernel without ovs patch port support -# so long as it is set to True. -# use_veth_interconnection = False - -[agent] -# Agent's polling interval in seconds -# polling_interval = 2 - -# Minimize polling by monitoring ovsdb for interface changes -# minimize_polling = True - -# When minimize_polling = True, the number of seconds to wait before -# respawning the ovsdb monitor after losing communication with it -# ovsdb_monitor_respawn_interval = 30 - -# (ListOpt) The types of tenant network tunnels supported by the agent. -# Setting this will enable tunneling support in the agent. This can be set to -# either 'gre' or 'vxlan'. If this is unset, it will default to [] and -# disable tunneling support in the agent. When running the agent with the OVS -# plugin, this value must be the same as "tunnel_type" in the "[ovs]" section. -# When running the agent with ML2, you can specify as many values here as -# your compute hosts supports. -# -# tunnel_types = -# Example: tunnel_types = gre -# Example: tunnel_types = vxlan -# Example: tunnel_types = vxlan, gre - -# (IntOpt) The port number to utilize if tunnel_types includes 'vxlan'. By -# default, this will make use of the Open vSwitch default value of '4789' if -# not specified. -# -# vxlan_udp_port = -# Example: vxlan_udp_port = 8472 - -# (IntOpt) This is the MTU size of veth interfaces. -# Do not change unless you have a good reason to. -# The default MTU size of veth interfaces is 1500. 
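
bridge_mappings above has two easy-to-miss constraints: every physical
network maps to exactly one bridge, and bridge names are limited to 11
characters. A hypothetical validator:

    def parse_bridge_mappings(value):
        mappings = {}
        for entry in value.split(','):
            physnet, _, bridge = entry.strip().partition(':')
            if not physnet or not bridge:
                raise ValueError('bad bridge_mappings entry: %r' % entry)
            if len(bridge) > 11:
                raise ValueError('bridge name %r longer than 11 chars' % bridge)
            mappings[physnet] = bridge
        return mappings

    print(parse_bridge_mappings('physnet1:br-eth1'))  # {'physnet1': 'br-eth1'}
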
-# This option has no effect if use_veth_interconnection is False -# veth_mtu = -# Example: veth_mtu = 1504 - -# (BoolOpt) Flag to enable l2-population extension. This option should only be -# used in conjunction with ml2 plugin and l2population mechanism driver. It'll -# enable plugin to populate remote ports macs and IPs (using fdb_add/remove -# RPC calbbacks instead of tunnel_sync/update) on OVS agents in order to -# optimize tunnel management. -# -# l2_population = False - -# Enable local ARP responder. Requires OVS 2.1. This is only used by the l2 -# population ML2 MechanismDriver. -# -# arp_responder = False - -# (BoolOpt) Set or un-set the don't fragment (DF) bit on outgoing IP packet -# carrying GRE/VXLAN tunnel. The default value is True. -# -# dont_fragment = True - -# (BoolOpt) Set to True on L2 agents to enable support -# for distributed virtual routing. -# -# enable_distributed_routing = False - -[securitygroup] -# Firewall driver for realizing neutron security group function. -# firewall_driver = neutron.agent.firewall.NoopFirewallDriver -# Example: firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver - -# Controls if neutron security group is enabled or not. -# It should be false when you use nova security group. -# enable_security_group = True - -#----------------------------------------------------------------------------- -# Sample Configurations. -#----------------------------------------------------------------------------- -# -# 1. With VLANs on eth1. -# [ovs] -# network_vlan_ranges = default:2000:3999 -# tunnel_id_ranges = -# integration_bridge = br-int -# bridge_mappings = default:br-eth1 -# -# 2. With GRE tunneling. -# [ovs] -# network_vlan_ranges = -# tunnel_id_ranges = 1:1000 -# integration_bridge = br-int -# tunnel_bridge = br-tun -# local_ip = 10.0.0.3 -# -# 3. With VXLAN tunneling. -# [ovs] -# network_vlan_ranges = -# tenant_network_type = vxlan -# tunnel_type = vxlan -# tunnel_id_ranges = 1:1000 -# integration_bridge = br-int -# tunnel_bridge = br-tun -# local_ip = 10.0.0.3 -# [agent] -# tunnel_types = vxlan diff --git a/openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini b/openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini deleted file mode 100644 index bfe8062a..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini +++ /dev/null @@ -1,14 +0,0 @@ -# Config file for Neutron PLUMgrid Plugin - -[plumgriddirector] -# This line should be pointing to the PLUMgrid Director, -# for the PLUMgrid platform. -# director_server= -# director_server_port= -# Authentification parameters for the Director. -# These are the admin credentials to manage and control -# the PLUMgrid Director server. 
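
The sample configurations above double as a compact summary of which keys
live in which section. Expressed with Python's configparser (sample 2,
"With GRE tunneling", reproduced programmatically just to make the structure
concrete):

    import configparser, sys

    cfg = configparser.ConfigParser()
    cfg['ovs'] = {'network_vlan_ranges': '',
                  'tunnel_id_ranges': '1:1000',
                  'integration_bridge': 'br-int',
                  'tunnel_bridge': 'br-tun',
                  'local_ip': '10.0.0.3'}
    cfg.write(sys.stdout)  # prints an [ovs] section in INI form
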
-# username= -# password= -# servertimeout=5 -# driver= diff --git a/openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini b/openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini deleted file mode 100644 index 9d9cfa25..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini +++ /dev/null @@ -1,44 +0,0 @@ -[ovs] -# integration_bridge = br-int - -# openflow_rest_api = : -# openflow_rest_api = 127.0.0.1:8080 - -# tunnel key range: 0 < tunnel_key_min < tunnel_key_max -# VLAN: 12bits, GRE, VXLAN: 24bits -# tunnel_key_min = 1 -# tunnel_key_max = 0xffffff - -# tunnel_ip = -# tunnel_interface = interface for tunneling -# when tunnel_ip is NOT specified, ip address is read -# from this interface -# tunnel_ip = -# tunnel_interface = -tunnel_interface = eth0 - -# ovsdb_port = port number on which ovsdb is listening -# ryu-agent uses this parameter to setup ovsdb. -# ovs-vsctl set-manager ptcp: -# See set-manager section of man ovs-vsctl for details. -# currently ptcp is only supported. -# ovsdb_ip = -# ovsdb_interface = interface for ovsdb -# when ovsdb_addr NOT specifiied, ip address is gotten -# from this interface -# ovsdb_port = 6634 -# ovsdb_ip = -# ovsdb_interface = -ovsdb_interface = eth0 - -[securitygroup] -# Firewall driver for realizing neutron security group function -# firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver - -# Controls if neutron security group is enabled or not. -# It should be false when you use nova security group. -# enable_security_group = True - -[agent] -# Agent's polling interval in seconds -# polling_interval = 2 diff --git a/openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini b/openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini deleted file mode 100644 index baca73b8..00000000 --- a/openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini +++ /dev/null @@ -1,200 +0,0 @@ -[DEFAULT] -# User name for NSX controller -# nsx_user = admin - -# Password for NSX controller -# nsx_password = admin - -# Time before aborting a request on an unresponsive controller (Seconds) -# http_timeout = 75 - -# Maximum number of times a particular request should be retried -# retries = 2 - -# Maximum number of times a redirect response should be followed -# redirects = 2 - -# Comma-separated list of NSX controller endpoints (:). When port -# is omitted, 443 is assumed. This option MUST be specified, e.g.: -# nsx_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh.ee:80 - -# UUID of the pre-existing default NSX Transport zone to be used for creating -# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.: -# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53 - -# (Optional) UUID for the default l3 gateway service to use with this cluster. -# To be specified if planning to use logical routers with external gateways. -# default_l3_gw_service_uuid = - -# (Optional) UUID for the default l2 gateway service to use with this cluster. -# To be specified for providing a predefined gateway tenant for connecting their networks. -# default_l2_gw_service_uuid = - -# (Optional) UUID for the default service cluster. A service cluster is introduced to -# represent a group of gateways and it is needed in order to use Logical Services like -# dhcp and metadata in the logical space. NOTE: If agent_mode is set to 'agentless' this -# config parameter *MUST BE* set to a valid pre-existent service cluster uuid. 
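
Here too the angle brackets were lost in rendering: nsx_controllers is a
comma-separated list of <ip>[:<port>] endpoints, with 443 assumed when the
port is omitted. A hypothetical parser matching that description (the
addresses below are made up for the example):

    def parse_controllers(value):
        endpoints = []
        for entry in value.split(','):
            host, _, port = entry.strip().partition(':')
            endpoints.append((host, int(port) if port else 443))
        return endpoints

    print(parse_controllers('10.0.0.1:8080, 10.0.0.2'))
    # [('10.0.0.1', 8080), ('10.0.0.2', 443)]
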
-# default_service_cluster_uuid = - -# Name of the default interface name to be used on network-gateway. This value -# will be used for any device associated with a network gateway for which an -# interface name was not specified -# default_interface_name = breth0 - -[quotas] -# number of network gateways allowed per tenant, -1 means unlimited -# quota_network_gateway = 5 - -[vcns] -# URL for VCNS manager -# manager_uri = https://management_ip - -# User name for VCNS manager -# user = admin - -# Password for VCNS manager -# password = default - -# (Optional) Datacenter ID for Edge deployment -# datacenter_moid = - -# (Optional) Deployment Container ID for NSX Edge deployment -# If not specified, either a default global container will be used, or -# the resource pool and datastore specified below will be used -# deployment_container_id = - -# (Optional) Resource pool ID for NSX Edge deployment -# resource_pool_id = - -# (Optional) Datastore ID for NSX Edge deployment -# datastore_id = - -# (Required) UUID of logic switch for physical network connectivity -# external_network = - -# (Optional) Asynchronous task status check interval -# default is 2000 (millisecond) -# task_status_check_interval = 2000 - -[nsx] -# Maximum number of ports for each bridged logical switch -# The recommended value for this parameter varies with NSX version -# Please use: -# NSX 2.x -> 64 -# NSX 3.0, 3.1 -> 5000 -# NSX 3.2 -> 10000 -# max_lp_per_bridged_ls = 5000 - -# Maximum number of ports for each overlay (stt, gre) logical switch -# max_lp_per_overlay_ls = 256 - -# Number of connections to each controller node. -# default is 10 -# concurrent_connections = 10 - -# Number of seconds a generation id should be valid for (default -1 meaning do not time out) -# nsx_gen_timeout = -1 - -# Acceptable values for 'metadata_mode' are: -# - 'access_network': this enables a dedicated connection to the metadata -# proxy for metadata server access via Neutron router. -# - 'dhcp_host_route': this enables host route injection via the dhcp agent. -# This option is only useful if running on a host that does not support -# namespaces otherwise access_network should be used. -# metadata_mode = access_network - -# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt) -# default_transport_type = stt - -# Specifies in which mode the plugin needs to operate in order to provide DHCP and -# metadata proxy services to tenant instances. If 'agent' is chosen (default) -# the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to -# provide such services. In this mode, the plugin supports API extensions 'agent' -# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse), -# the plugin will use NSX logical services for DHCP and metadata proxy. This -# simplifies the deployment model for Neutron, in that the plugin no longer requires -# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode -# becomes ineffective. The 'agentless' mode is supported from NSX 4.2 or above. -# Furthermore, a 'combined' mode is also provided and is used to support existing -# deployments that want to adopt the agentless mode going forward. With this mode, -# existing networks keep being served by the existing infrastructure (thus preserving -# backward compatibility, whereas new networks will be served by the new infrastructure. 
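
One interaction in the agent_mode discussion above deserves emphasis: when
agent_mode is 'agentless', NSX logical services provide DHCP and metadata,
so the metadata_mode option has no effect. A tiny stand-in for that rule,
hypothetical rather than the plugin's actual logic:

    def effective_metadata_mode(agent_mode, metadata_mode):
        if agent_mode == 'agentless':
            return None  # NSX logical services handle DHCP/metadata
        return metadata_mode

    print(effective_metadata_mode('agent', 'access_network'))      # access_network
    print(effective_metadata_mode('agentless', 'access_network'))  # None
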
-# Migration tools are provided to 'move' one network from one model to another; with -# agent_mode set to 'combined', option 'network_auto_schedule' in neutron.conf is -# ignored, as new networks will no longer be scheduled to existing dhcp agents. -# agent_mode = agent - -# Specifies which mode packet replication should be done in. If set to service -# a service node is required in order to perform packet replication. This can -# also be set to source if one wants replication to be performed locally (NOTE: -# usually only useful for testing if one does not want to deploy a service node). -# In order to leverage distributed routers, replication_mode should be set to -# "service". -# replication_mode = service - -[nsx_sync] -# Interval in seconds between runs of the status synchronization task. -# The plugin will aim at resynchronizing operational status for all -# resources in this interval, and it should be therefore large enough -# to ensure the task is feasible. Otherwise the plugin will be -# constantly synchronizing resource status, ie: a new task is started -# as soon as the previous is completed. -# If this value is set to 0, the state synchronization thread for this -# Neutron instance will be disabled. -# state_sync_interval = 10 - -# Random additional delay between two runs of the state synchronization task. -# An additional wait time between 0 and max_random_sync_delay seconds -# will be added on top of state_sync_interval. -# max_random_sync_delay = 0 - -# Minimum delay, in seconds, between two status synchronization requests for NSX. -# Depending on chunk size, controller load, and other factors, state -# synchronization requests might be pretty heavy. This means the -# controller might take time to respond, and its load might be quite -# increased by them. This parameter allows to specify a minimum -# interval between two subsequent requests. -# The value for this parameter must never exceed state_sync_interval. -# If this does, an error will be raised at startup. -# min_sync_req_delay = 1 - -# Minimum number of resources to be retrieved from NSX in a single status -# synchronization request. -# The actual size of the chunk will increase if the number of resources is such -# that using the minimum chunk size will cause the interval between two -# requests to be less than min_sync_req_delay -# min_chunk_size = 500 - -# Enable this option to allow punctual state synchronization on show -# operations. In this way, show operations will always fetch the operational -# status of the resource from the NSX backend, and this might have -# a considerable impact on overall performance. -# always_read_status = False - -[nsx_lsn] -# Pull LSN information from NSX in case it is missing from the local -# data store. This is useful to rebuild the local store in case of -# server recovery -# sync_on_missing_data = False - -[nsx_dhcp] -# (Optional) Comma separated list of additional dns servers. Default is an empty list -# extra_domain_name_servers = - -# Domain to use for building the hostnames -# domain_name = openstacklocal - -# Default DHCP lease time -# default_lease_time = 43200 - -[nsx_metadata] -# IP address used by Metadata server -# metadata_server_address = 127.0.0.1 - -# TCP Port used by Metadata server -# metadata_server_port = 8775 - -# When proxying metadata requests, Neutron signs the Instance-ID header with a -# shared secret to prevent spoofing. 
You may select any string for a secret, -# but it MUST match with the configuration used by the Metadata server -# metadata_shared_secret = diff --git a/openstack/usr/share/openstack/neutron/policy.json b/openstack/usr/share/openstack/neutron/policy.json deleted file mode 100644 index e7db4357..00000000 --- a/openstack/usr/share/openstack/neutron/policy.json +++ /dev/null @@ -1,138 +0,0 @@ -{ - "context_is_admin": "role:admin", - "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s", - "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s", - "admin_only": "rule:context_is_admin", - "regular_user": "", - "shared": "field:networks:shared=True", - "shared_firewalls": "field:firewalls:shared=True", - "external": "field:networks:router:external=True", - "default": "rule:admin_or_owner", - - "create_subnet": "rule:admin_or_network_owner", - "get_subnet": "rule:admin_or_owner or rule:shared", - "update_subnet": "rule:admin_or_network_owner", - "delete_subnet": "rule:admin_or_network_owner", - - "create_network": "", - "get_network": "rule:admin_or_owner or rule:shared or rule:external", - "get_network:router:external": "rule:regular_user", - "get_network:segments": "rule:admin_only", - "get_network:provider:network_type": "rule:admin_only", - "get_network:provider:physical_network": "rule:admin_only", - "get_network:provider:segmentation_id": "rule:admin_only", - "get_network:queue_id": "rule:admin_only", - "create_network:shared": "rule:admin_only", - "create_network:router:external": "rule:admin_only", - "create_network:segments": "rule:admin_only", - "create_network:provider:network_type": "rule:admin_only", - "create_network:provider:physical_network": "rule:admin_only", - "create_network:provider:segmentation_id": "rule:admin_only", - "update_network": "rule:admin_or_owner", - "update_network:segments": "rule:admin_only", - "update_network:shared": "rule:admin_only", - "update_network:provider:network_type": "rule:admin_only", - "update_network:provider:physical_network": "rule:admin_only", - "update_network:provider:segmentation_id": "rule:admin_only", - "update_network:router:external": "rule:admin_only", - "delete_network": "rule:admin_or_owner", - - "create_port": "", - "create_port:mac_address": "rule:admin_or_network_owner", - "create_port:fixed_ips": "rule:admin_or_network_owner", - "create_port:port_security_enabled": "rule:admin_or_network_owner", - "create_port:binding:host_id": "rule:admin_only", - "create_port:binding:profile": "rule:admin_only", - "create_port:mac_learning_enabled": "rule:admin_or_network_owner", - "get_port": "rule:admin_or_owner", - "get_port:queue_id": "rule:admin_only", - "get_port:binding:vif_type": "rule:admin_only", - "get_port:binding:vif_details": "rule:admin_only", - "get_port:binding:host_id": "rule:admin_only", - "get_port:binding:profile": "rule:admin_only", - "update_port": "rule:admin_or_owner", - "update_port:fixed_ips": "rule:admin_or_network_owner", - "update_port:port_security_enabled": "rule:admin_or_network_owner", - "update_port:binding:host_id": "rule:admin_only", - "update_port:binding:profile": "rule:admin_only", - "update_port:mac_learning_enabled": "rule:admin_or_network_owner", - "delete_port": "rule:admin_or_owner", - - "get_router:ha": "rule:admin_only", - "create_router": "rule:regular_user", - "create_router:external_gateway_info:enable_snat": "rule:admin_only", - "create_router:distributed": "rule:admin_only", - "create_router:ha": "rule:admin_only", - "get_router": 
"rule:admin_or_owner", - "get_router:distributed": "rule:admin_only", - "update_router:external_gateway_info:enable_snat": "rule:admin_only", - "update_router:distributed": "rule:admin_only", - "update_router:ha": "rule:admin_only", - "delete_router": "rule:admin_or_owner", - - "add_router_interface": "rule:admin_or_owner", - "remove_router_interface": "rule:admin_or_owner", - - "create_firewall": "", - "get_firewall": "rule:admin_or_owner", - "create_firewall:shared": "rule:admin_only", - "get_firewall:shared": "rule:admin_only", - "update_firewall": "rule:admin_or_owner", - "update_firewall:shared": "rule:admin_only", - "delete_firewall": "rule:admin_or_owner", - - "create_firewall_policy": "", - "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls", - "create_firewall_policy:shared": "rule:admin_or_owner", - "update_firewall_policy": "rule:admin_or_owner", - "delete_firewall_policy": "rule:admin_or_owner", - - "create_firewall_rule": "", - "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls", - "update_firewall_rule": "rule:admin_or_owner", - "delete_firewall_rule": "rule:admin_or_owner", - - "create_qos_queue": "rule:admin_only", - "get_qos_queue": "rule:admin_only", - - "update_agent": "rule:admin_only", - "delete_agent": "rule:admin_only", - "get_agent": "rule:admin_only", - - "create_dhcp-network": "rule:admin_only", - "delete_dhcp-network": "rule:admin_only", - "get_dhcp-networks": "rule:admin_only", - "create_l3-router": "rule:admin_only", - "delete_l3-router": "rule:admin_only", - "get_l3-routers": "rule:admin_only", - "get_dhcp-agents": "rule:admin_only", - "get_l3-agents": "rule:admin_only", - "get_loadbalancer-agent": "rule:admin_only", - "get_loadbalancer-pools": "rule:admin_only", - - "create_floatingip": "rule:regular_user", - "update_floatingip": "rule:admin_or_owner", - "delete_floatingip": "rule:admin_or_owner", - "get_floatingip": "rule:admin_or_owner", - - "create_network_profile": "rule:admin_only", - "update_network_profile": "rule:admin_only", - "delete_network_profile": "rule:admin_only", - "get_network_profiles": "", - "get_network_profile": "", - "update_policy_profiles": "rule:admin_only", - "get_policy_profiles": "", - "get_policy_profile": "", - - "create_metering_label": "rule:admin_only", - "delete_metering_label": "rule:admin_only", - "get_metering_label": "rule:admin_only", - - "create_metering_label_rule": "rule:admin_only", - "delete_metering_label_rule": "rule:admin_only", - "get_metering_label_rule": "rule:admin_only", - - "get_service_provider": "rule:regular_user", - "get_lsn": "rule:admin_only", - "create_lsn": "rule:admin_only" -} diff --git a/openstack/usr/share/openstack/neutron/vpn_agent.ini b/openstack/usr/share/openstack/neutron/vpn_agent.ini deleted file mode 100644 index c3089df9..00000000 --- a/openstack/usr/share/openstack/neutron/vpn_agent.ini +++ /dev/null @@ -1,14 +0,0 @@ -[DEFAULT] -# VPN-Agent configuration file -# Note vpn-agent inherits l3-agent, so you can use configs on l3-agent also - -[vpnagent] -# vpn device drivers which vpn agent will use -# If we want to use multiple drivers, we need to define this option multiple times. 
-# vpn_device_driver=neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver -# vpn_device_driver=neutron.services.vpn.device_drivers.cisco_ipsec.CiscoCsrIPsecDriver -# vpn_device_driver=another_driver - -[ipsec] -# Status check interval -# ipsec_status_check_interval=60 diff --git a/openstack/usr/share/openstack/nova-config.yml b/openstack/usr/share/openstack/nova-config.yml deleted file mode 100644 index 4f43db39..00000000 --- a/openstack/usr/share/openstack/nova-config.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- hosts: localhost - vars_files: - - "/etc/openstack/nova.conf" - tasks: - - name: Create the nova user. - user: - name: nova - comment: Openstack Nova Daemons - shell: /sbin/nologin - home: /var/lib/nova - groups: libvirt - append: yes - - - name: Create the /var folders for nova - file: - path: "{{ item }}" - state: directory - owner: nova - group: nova - with_items: - - /var/run/nova - - /var/lock/nova - - /var/log/nova - - /var/lib/nova - - /var/lib/nova/instances - - - file: path=/etc/nova state=directory - - name: Add the configuration needed for nova in /etc/nova using templates - template: - src: /usr/share/openstack/nova/{{ item }} - dest: /etc/nova/{{ item }} - with_lines: - - cd /usr/share/openstack/nova && find -type f diff --git a/openstack/usr/share/openstack/nova-db.yml b/openstack/usr/share/openstack/nova-db.yml deleted file mode 100644 index e7dc5b10..00000000 --- a/openstack/usr/share/openstack/nova-db.yml +++ /dev/null @@ -1,51 +0,0 @@ ---- -- hosts: localhost - vars_files: - - "/etc/openstack/nova.conf" - tasks: - - name: Create nova service user in service tenant - keystone_user: - user: "{{ NOVA_SERVICE_USER }}" - password: "{{ NOVA_SERVICE_PASSWORD }}" - tenant: service - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - - - name: Assign admin role to nova service user in the service tenant - keystone_user: - role: admin - user: "{{ NOVA_SERVICE_USER }}" - tenant: service - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - - - name: Add nova endpoint - keystone_service: - name: nova - type: compute - description: Openstack Compute Service - publicurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8774/v2/%(tenant_id)s' - internalurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8774/v2/%(tenant_id)s' - adminurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8774/v2/%(tenant_id)s' - region: 'regionOne' - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - - - name: Create postgresql user for nova - postgresql_user: - name: "{{ NOVA_DB_USER }}" - login_host: "{{ CONTROLLER_HOST_ADDRESS }}" - password: "{{ NOVA_DB_PASSWORD }}" - sudo: yes - sudo_user: nova - - - name: Create database for nova services - postgresql_db: - name: nova - owner: "{{ NOVA_DB_USER }}" - login_host: "{{ CONTROLLER_HOST_ADDRESS }}" - sudo: yes - sudo_user: nova - - - name: Initiate nova database - nova_manage: - action: dbsync - sudo: yes - sudo_user: nova diff --git a/openstack/usr/share/openstack/nova/api-paste.ini b/openstack/usr/share/openstack/nova/api-paste.ini deleted file mode 100644 index 2a825a5b..00000000 --- a/openstack/usr/share/openstack/nova/api-paste.ini +++ /dev/null @@ -1,118 +0,0 @@ -############ -# Metadata # -############ -[composite:metadata] -use = egg:Paste#urlmap -/: meta - -[pipeline:meta] -pipeline = ec2faultwrap logrequest metaapp - -[app:metaapp] -paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory - -####### -# EC2 # -####### - -[composite:ec2] -use = egg:Paste#urlmap -/services/Cloud: ec2cloud - -[composite:ec2cloud] -use = call:nova.api.auth:pipeline_factory 
-noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor -keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor - -[filter:ec2faultwrap] -paste.filter_factory = nova.api.ec2:FaultWrapper.factory - -[filter:logrequest] -paste.filter_factory = nova.api.ec2:RequestLogging.factory - -[filter:ec2lockout] -paste.filter_factory = nova.api.ec2:Lockout.factory - -[filter:ec2keystoneauth] -paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory - -[filter:ec2noauth] -paste.filter_factory = nova.api.ec2:NoAuth.factory - -[filter:cloudrequest] -controller = nova.api.ec2.cloud.CloudController -paste.filter_factory = nova.api.ec2:Requestify.factory - -[filter:authorizer] -paste.filter_factory = nova.api.ec2:Authorizer.factory - -[filter:validator] -paste.filter_factory = nova.api.ec2:Validator.factory - -[app:ec2executor] -paste.app_factory = nova.api.ec2:Executor.factory - -############# -# OpenStack # -############# - -[composite:osapi_compute] -use = call:nova.api.openstack.urlmap:urlmap_factory -/: oscomputeversions -/v1.1: openstack_compute_api_v2 -/v2: openstack_compute_api_v2 -/v3: openstack_compute_api_v3 - -[composite:openstack_compute_api_v2] -use = call:nova.api.auth:pipeline_factory -noauth = compute_req_id faultwrap sizelimit noauth ratelimit osapi_compute_app_v2 -keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2 -keystone_nolimit = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2 - -[composite:openstack_compute_api_v3] -use = call:nova.api.auth:pipeline_factory_v3 -noauth = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3 -keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v3 - -[filter:request_id] -paste.filter_factory = nova.openstack.common.middleware.request_id:RequestIdMiddleware.factory - -[filter:compute_req_id] -paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory - -[filter:faultwrap] -paste.filter_factory = nova.api.openstack:FaultWrapper.factory - -[filter:noauth] -paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory - -[filter:noauth_v3] -paste.filter_factory = nova.api.openstack.auth:NoAuthMiddlewareV3.factory - -[filter:ratelimit] -paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory - -[filter:sizelimit] -paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory - -[app:osapi_compute_app_v2] -paste.app_factory = nova.api.openstack.compute:APIRouter.factory - -[app:osapi_compute_app_v3] -paste.app_factory = nova.api.openstack.compute:APIRouterV3.factory - -[pipeline:oscomputeversions] -pipeline = faultwrap oscomputeversionapp - -[app:oscomputeversionapp] -paste.app_factory = nova.api.openstack.compute.versions:Versions.factory - -########## -# Shared # -########## - -[filter:keystonecontext] -paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory - -[filter:authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory diff --git a/openstack/usr/share/openstack/nova/cells.json b/openstack/usr/share/openstack/nova/cells.json deleted file mode 100644 index cc74930d..00000000 --- a/openstack/usr/share/openstack/nova/cells.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "parent": { - "name": "parent", - "api_url": "http://api.example.com:8774", - "transport_url": "rabbit://rabbit.example.com", - "weight_offset": 0.0, - "weight_scale": 1.0, - "is_parent": true - }, - "cell1": { - 
"name": "cell1", - "api_url": "http://api.example.com:8774", - "transport_url": "rabbit://rabbit1.example.com", - "weight_offset": 0.0, - "weight_scale": 1.0, - "is_parent": false - }, - "cell2": { - "name": "cell2", - "api_url": "http://api.example.com:8774", - "transport_url": "rabbit://rabbit2.example.com", - "weight_offset": 0.0, - "weight_scale": 1.0, - "is_parent": false - } -} diff --git a/openstack/usr/share/openstack/nova/logging.conf b/openstack/usr/share/openstack/nova/logging.conf deleted file mode 100644 index 5482a040..00000000 --- a/openstack/usr/share/openstack/nova/logging.conf +++ /dev/null @@ -1,81 +0,0 @@ -[loggers] -keys = root, nova - -[handlers] -keys = stderr, stdout, watchedfile, syslog, null - -[formatters] -keys = context, default - -[logger_root] -level = WARNING -handlers = null - -[logger_nova] -level = INFO -handlers = stderr -qualname = nova - -[logger_amqp] -level = WARNING -handlers = stderr -qualname = amqp - -[logger_amqplib] -level = WARNING -handlers = stderr -qualname = amqplib - -[logger_sqlalchemy] -level = WARNING -handlers = stderr -qualname = sqlalchemy -# "level = INFO" logs SQL queries. -# "level = DEBUG" logs SQL queries and results. -# "level = WARNING" logs neither. (Recommended for production systems.) - -[logger_boto] -level = WARNING -handlers = stderr -qualname = boto - -[logger_suds] -level = INFO -handlers = stderr -qualname = suds - -[logger_eventletwsgi] -level = WARNING -handlers = stderr -qualname = eventlet.wsgi.server - -[handler_stderr] -class = StreamHandler -args = (sys.stderr,) -formatter = context - -[handler_stdout] -class = StreamHandler -args = (sys.stdout,) -formatter = context - -[handler_watchedfile] -class = handlers.WatchedFileHandler -args = ('nova.log',) -formatter = context - -[handler_syslog] -class = handlers.SysLogHandler -args = ('/dev/log', handlers.SysLogHandler.LOG_USER) -formatter = context - -[handler_null] -class = nova.openstack.common.log.NullHandler -formatter = default -args = () - -[formatter_context] -class = nova.openstack.common.log.ContextFormatter - -[formatter_default] -format = %(message)s diff --git a/openstack/usr/share/openstack/nova/nova-compute.conf b/openstack/usr/share/openstack/nova/nova-compute.conf deleted file mode 100644 index 8d186211..00000000 --- a/openstack/usr/share/openstack/nova/nova-compute.conf +++ /dev/null @@ -1,4 +0,0 @@ -[DEFAULT] -compute_driver={{ COMPUTE_DRIVER }} -[libvirt] -virt_type={{ NOVA_VIRT_TYPE }} diff --git a/openstack/usr/share/openstack/nova/nova.conf b/openstack/usr/share/openstack/nova/nova.conf deleted file mode 100644 index 43343cdd..00000000 --- a/openstack/usr/share/openstack/nova/nova.conf +++ /dev/null @@ -1,3809 +0,0 @@ -[DEFAULT] - -# -# Options defined in oslo.messaging -# - -# Use durable queues in amqp. (boolean value) -# Deprecated group/name - [DEFAULT]/rabbit_durable_queues -#amqp_durable_queues=false - -# Auto-delete queues in amqp. (boolean value) -#amqp_auto_delete=false - -# Size of RPC connection pool. (integer value) -#rpc_conn_pool_size=30 - -# Qpid broker hostname. (string value) -#qpid_hostname=localhost - -# Qpid broker port. (integer value) -#qpid_port=5672 - -# Qpid HA cluster host:port pairs. (list value) -#qpid_hosts=$qpid_hostname:$qpid_port - -# Username for Qpid connection. (string value) -#qpid_username= - -# Password for Qpid connection. (string value) -#qpid_password= - -# Space separated list of SASL mechanisms to use for auth. 
-# (string value) -#qpid_sasl_mechanisms= - -# Seconds between connection keepalive heartbeats. (integer -# value) -#qpid_heartbeat=60 - -# Transport to use, either 'tcp' or 'ssl'. (string value) -#qpid_protocol=tcp - -# Whether to disable the Nagle algorithm. (boolean value) -#qpid_tcp_nodelay=true - -# The number of prefetched messages held by receiver. (integer -# value) -#qpid_receiver_capacity=1 - -# The qpid topology version to use. Version 1 is what was -# originally used by impl_qpid. Version 2 includes some -# backwards-incompatible changes that allow broker federation -# to work. Users should update to version 2 when they are -# able to take everything down, as it requires a clean break. -# (integer value) -#qpid_topology_version=1 - -# SSL version to use (valid only if SSL enabled). valid values -# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some -# distributions. (string value) -#kombu_ssl_version= - -# SSL key file (valid only if SSL enabled). (string value) -#kombu_ssl_keyfile= - -# SSL cert file (valid only if SSL enabled). (string value) -#kombu_ssl_certfile= - -# SSL certification authority file (valid only if SSL -# enabled). (string value) -#kombu_ssl_ca_certs= - -# How long to wait before reconnecting in response to an AMQP -# consumer cancel notification. (floating point value) -#kombu_reconnect_delay=1.0 - -# The RabbitMQ broker address where a single node is used. -# (string value) -rabbit_host={{ RABBITMQ_HOST }} - -# The RabbitMQ broker port where a single node is used. -# (integer value) -rabbit_port={{ RABBITMQ_PORT }} - -# RabbitMQ HA cluster host:port pairs. (list value) -#rabbit_hosts=$rabbit_host:$rabbit_port - -# Connect over SSL for RabbitMQ. (boolean value) -rabbit_use_ssl=false - -# The RabbitMQ userid. (string value) -rabbit_userid={{ RABBITMQ_USER }} - -# The RabbitMQ password. (string value) -rabbit_password={{ RABBITMQ_PASSWORD }} - -# the RabbitMQ login method (string value) -#rabbit_login_method=AMQPLAIN - -# The RabbitMQ virtual host. (string value) -#rabbit_virtual_host=/ - -# How frequently to retry connecting with RabbitMQ. (integer -# value) -#rabbit_retry_interval=1 - -# How long to backoff for between retries when connecting to -# RabbitMQ. (integer value) -#rabbit_retry_backoff=2 - -# Maximum number of RabbitMQ connection retries. Default is 0 -# (infinite retry count). (integer value) -#rabbit_max_retries=0 - -# Use HA queues in RabbitMQ (x-ha-policy: all). If you change -# this option, you must wipe the RabbitMQ database. (boolean -# value) -#rabbit_ha_queues=false - -# If passed, use a fake RabbitMQ provider. (boolean value) -#fake_rabbit=false - -# ZeroMQ bind address. Should be a wildcard (*), an ethernet -# interface, or IP. The "host" option should point or resolve -# to this address. (string value) -#rpc_zmq_bind_address=* - -# MatchMaker driver. (string value) -#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost - -# ZeroMQ receiver listening port. (integer value) -#rpc_zmq_port=9501 - -# Number of ZeroMQ contexts, defaults to 1. (integer value) -#rpc_zmq_contexts=1 - -# Maximum number of ingress messages to locally buffer per -# topic. Default is unlimited. (integer value) -#rpc_zmq_topic_backlog= - -# Directory for holding IPC sockets. (string value) -#rpc_zmq_ipc_dir=/var/run/openstack - -# Name of this node. Must be a valid hostname, FQDN, or IP -# address. Must match "host" option, if running Nova. (string -# value) -#rpc_zmq_host=nova - -# Seconds to wait before a cast expires (TTL). 
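With rpc_backend=rabbit, the discrete rabbit_host/rabbit_port/rabbit_userid/rabbit_password values templated above collapse into a single AMQP transport URL of the kind the transport_url option also accepts. A small sketch of that assembly; the values are placeholders standing in for the {{ ... }} template variables.

    # Assemble an AMQP transport URL from the discrete rabbit_* options.
    from urllib.parse import quote

    rabbit = {
        'userid': 'nova',
        'password': 's3cret/with/slashes',
        'host': 'rabbit.example.com',
        'port': 5672,
        'virtual_host': '/',
    }

    transport_url = 'rabbit://%s:%s@%s:%d/%s' % (
        quote(rabbit['userid'], safe=''),
        quote(rabbit['password'], safe=''),   # credentials must be URL-escaped
        rabbit['host'],
        rabbit['port'],
        quote(rabbit['virtual_host'], safe=''),
    )

    print(transport_url)
    # rabbit://nova:s3cret%2Fwith%2Fslashes@rabbit.example.com:5672/%2F
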
Only supported -# by impl_zmq. (integer value) -#rpc_cast_timeout=30 - -# Heartbeat frequency. (integer value) -#matchmaker_heartbeat_freq=300 - -# Heartbeat time-to-live. (integer value) -#matchmaker_heartbeat_ttl=600 - -# Size of RPC greenthread pool. (integer value) -#rpc_thread_pool_size=64 - -# Driver or drivers to handle sending notifications. (multi -# valued) -notification_driver=messagingv2 - -# AMQP topic used for OpenStack notifications. (list value) -# Deprecated group/name - [rpc_notifier2]/topics -#notification_topics=notifications - -# Seconds to wait for a response from a call. (integer value) -#rpc_response_timeout=60 - -# A URL representing the messaging driver to use and its full -# configuration. If not set, we fall back to the rpc_backend -# option and driver specific configuration. (string value) -#transport_url= - -# The messaging driver to use, defaults to rabbit. Other -# drivers include qpid and zmq. (string value) -rpc_backend=rabbit - -# The default exchange under which topics are scoped. May be -# overridden by an exchange name specified in the -# transport_url option. (string value) -#control_exchange=openstack - - -# -# Options defined in nova.availability_zones -# - -# The availability_zone to show internal services under -# (string value) -#internal_service_availability_zone=internal - -# Default compute node availability_zone (string value) -#default_availability_zone=nova - - -# -# Options defined in nova.crypto -# - -# Filename of root CA (string value) -#ca_file=cacert.pem - -# Filename of private key (string value) -#key_file=private/cakey.pem - -# Filename of root Certificate Revocation List (string value) -#crl_file=crl.pem - -# Where we keep our keys (string value) -#keys_path=$state_path/keys - -# Where we keep our root CA (string value) -#ca_path=$state_path/CA - -# Should we use a CA for each project? (boolean value) -#use_project_ca=false - -# Subject for certificate for users, %s for project, user, -# timestamp (string value) -#user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s - -# Subject for certificate for projects, %s for project, -# timestamp (string value) -#project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s - - -# -# Options defined in nova.exception -# - -# Make exception message format errors fatal (boolean value) -#fatal_exception_format_errors=false - - -# -# Options defined in nova.netconf -# - -# IP address of this host (string value) -my_ip={{ MANAGEMENT_INTERFACE_IP_ADDRESS }} - -# Name of this node. This can be an opaque identifier. It is -# not necessarily a hostname, FQDN, or IP address. However, -# the node name must be valid within an AMQP key, and if using -# ZeroMQ, a valid hostname, FQDN, or IP address (string value) -#host=nova - -# Use IPv6 (boolean value) -#use_ipv6=false - - -# -# Options defined in nova.notifications -# - -# If set, send compute.instance.update notifications on -# instance state changes. Valid values are None for no -# notifications, "vm_state" for notifications on VM state -# changes, or "vm_and_task_state" for notifications on VM and -# task state changes. (string value) -notify_on_state_change=vm_and_task_state - -# If set, send api.fault notifications on caught exceptions in -# the API service. 
(boolean value) -#notify_api_faults=false - -# Default notification level for outgoing notifications -# (string value) -#default_notification_level=INFO - -# Default publisher_id for outgoing notifications (string -# value) -#default_publisher_id= - - -# -# Options defined in nova.paths -# - -# Directory where the nova python module is installed (string -# value) -#pybasedir=/usr/lib/python/site-packages - -# Directory where nova binaries are installed (string value) -#bindir=/usr/local/bin - -# Top-level directory for maintaining nova's state (string -# value) -state_path=/var/lib/nova - - -# -# Options defined in nova.quota -# - -# Number of instances allowed per project (integer value) -#quota_instances=10 - -# Number of instance cores allowed per project (integer value) -#quota_cores=20 - -# Megabytes of instance RAM allowed per project (integer -# value) -#quota_ram=51200 - -# Number of floating IPs allowed per project (integer value) -#quota_floating_ips=10 - -# Number of fixed IPs allowed per project (this should be at -# least the number of instances allowed) (integer value) -#quota_fixed_ips=-1 - -# Number of metadata items allowed per instance (integer -# value) -#quota_metadata_items=128 - -# Number of injected files allowed (integer value) -#quota_injected_files=5 - -# Number of bytes allowed per injected file (integer value) -#quota_injected_file_content_bytes=10240 - -# Length of injected file path (integer value) -# Deprecated group/name - [DEFAULT]/quota_injected_file_path_bytes -#quota_injected_file_path_length=255 - -# Number of security groups per project (integer value) -#quota_security_groups=10 - -# Number of security rules per security group (integer value) -#quota_security_group_rules=20 - -# Number of key pairs per user (integer value) -#quota_key_pairs=100 - -# Number of server groups per project (integer value) -#quota_server_groups=10 - -# Number of servers per server group (integer value) -#quota_server_group_members=10 - -# Number of seconds until a reservation expires (integer -# value) -#reservation_expire=86400 - -# Count of reservations until usage is refreshed (integer -# value) -#until_refresh=0 - -# Number of seconds between subsequent usage refreshes -# (integer value) -#max_age=0 - -# Default driver to use for quota checks (string value) -#quota_driver=nova.quota.DbQuotaDriver - - -# -# Options defined in nova.service -# - -# Seconds between nodes reporting state to datastore (integer -# value) -#report_interval=10 - -# Enable periodic tasks (boolean value) -#periodic_enable=true - -# Range of seconds to randomly delay when starting the -# periodic task scheduler to reduce stampeding. (Disable by -# setting to 0) (integer value) -#periodic_fuzzy_delay=60 - -# A list of APIs to enable by default (list value) -enabled_apis=ec2,osapi_compute,metadata - -# A list of APIs with enabled SSL (list value) -#enabled_ssl_apis= - -# The IP address on which the EC2 API will listen. (string -# value) -#ec2_listen=0.0.0.0 - -# The port on which the EC2 API will listen. (integer value) -#ec2_listen_port=8773 - -# Number of workers for EC2 API service. The default will be -# equal to the number of CPUs available. (integer value) -#ec2_workers= - -# The IP address on which the OpenStack API will listen. -# (string value) -#osapi_compute_listen=0.0.0.0 - -# The port on which the OpenStack API will listen. (integer -# value) -#osapi_compute_listen_port=8774 - -# Number of workers for OpenStack API service. The default -# will be the number of CPUs available. 
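The quota_* options above are plain per-project ceilings enforced by the configured quota_driver. The arithmetic is simple reservation accounting; a sketch with assumed in-memory usage (the real DbQuotaDriver reads usage from the database).

    # Toy quota check mirroring the per-project ceilings above.
    QUOTAS = {'instances': 10, 'cores': 20, 'ram': 51200}  # the defaults above

    class QuotaExceeded(Exception):
        pass

    def reserve(usage, requested):
        """Raise if any requested delta would push usage over its quota.

        A quota of -1 means unlimited, matching e.g. quota_fixed_ips=-1.
        """
        for resource, delta in requested.items():
            limit = QUOTAS.get(resource, -1)
            if limit >= 0 and usage.get(resource, 0) + delta > limit:
                raise QuotaExceeded('%s: %d + %d > %d' % (
                    resource, usage.get(resource, 0), delta, limit))
        for resource, delta in requested.items():
            usage[resource] = usage.get(resource, 0) + delta

    usage = {'instances': 9, 'cores': 18, 'ram': 46080}
    reserve(usage, {'instances': 1, 'cores': 2, 'ram': 5120})   # fits exactly
    # reserve(usage, {'instances': 1}) would now raise QuotaExceeded
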
(integer value) -#osapi_compute_workers= - -# OpenStack metadata service manager (string value) -#metadata_manager=nova.api.manager.MetadataManager - -# The IP address on which the metadata API will listen. -# (string value) -#metadata_listen=0.0.0.0 - -# The port on which the metadata API will listen. (integer -# value) -#metadata_listen_port=8775 - -# Number of workers for metadata service. The default will be -# the number of CPUs available. (integer value) -#metadata_workers= - -# Full class name for the Manager for compute (string value) -compute_manager={{ COMPUTE_MANAGER }} - -# Full class name for the Manager for console proxy (string -# value) -#console_manager=nova.console.manager.ConsoleProxyManager - -# Manager for console auth (string value) -#consoleauth_manager=nova.consoleauth.manager.ConsoleAuthManager - -# Full class name for the Manager for cert (string value) -#cert_manager=nova.cert.manager.CertManager - -# Full class name for the Manager for network (string value) -#network_manager=nova.network.manager.VlanManager - -# Full class name for the Manager for scheduler (string value) -#scheduler_manager=nova.scheduler.manager.SchedulerManager - -# Maximum time since last check-in for up service (integer -# value) -#service_down_time=60 - - -# -# Options defined in nova.test -# - -# File name of clean sqlite db (string value) -#sqlite_clean_db=clean.sqlite - - -# -# Options defined in nova.utils -# - -# Whether to log monkey patching (boolean value) -#monkey_patch=false - -# List of modules/decorators to monkey patch (list value) -#monkey_patch_modules=nova.api.ec2.cloud:nova.notifications.notify_decorator,nova.compute.api:nova.notifications.notify_decorator - -# Length of generated instance admin passwords (integer value) -#password_length=12 - -# Time period to generate instance usages for. Time period -# must be hour, day, month or year (string value) -instance_usage_audit_period=hour - -# Path to the rootwrap configuration file to use for running -# commands as root (string value) -rootwrap_config=/etc/nova/rootwrap.conf - -# Explicitly specify the temporary working directory (string -# value) -#tempdir= - - -# -# Options defined in nova.wsgi -# - -# File name for the paste.deploy config for nova-api (string -# value) -api_paste_config=api-paste.ini - -# A python format string that is used as the template to -# generate log lines. The following values can be formatted -# into it: client_ip, date_time, request_line, status_code, -# body_length, wall_seconds. (string value) -#wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f - -# CA certificate file to use to verify connecting clients -# (string value) -#ssl_ca_file= - -# SSL certificate of API server (string value) -#ssl_cert_file= - -# SSL private key of API server (string value) -#ssl_key_file= - -# Sets the value of TCP_KEEPIDLE in seconds for each server -# socket. Not supported on OS X. (integer value) -#tcp_keepidle=600 - -# Size of the pool of greenthreads used by wsgi (integer -# value) -#wsgi_default_pool_size=1000 - -# Maximum line size of message headers to be accepted. -# max_header_line may need to be increased when using large -# tokens (typically those generated by the Keystone v3 API -# with big service catalogs). (integer value) -#max_header_line=16384 - - -# -# Options defined in nova.api.auth -# - -# Whether to use per-user rate limiting for the api. This -# option is only used by v2 api. Rate limiting is removed from -# v3 api. 
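wsgi_log_format above is an ordinary Python %-format string fed a dict of request facts, and the default shown in its comment renders through exactly that mechanism. A one-line sketch with made-up request values:

    # Render the default wsgi_log_format with illustrative request values.
    wsgi_log_format = ('%(client_ip)s "%(request_line)s" status: %(status_code)s'
                       ' len: %(body_length)s time: %(wall_seconds).7f')

    print(wsgi_log_format % {
        'client_ip': '10.0.0.7',
        'request_line': 'GET /v2/servers HTTP/1.1',
        'status_code': 200,
        'body_length': 1423,
        'wall_seconds': 0.0312,
    })
    # 10.0.0.7 "GET /v2/servers HTTP/1.1" status: 200 len: 1423 time: 0.0312000
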
(boolean value) -#api_rate_limit=false - -# The strategy to use for auth: noauth or keystone. (string -# value) -auth_strategy=keystone - -# Treat X-Forwarded-For as the canonical remote address. Only -# enable this if you have a sanitizing proxy. (boolean value) -#use_forwarded_for=false - - -# -# Options defined in nova.api.ec2 -# - -# Number of failed auths before lockout. (integer value) -#lockout_attempts=5 - -# Number of minutes to lockout if triggered. (integer value) -#lockout_minutes=15 - -# Number of minutes for lockout window. (integer value) -#lockout_window=15 - -# URL to get token from ec2 request. (string value) -#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens - -# Return the IP address as private dns hostname in describe -# instances (boolean value) -#ec2_private_dns_show_ip=false - -# Validate security group names according to EC2 specification -# (boolean value) -#ec2_strict_validation=true - -# Time in seconds before ec2 timestamp expires (integer value) -#ec2_timestamp_expiry=300 - - -# -# Options defined in nova.api.ec2.cloud -# - -# The IP address of the EC2 API server (string value) -#ec2_host=$my_ip - -# The internal IP address of the EC2 API server (string value) -#ec2_dmz_host=$my_ip - -# The port of the EC2 API server (integer value) -#ec2_port=8773 - -# The protocol to use when connecting to the EC2 API server -# (http, https) (string value) -#ec2_scheme=http - -# The path prefix used to call the ec2 API server (string -# value) -#ec2_path=/services/Cloud - -# List of region=fqdn pairs separated by commas (list value) -#region_list= - - -# -# Options defined in nova.api.metadata.base -# - -# List of metadata versions to skip placing into the config -# drive (string value) -#config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01 - -# Driver to use for vendor data (string value) -#vendordata_driver=nova.api.metadata.vendordata_json.JsonFileVendorData - - -# -# Options defined in nova.api.metadata.vendordata_json -# - -# File to load JSON formatted vendor data from (string value) -#vendordata_jsonfile_path= - - -# -# Options defined in nova.api.openstack.common -# - -# The maximum number of items returned in a single response -# from a collection resource (integer value) -#osapi_max_limit=1000 - -# Base URL that will be presented to users in links to the -# OpenStack Compute API (string value) -#osapi_compute_link_prefix= - -# Base URL that will be presented to users in links to glance -# resources (string value) -#osapi_glance_link_prefix= - - -# -# Options defined in nova.api.openstack.compute -# - -# Permit instance snapshot operations. (boolean value) -#allow_instance_snapshots=true - - -# -# Options defined in nova.api.openstack.compute.contrib -# - -# Specify list of extensions to load when using -# osapi_compute_extension option with -# nova.api.openstack.compute.contrib.select_extensions (list -# value) -osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions - - -# -# Options defined in nova.api.openstack.compute.contrib.fping -# - -# Full path to fping. 
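The lockout_attempts/lockout_minutes/lockout_window trio above describes a classic sliding-window lockout: enough failures inside the window trigger a timed ban. A minimal in-memory sketch of that idea; the real middleware presumably tracks this per EC2 access key rather than in module globals.

    import time

    LOCKOUT_ATTEMPTS = 5           # failures allowed...
    LOCKOUT_WINDOW = 15 * 60       # ...within this many seconds...
    LOCKOUT_DURATION = 15 * 60     # ...lock out for this long (seconds)

    _failures = {}   # key -> list of failure timestamps
    _locked = {}     # key -> lockout expiry timestamp

    def record_failure(key, now=None):
        now = now or time.time()
        window = [t for t in _failures.get(key, []) if now - t < LOCKOUT_WINDOW]
        window.append(now)
        _failures[key] = window
        if len(window) >= LOCKOUT_ATTEMPTS:
            _locked[key] = now + LOCKOUT_DURATION

    def is_locked_out(key, now=None):
        now = now or time.time()
        return _locked.get(key, 0) > now
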
(string value) -#fping_path=/usr/sbin/fping - - -# -# Options defined in nova.api.openstack.compute.contrib.os_tenant_networks -# - -# Enables or disables quota checking for tenant networks -# (boolean value) -#enable_network_quota=false - -# Control for checking for default networks (string value) -#use_neutron_default_nets=False - -# Default tenant id when creating neutron networks (string -# value) -#neutron_default_tenant_id=default - - -# -# Options defined in nova.api.openstack.compute.extensions -# - -# osapi compute extension to load (multi valued) -#osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions - - -# -# Options defined in nova.api.openstack.compute.plugins.v3.hide_server_addresses -# - -# List of instance states that should hide network info (list -# value) -#osapi_hide_server_address_states=building - - -# -# Options defined in nova.api.openstack.compute.servers -# - -# Enables returning of the instance password by the relevant -# server API calls such as create, rebuild or rescue, If the -# hypervisor does not support password injection then the -# password returned will not be correct (boolean value) -#enable_instance_password=true - - -# -# Options defined in nova.api.sizelimit -# - -# The maximum body size per each osapi request(bytes) (integer -# value) -#osapi_max_request_body_size=114688 - - -# -# Options defined in nova.cert.rpcapi -# - -# The topic cert nodes listen on (string value) -#cert_topic=cert - - -# -# Options defined in nova.cloudpipe.pipelib -# - -# Image ID used when starting up a cloudpipe vpn server -# (string value) -#vpn_image_id=0 - -# Flavor for vpn instances (string value) -#vpn_flavor=m1.tiny - -# Template for cloudpipe instance boot script (string value) -#boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template - -# Network to push into openvpn config (string value) -#dmz_net=10.0.0.0 - -# Netmask to push into openvpn config (string value) -#dmz_mask=255.255.255.0 - -# Suffix to add to project name for vpn key and secgroups -# (string value) -#vpn_key_suffix=-vpn - - -# -# Options defined in nova.cmd.novnc -# - -# Record sessions to FILE.[session_number] (boolean value) -#record=false - -# Become a daemon (background process) (boolean value) -#daemon=false - -# Disallow non-encrypted connections (boolean value) -#ssl_only=false - -# Source is ipv6 (boolean value) -#source_is_ipv6=false - -# SSL certificate file (string value) -#cert=self.pem - -# SSL key file (if separate from cert) (string value) -#key= - -# Run webserver on same port. Serve files from DIR. (string -# value) -#web=/usr/share/spice-html5 - - -# -# Options defined in nova.cmd.novncproxy -# - -# Host on which to listen for incoming requests (string value) -#novncproxy_host=0.0.0.0 - -# Port on which to listen for incoming requests (integer -# value) -#novncproxy_port=6080 - - -# -# Options defined in nova.compute.api -# - -# Allow destination machine to match source for resize. Useful -# when testing in single-host environments. (boolean value) -#allow_resize_to_same_host=false - -# Allow migrate machine to the same host. Useful when testing -# in single-host environments. 
(boolean value) -#allow_migrate_to_same_host=false - -# Availability zone to use when user doesn't specify one -# (string value) -#default_schedule_zone= - -# These are image properties which a snapshot should not -# inherit from an instance (list value) -#non_inheritable_image_properties=cache_in_nova,bittorrent - -# Kernel image that indicates not to use a kernel, but to use -# a raw disk image instead (string value) -#null_kernel=nokernel - -# When creating multiple instances with a single request using -# the os-multiple-create API extension, this template will be -# used to build the display name for each instance. The -# benefit is that the instances end up with different -# hostnames. To restore legacy behavior of every instance -# having the same name, set this option to "%(name)s". Valid -# keys for the template are: name, uuid, count. (string value) -#multi_instance_display_name_template=%(name)s-%(uuid)s - -# Maximum number of devices that will result in a local image -# being created on the hypervisor node. Setting this to 0 -# means nova will allow only boot from volume. A negative -# number means unlimited. (integer value) -#max_local_block_devices=3 - - -# -# Options defined in nova.compute.flavors -# - -# Default flavor to use for the EC2 API only. The Nova API -# does not support a default flavor. (string value) -#default_flavor=m1.small - - -# -# Options defined in nova.compute.manager -# - -# Console proxy host to use to connect to instances on this -# host. (string value) -#console_host=nova - -# Name of network to use to set access IPs for instances -# (string value) -#default_access_ip_network_name= - -# Whether to batch up the application of IPTables rules during -# a host restart and apply all at the end of the init phase -# (boolean value) -#defer_iptables_apply=false - -# Where instances are stored on disk (string value) -#instances_path=$state_path/instances - -# Generate periodic compute.instance.exists notifications -# (boolean value) -instance_usage_audit=True - -# Number of 1 second retries needed in live_migration (integer -# value) -#live_migration_retry_count=30 - -# Whether to start guests that were running before the host -# rebooted (boolean value) -#resume_guests_state_on_host_boot=false - -# Number of times to retry network allocation on failures -# (integer value) -#network_allocate_retries=0 - -# Number of times to retry block device allocation on failures -# (integer value) -#block_device_allocate_retries=60 - -# The number of times to attempt to reap an instance's files. -# (integer value) -#maximum_instance_delete_attempts=5 - -# Interval to pull network bandwidth usage info. Not supported -# on all hypervisors. Set to -1 to disable. Setting this to 0 -# will disable, but this will change in the K release to mean -# "run at the default rate". (integer value) -#bandwidth_poll_interval=600 - -# Interval to sync power states between the database and the -# hypervisor. Set to -1 to disable. Setting this to 0 will -# disable, but this will change in Juno to mean "run at the -# default rate". (integer value) -#sync_power_state_interval=600 - -# Number of seconds between instance info_cache self healing -# updates (integer value) -#heal_instance_info_cache_interval=60 - -# Interval in seconds for reclaiming deleted instances -# (integer value) -#reclaim_instance_interval=0 - -# Interval in seconds for gathering volume usages (integer -# value) -#volume_usage_poll_interval=0 - -# Interval in seconds for polling shelved instances to -# offload. 
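Several of the *_interval options above, such as bandwidth_poll_interval, sync_power_state_interval, and the shelved-instance poll, share one convention: a positive value is a period in seconds, -1 disables the task, and 0 currently also disables but is slated to mean "run at the default rate" in a later release. A small helper makes the convention explicit; DEFAULT_RATE is an assumed stand-in for a task's built-in period.

    DEFAULT_RATE = 600  # assumed stand-in for a task's built-in default period

    def effective_period(configured, legacy_zero_disables=True):
        """Return the period in seconds, or None when the task is disabled."""
        if configured == -1:
            return None                       # explicitly disabled
        if configured == 0:
            # Today 0 disables; later releases make it mean "default rate".
            return None if legacy_zero_disables else DEFAULT_RATE
        return configured

    assert effective_period(3600) == 3600
    assert effective_period(-1) is None
    assert effective_period(0) is None
    assert effective_period(0, legacy_zero_disables=False) == 600
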
Set to -1 to disable.Setting this to 0 will -# disable, but this will change in Juno to mean "run at the -# default rate". (integer value) -#shelved_poll_interval=3600 - -# Time in seconds before a shelved instance is eligible for -# removing from a host. -1 never offload, 0 offload when -# shelved (integer value) -#shelved_offload_time=0 - -# Interval in seconds for retrying failed instance file -# deletes (integer value) -#instance_delete_interval=300 - -# Waiting time interval (seconds) between block device -# allocation retries on failures (integer value) -#block_device_allocate_retries_interval=3 - -# Action to take if a running deleted instance is -# detected.Valid options are 'noop', 'log', 'shutdown', or -# 'reap'. Set to 'noop' to take no action. (string value) -#running_deleted_instance_action=reap - -# Number of seconds to wait between runs of the cleanup task. -# (integer value) -#running_deleted_instance_poll_interval=1800 - -# Number of seconds after being deleted when a running -# instance should be considered eligible for cleanup. (integer -# value) -#running_deleted_instance_timeout=0 - -# Automatically hard reboot an instance if it has been stuck -# in a rebooting state longer than N seconds. Set to 0 to -# disable. (integer value) -#reboot_timeout=0 - -# Amount of time in seconds an instance can be in BUILD before -# going into ERROR status.Set to 0 to disable. (integer value) -#instance_build_timeout=0 - -# Automatically unrescue an instance after N seconds. Set to 0 -# to disable. (integer value) -#rescue_timeout=0 - -# Automatically confirm resizes after N seconds. Set to 0 to -# disable. (integer value) -#resize_confirm_window=0 - -# Total amount of time to wait in seconds for an instance to -# perform a clean shutdown. (integer value) -#shutdown_timeout=60 - - -# -# Options defined in nova.compute.monitors -# - -# Monitor classes available to the compute which may be -# specified more than once. (multi valued) -#compute_available_monitors=nova.compute.monitors.all_monitors - -# A list of monitors that can be used for getting compute -# metrics. (list value) -#compute_monitors= - - -# -# Options defined in nova.compute.resource_tracker -# - -# Amount of disk in MB to reserve for the host (integer value) -#reserved_host_disk_mb=0 - -# Amount of memory in MB to reserve for the host (integer -# value) -reserved_host_memory_mb={{ RESERVED_HOST_MEMORY_MB }} - -# Class that will manage stats for the local compute host -# (string value) -#compute_stats_class=nova.compute.stats.Stats - -# The names of the extra resources to track. (list value) -#compute_resources=vcpu - - -# -# Options defined in nova.compute.rpcapi -# - -# The topic compute nodes listen on (string value) -#compute_topic=compute - - -# -# Options defined in nova.conductor.tasks.live_migrate -# - -# Number of times to retry live-migration before failing. If -# == -1, try until out of hosts. If == 0, only try once, no -# retries. (integer value) -#migrate_max_retries=-1 - - -# -# Options defined in nova.console.manager -# - -# Driver to use for the console proxy (string value) -#console_driver=nova.console.xvp.XVPConsoleProxy - -# Stub calls to compute worker for tests (boolean value) -#stub_compute=false - -# Publicly visible name for this console host (string value) -#console_public_hostname=nova - - -# -# Options defined in nova.console.rpcapi -# - -# The topic console proxy nodes listen on (string value) -#console_topic=console - - -# -# Options defined in nova.console.vmrc -# - -# DEPRECATED. 
Port for VMware VMRC connections (integer value) -#console_vmrc_port=443 - -# DEPRECATED. Number of retries for retrieving VMRC -# information (integer value) -#console_vmrc_error_retries=10 - - -# -# Options defined in nova.console.xvp -# - -# XVP conf template (string value) -#console_xvp_conf_template=$pybasedir/nova/console/xvp.conf.template - -# Generated XVP conf file (string value) -#console_xvp_conf=/etc/xvp.conf - -# XVP master process pid file (string value) -#console_xvp_pid=/var/run/xvp.pid - -# XVP log file (string value) -#console_xvp_log=/var/log/xvp.log - -# Port for XVP to multiplex VNC connections on (integer value) -#console_xvp_multiplex_port=5900 - - -# -# Options defined in nova.consoleauth -# - -# The topic console auth proxy nodes listen on (string value) -#consoleauth_topic=consoleauth - - -# -# Options defined in nova.consoleauth.manager -# - -# How many seconds before deleting tokens (integer value) -#console_token_ttl=600 - - -# -# Options defined in nova.db.api -# - -# Services to be added to the available pool on create -# (boolean value) -#enable_new_services=true - -# Template string to be used to generate instance names -# (string value) -instance_name_template=instance-%08x - -# Template string to be used to generate snapshot names -# (string value) -snapshot_name_template=snapshot-%s - - -# -# Options defined in nova.db.base -# - -# The driver to use for database access (string value) -#db_driver=nova.db - - -# -# Options defined in nova.db.sqlalchemy.api -# - -# When set, compute API will consider duplicate hostnames -# invalid within the specified scope, regardless of case. -# Should be empty, "project" or "global". (string value) -#osapi_compute_unique_server_name_scope= - - -# -# Options defined in nova.image.s3 -# - -# Parent directory for tempdir used for image decryption -# (string value) -#image_decryption_dir=/tmp - -# Hostname or IP for OpenStack to use when accessing the S3 -# api (string value) -#s3_host=$my_ip - -# Port used when accessing the S3 api (integer value) -#s3_port=3333 - -# Access key to use for S3 server for images (string value) -#s3_access_key=notchecked - -# Secret key to use for S3 server for images (string value) -#s3_secret_key=notchecked - -# Whether to use SSL when talking to S3 (boolean value) -#s3_use_ssl=false - -# Whether to affix the tenant id to the access key when -# downloading from S3 (boolean value) -#s3_affix_tenant=false - - -# -# Options defined in nova.ipv6.api -# - -# Backend to use for IPv6 generation (string value) -#ipv6_backend=rfc2462 - - -# -# Options defined in nova.network -# - -# The full class name of the network API class to use (string -# value) -network_api_class=nova.network.neutronv2.api.API - - -# -# Options defined in nova.network.driver -# - -# Driver to use for network creation (string value) -#network_driver=nova.network.linux_net - - -# -# Options defined in nova.network.floating_ips -# - -# Default pool for floating IPs (string value) -#default_floating_pool=nova - -# Autoassigning floating IP to VM (boolean value) -#auto_assign_floating_ip=false - -# Full class name for the DNS Manager for floating IPs (string -# value) -#floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver - -# Full class name for the DNS Manager for instance IPs (string -# value) -#instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver - -# Full class name for the DNS Zone for instance IPs (string -# value) -#instance_dns_domain= - - -# -# Options defined in nova.network.ldapdns -# - -# URL 
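Options like network_api_class=nova.network.neutronv2.api.API above name a class by dotted path, and at startup the path is split into module and attribute and imported dynamically. A sketch of that resolution, demonstrated on a stdlib class so it runs anywhere:

    import importlib

    def import_class(dotted_path):
        """Resolve 'package.module.Class' into the class object."""
        module_path, _, class_name = dotted_path.rpartition('.')
        module = importlib.import_module(module_path)
        return getattr(module, class_name)

    # Demonstrated with a stdlib class; the same mechanism would resolve
    # e.g. nova.network.neutronv2.api.API from the option above.
    OrderedDict = import_class('collections.OrderedDict')
    assert OrderedDict.__name__ == 'OrderedDict'
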
for LDAP server which will store DNS entries (string -# value) -#ldap_dns_url=ldap://ldap.example.com:389 - -# User for LDAP DNS (string value) -#ldap_dns_user=uid=admin,ou=people,dc=example,dc=org - -# Password for LDAP DNS (string value) -#ldap_dns_password=password - -# Hostmaster for LDAP DNS driver Statement of Authority -# (string value) -#ldap_dns_soa_hostmaster=hostmaster@example.org - -# DNS Servers for LDAP DNS driver (multi valued) -#ldap_dns_servers=dns.example.org - -# Base DN for DNS entries in LDAP (string value) -#ldap_dns_base_dn=ou=hosts,dc=example,dc=org - -# Refresh interval (in seconds) for LDAP DNS driver Statement -# of Authority (string value) -#ldap_dns_soa_refresh=1800 - -# Retry interval (in seconds) for LDAP DNS driver Statement of -# Authority (string value) -#ldap_dns_soa_retry=3600 - -# Expiry interval (in seconds) for LDAP DNS driver Statement -# of Authority (string value) -#ldap_dns_soa_expiry=86400 - -# Minimum interval (in seconds) for LDAP DNS driver Statement -# of Authority (string value) -#ldap_dns_soa_minimum=7200 - - -# -# Options defined in nova.network.linux_net -# - -# Location of flagfiles for dhcpbridge (multi valued) -#dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf - -# Location to keep network config files (string value) -#networks_path=$state_path/networks - -# Interface for public IP addresses (string value) -#public_interface=eth0 - -# Location of nova-dhcpbridge (string value) -#dhcpbridge=$bindir/nova-dhcpbridge - -# Public IP of network host (string value) -#routing_source_ip=$my_ip - -# Lifetime of a DHCP lease in seconds (integer value) -#dhcp_lease_time=86400 - -# If set, uses specific DNS server for dnsmasq. Can be -# specified multiple times. (multi valued) -#dns_server= - -# If set, uses the dns1 and dns2 from the network ref. as dns -# servers. (boolean value) -#use_network_dns_servers=false - -# A list of dmz range that should be accepted (list value) -#dmz_cidr= - -# Traffic to this range will always be snatted to the fallback -# ip, even if it would normally be bridged out of the node. -# Can be specified multiple times. (multi valued) -#force_snat_range= - -# Override the default dnsmasq settings with this file (string -# value) -#dnsmasq_config_file= - -# Driver used to create ethernet devices. (string value) -linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver - -# Name of Open vSwitch bridge used with linuxnet (string -# value) -#linuxnet_ovs_integration_bridge=br-int - -# Send gratuitous ARPs for HA setup (boolean value) -#send_arp_for_ha=false - -# Send this many gratuitous ARPs for HA setup (integer value) -#send_arp_for_ha_count=3 - -# Use single default gateway. Only first nic of vm will get -# default gateway from dhcp server (boolean value) -#use_single_default_gateway=false - -# An interface that bridges can forward to. If this is set to -# all then all traffic will be forwarded. Can be specified -# multiple times. (multi valued) -#forward_bridge_interface=all - -# The IP address for the metadata API server (string value) -#metadata_host=$my_ip - -# The port for the metadata API port (integer value) -#metadata_port=8775 - -# Regular expression to match iptables rule that should always -# be on the top. (string value) -#iptables_top_regex= - -# Regular expression to match iptables rule that should always -# be on the bottom. (string value) -#iptables_bottom_regex= - -# The table that iptables to jump to when a packet is to be -# dropped. 
(string value) -#iptables_drop_action=DROP - -# Amount of time, in seconds, that ovs_vsctl should wait for a -# response from the database. 0 is to wait forever. (integer -# value) -#ovs_vsctl_timeout=120 - -# If passed, use fake network devices and addresses (boolean -# value) -#fake_network=false - - -# -# Options defined in nova.network.manager -# - -# Bridge for simple network instances (string value) -#flat_network_bridge= - -# DNS server for simple network (string value) -#flat_network_dns=8.8.4.4 - -# Whether to attempt to inject network setup into guest -# (boolean value) -#flat_injected=false - -# FlatDhcp will bridge into this interface if set (string -# value) -#flat_interface= - -# First VLAN for private networks (integer value) -#vlan_start=100 - -# VLANs will bridge into this interface if set (string value) -#vlan_interface= - -# Number of networks to support (integer value) -#num_networks=1 - -# Public IP for the cloudpipe VPN servers (string value) -#vpn_ip=$my_ip - -# First Vpn port for private networks (integer value) -#vpn_start=1000 - -# Number of addresses in each private subnet (integer value) -#network_size=256 - -# Fixed IPv6 address block (string value) -#fixed_range_v6=fd00::/48 - -# Default IPv4 gateway (string value) -#gateway= - -# Default IPv6 gateway (string value) -#gateway_v6= - -# Number of addresses reserved for vpn clients (integer value) -#cnt_vpn_clients=0 - -# Seconds after which a deallocated IP is disassociated -# (integer value) -#fixed_ip_disassociate_timeout=600 - -# Number of attempts to create unique mac address (integer -# value) -#create_unique_mac_address_attempts=5 - -# If True, skip using the queue and make local calls (boolean -# value) -#fake_call=false - -# If True, unused gateway devices (VLAN and bridge) are -# deleted in VLAN network mode with multi hosted networks -# (boolean value) -#teardown_unused_network_gateway=false - -# If True, send a dhcp release on instance termination -# (boolean value) -#force_dhcp_release=true - -# If True, when a DNS entry must be updated, it sends a fanout -# cast to all network hosts to update their DNS entries in -# multi host mode (boolean value) -#update_dns_entries=false - -# Number of seconds to wait between runs of updates to DNS -# entries. (integer value) -#dns_update_periodic_interval=-1 - -# Domain to use for building the hostnames (string value) -#dhcp_domain=novalocal - -# Indicates underlying L3 management library (string value) -#l3_lib=nova.network.l3.LinuxNetL3 - - -# -# Options defined in nova.network.rpcapi -# - -# The topic network nodes listen on (string value) -#network_topic=network - -# Default value for multi_host in networks. Also, if set, some -# rpc network calls will be sent directly to host. (boolean -# value) -#multi_host=false - - -# -# Options defined in nova.network.security_group.openstack_driver -# - -# The full class name of the security API class (string value) -security_group_api=neutron - - -# -# Options defined in nova.objects.network -# - -# DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE -# NETWORK. If True in multi_host mode, all compute hosts share -# the same dhcp address. The same IP address used for DHCP -# will be added on each nova-network node which is only -# visible to the vms on the same host. (boolean value) -#share_dhcp_address=false - -# DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE -# NETWORK. MTU setting for network interface. 
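num_networks and network_size above carve a fixed range into equal private subnets; 256 addresses per network corresponds to an IPv4 /24. A sketch using the stdlib ipaddress module, with an illustrative supernet:

    import ipaddress
    import math

    fixed_range = ipaddress.ip_network('10.0.0.0/16')  # illustrative supernet
    num_networks = 4        # cf. num_networks above (default 1)
    network_size = 256      # addresses per subnet, as configured above

    # 256 addresses -> /24 in IPv4 (prefix = 32 - log2(size)).
    prefix = 32 - int(math.log2(network_size))
    subnets = list(fixed_range.subnets(new_prefix=prefix))[:num_networks]
    print([str(s) for s in subnets])
    # ['10.0.0.0/24', '10.0.1.0/24', '10.0.2.0/24', '10.0.3.0/24']
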
(integer value) -#network_device_mtu= - - -# -# Options defined in nova.objectstore.s3server -# - -# Path to S3 buckets (string value) -#buckets_path=$state_path/buckets - -# IP address for S3 API to listen (string value) -#s3_listen=0.0.0.0 - -# Port for S3 API to listen (integer value) -#s3_listen_port=3333 - - -# -# Options defined in nova.openstack.common.eventlet_backdoor -# - -# Enable eventlet backdoor. Acceptable values are 0, , -# and :, where 0 results in listening on a random -# tcp port number; results in listening on the -# specified port number (and not enabling backdoor if that -# port is in use); and : results in listening on -# the smallest unused port number within the specified range -# of port numbers. The chosen port is displayed in the -# service's log file. (string value) -#backdoor_port= - - -# -# Options defined in nova.openstack.common.lockutils -# - -# Enables or disables inter-process locks. (boolean value) -#disable_process_locking=false - -# Directory to use for lock files. (string value) -lock_path=/var/lock/nova - - -# -# Options defined in nova.openstack.common.log -# - -# Print debugging output (set logging level to DEBUG instead -# of default WARNING level). (boolean value) -#debug=false - -# Print more verbose output (set logging level to INFO instead -# of default WARNING level). (boolean value) -#verbose=false - -# Log output to standard error. (boolean value) -#use_stderr=true - -# Format string to use for log messages with context. (string -# value) -#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages without context. -# (string value) -#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Data to append to log format when level is DEBUG. (string -# value) -#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format. -# (string value) -#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s - -# List of logger=LEVEL pairs. (list value) -#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN - -# Enables or disables publication of error events. (boolean -# value) -#publish_errors=false - -# Enables or disables fatal status of deprecations. (boolean -# value) -#fatal_deprecations=false - -# The format for an instance that is passed with the log -# message. (string value) -#instance_format="[instance: %(uuid)s] " - -# The format for an instance UUID that is passed with the log -# message. (string value) -#instance_uuid_format="[instance: %(uuid)s] " - -# The name of a logging configuration file. This file is -# appended to any existing logging configuration files. For -# details about logging configuration files, see the Python -# logging module documentation. (string value) -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append= - -# DEPRECATED. A logging.Formatter log message format string -# which may use any of the available logging.LogRecord -# attributes. This option is deprecated. Please use -# logging_context_format_string and -# logging_default_format_string instead. 
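logging_context_format_string above is again a plain %-style Formatter string. Its non-standard fields (request_id, user_identity, instance) have to be supplied on each record, which Nova's context logging does automatically and which plain Python logging can mimic via extra=. A runnable sketch with made-up values:

    import logging

    fmt = ('%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s '
           '[%(request_id)s %(user_identity)s] %(instance)s%(message)s')

    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S'))
    log = logging.getLogger('nova.demo')
    log.addHandler(handler)
    log.setLevel(logging.INFO)

    # The extra dict fills the non-standard %()s slots in the format string.
    log.info('instance started', extra={
        'request_id': 'req-1234',
        'user_identity': 'demo demo',
        'instance': '[instance: 0f5a] ',
    })
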
(string value) -#log_format= - -# Format string for %%(asctime)s in log records. Default: -# %(default)s . (string value) -#log_date_format=%Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to output to. If no default is -# set, logging will go to stdout. (string value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file= - -# (Optional) The base directory used for relative --log-file -# paths. (string value) -# Deprecated group/name - [DEFAULT]/logdir -#log_dir= - -# Use syslog for logging. Existing syslog format is DEPRECATED -# during I, and will change in J to honor RFC5424. (boolean -# value) -use_syslog=True - -# (Optional) Enables or disables syslog rfc5424 format for -# logging. If enabled, prefixes the MSG part of the syslog -# message with APP-NAME (RFC5424). The format without the APP- -# NAME is deprecated in I, and will be removed in J. (boolean -# value) -#use_syslog_rfc_format=false - -# Syslog facility to receive log lines. (string value) -#syslog_log_facility=LOG_USER - - -# -# Options defined in nova.openstack.common.memorycache -# - -# Memcached servers or None for in process cache. (list value) -#memcached_servers= - - -# -# Options defined in nova.openstack.common.periodic_task -# - -# Some periodic tasks can be run in a separate process. Should -# we run them here? (boolean value) -#run_external_periodic_tasks=true - - -# -# Options defined in nova.openstack.common.policy -# - -# The JSON file that defines policies. (string value) -#policy_file=policy.json - -# Default rule. Enforced when a requested rule is not found. -# (string value) -#policy_default_rule=default - - -# -# Options defined in nova.pci.pci_request -# - -# An alias for a PCI passthrough device requirement. This -# allows users to specify the alias in the extra_spec for a -# flavor, without needing to repeat all the PCI property -# requirements. For example: pci_alias = { "name": -# "QuicAssist", "product_id": "0443", "vendor_id": "8086", -# "device_type": "ACCEL" } defines an alias for the Intel -# QuickAssist card. (multi valued) (multi valued) -#pci_alias= - - -# -# Options defined in nova.pci.pci_whitelist -# - -# White list of PCI devices available to VMs. For example: -# pci_passthrough_whitelist = [{"vendor_id": "8086", -# "product_id": "0443"}] (multi valued) -#pci_passthrough_whitelist= - - -# -# Options defined in nova.scheduler.driver -# - -# The scheduler host manager class to use (string value) -scheduler_host_manager={{ SCHEDULER_HOST_MANAGER }} - - -# -# Options defined in nova.scheduler.filter_scheduler -# - -# New instances will be scheduled on a host chosen randomly -# from a subset of the N best hosts. This property defines the -# subset size that a host is chosen from. A value of 1 chooses -# the first host returned by the weighing functions. This -# value must be at least 1. Any value less than 1 will be -# ignored, and 1 will be used instead (integer value) -#scheduler_host_subset_size=1 - - -# -# Options defined in nova.scheduler.filters.aggregate_image_properties_isolation -# - -# Force the filter to consider only keys matching the given -# namespace. (string value) -#aggregate_image_properties_isolation_namespace= - -# The separator used between the namespace and keys (string -# value) -#aggregate_image_properties_isolation_separator=. - - -# -# Options defined in nova.scheduler.filters.core_filter -# - -# Virtual CPU to physical CPU allocation ratio which affects -# all CPU filters. This configuration specifies a global ratio -# for CoreFilter. 
For AggregateCoreFilter, it will fall back -# to this configuration value if no per-aggregate setting -# found. (floating point value) -#cpu_allocation_ratio=16.0 - - -# -# Options defined in nova.scheduler.filters.disk_filter -# - -# Virtual disk to physical disk allocation ratio (floating -# point value) -#disk_allocation_ratio=1.0 - - -# -# Options defined in nova.scheduler.filters.io_ops_filter -# - -# Tells filters to ignore hosts that have this many or more -# instances currently in build, resize, snapshot, migrate, -# rescue or unshelve task states (integer value) -#max_io_ops_per_host=8 - - -# -# Options defined in nova.scheduler.filters.isolated_hosts_filter -# - -# Images to run on isolated host (list value) -#isolated_images= - -# Host reserved for specific images (list value) -#isolated_hosts= - -# Whether to force isolated hosts to run only isolated images -# (boolean value) -#restrict_isolated_hosts_to_isolated_images=true - - -# -# Options defined in nova.scheduler.filters.num_instances_filter -# - -# Ignore hosts that have too many instances (integer value) -#max_instances_per_host=50 - - -# -# Options defined in nova.scheduler.filters.ram_filter -# - -# Virtual ram to physical ram allocation ratio which affects -# all ram filters. This configuration specifies a global ratio -# for RamFilter. For AggregateRamFilter, it will fall back to -# this configuration value if no per-aggregate setting found. -# (floating point value) -ram_allocation_ratio={{ RAM_ALLOCATION_RATIO }} - - -# -# Options defined in nova.scheduler.host_manager -# - -# Filter classes available to the scheduler which may be -# specified more than once. An entry of -# "nova.scheduler.filters.standard_filters" maps to all -# filters included with nova. (multi valued) -#scheduler_available_filters=nova.scheduler.filters.all_filters - -# Which filter class names to use for filtering hosts when not -# specified in the request. (list value) -scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter - -# Which weight class names to use for weighing hosts (list -# value) -#scheduler_weight_classes=nova.scheduler.weights.all_weighers - - -# -# Options defined in nova.scheduler.ironic_host_manager -# - -# Which filter class names to use for filtering baremetal -# hosts when not specified in the request. (list value) -#baremetal_scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter - -# Flag to decide whether to use -# baremetal_scheduler_default_filters or not. (boolean value) -#scheduler_use_baremetal_filters=false - - -# -# Options defined in nova.scheduler.manager -# - -# Default driver to use for the scheduler (string value) -scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler - -# How often (in seconds) to run periodic tasks in the -# scheduler driver of your choice. Please note this is likely -# to interact with the value of service_down_time, but exactly -# how they interact will depend on your choice of scheduler -# driver. (integer value) -#scheduler_driver_task_period=60 - - -# -# Options defined in nova.scheduler.rpcapi -# - -# The topic scheduler nodes listen on (string value) -#scheduler_topic=scheduler - - -# -# Options defined in nova.scheduler.scheduler_options -# - -# Absolute path to scheduler configuration JSON file. 
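The RamFilter behavior behind ram_allocation_ratio above is a one-line oversubscription test: a host passes if used plus requested memory stays under total times the ratio. A sketch with hypothetical host numbers:

    # RamFilter-style check: oversubscription permitted up to the ratio.
    def ram_filter_passes(total_mb, used_mb, requested_mb, ram_allocation_ratio):
        limit_mb = total_mb * ram_allocation_ratio
        return used_mb + requested_mb <= limit_mb

    # A 32 GiB host at ratio 1.5 can promise up to 48 GiB of instance RAM.
    assert ram_filter_passes(32768, 40000, 8192, 1.5)      # 48192 <= 49152
    assert not ram_filter_passes(32768, 40000, 8192, 1.0)  # over 32768
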
(string -# value) -#scheduler_json_config_location= - - -# -# Options defined in nova.scheduler.utils -# - -# Maximum number of attempts to schedule an instance (integer -# value) -#scheduler_max_attempts=3 - - -# -# Options defined in nova.scheduler.weights.ram -# - -# Multiplier used for weighing ram. Negative numbers mean to -# stack vs spread. (floating point value) -#ram_weight_multiplier=1.0 - - -# -# Options defined in nova.servicegroup.api -# - -# The driver for servicegroup service (valid options are: db, -# zk, mc) (string value) -#servicegroup_driver=db - - -# -# Options defined in nova.virt.configdrive -# - -# Config drive format. One of iso9660 (default) or vfat -# (string value) -#config_drive_format=iso9660 - -# DEPRECATED (not needed any more): Where to put temporary -# files associated with config drive creation (string value) -#config_drive_tempdir= - -# Set to force injection to take place on a config drive (if -# set, valid options are: always) (string value) -#force_config_drive= - -# Name and optionally path of the tool used for ISO image -# creation (string value) -#mkisofs_cmd=genisoimage - - -# -# Options defined in nova.virt.disk.api -# - -# Name of the mkfs commands for ephemeral device. The format -# is = (multi valued) -#virt_mkfs= - -# Attempt to resize the filesystem by accessing the image over -# a block device. This is done by the host and may not be -# necessary if the image contains a recent version of cloud- -# init. Possible mechanisms require the nbd driver (for qcow -# and raw), or loop (for raw). (boolean value) -#resize_fs_using_block_device=false - - -# -# Options defined in nova.virt.disk.mount.nbd -# - -# Amount of time, in seconds, to wait for NBD device start up. -# (integer value) -#timeout_nbd=10 - - -# -# Options defined in nova.virt.driver -# - -# Driver to use for controlling virtualization. Options -# include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, -# fake.FakeDriver, baremetal.BareMetalDriver, -# vmwareapi.VMwareVCDriver, hyperv.HyperVDriver (string value) -compute_driver={{ COMPUTE_DRIVER }} - -# The default format an ephemeral_volume will be formatted -# with on creation. (string value) -#default_ephemeral_format= - -# VM image preallocation mode: "none" => no storage -# provisioning is done up front, "space" => storage is fully -# allocated at instance start (string value) -#preallocate_images=none - -# Whether to use cow images (boolean value) -#use_cow_images=true - -# Fail instance boot if vif plugging fails (boolean value) -#vif_plugging_is_fatal=true - -# Number of seconds to wait for neutron vif plugging events to -# arrive before continuing or failing (see -# vif_plugging_is_fatal). If this is set to zero and -# vif_plugging_is_fatal is False, events should not be -# expected to arrive at all. (integer value) -#vif_plugging_timeout=300 - - -# -# Options defined in nova.virt.firewall -# - -# Firewall driver (defaults to hypervisor specific iptables -# driver) (string value) -firewall_driver=nova.virt.firewall.NoopFirewallDriver - -# Whether to allow network traffic from same network (boolean -# value) -#allow_same_net_traffic=true - - -# -# Options defined in nova.virt.hardware -# - -# Defines which pcpus that instance vcpus can use. For -# example, "4-12,^8,15" (string value) -#vcpu_pin_set= - - -# -# Options defined in nova.virt.imagecache -# - -# Number of seconds to wait between runs of the image cache -# manager. Set to -1 to disable. 
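ram_weight_multiplier above is the spread-versus-stack control: hosts are weighed by free RAM, the weights are normalized, and the multiplier scales (or, when negative, inverts) the result so the scheduler either spreads onto the freest host or packs onto the fullest. A sketch of that normalization, with assumed host data:

    # Weigh hosts by free RAM the way ram_weight_multiplier is applied:
    # a positive multiplier spreads (prefer most free), a negative one stacks.
    def weigh_hosts(free_mb_by_host, multiplier=1.0):
        lo, hi = min(free_mb_by_host.values()), max(free_mb_by_host.values())
        span = (hi - lo) or 1
        return {
            host: multiplier * (free - lo) / span   # normalized to [0, 1]
            for host, free in free_mb_by_host.items()
        }

    hosts = {'node1': 2048, 'node2': 8192, 'node3': 512}
    spread = weigh_hosts(hosts, 1.0)    # node2 wins (most free RAM)
    stack = weigh_hosts(hosts, -1.0)    # node3 wins (least free RAM)
    assert max(spread, key=spread.get) == 'node2'
    assert max(stack, key=stack.get) == 'node3'
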
Setting this to 0 will -# disable, but this will change in the K release to mean "run -# at the default rate". (integer value) -#image_cache_manager_interval=2400 - -# Where cached images are stored under $instances_path. This -# is NOT the full path - just a folder name. For per-compute- -# host cached images, set to _base_$my_ip (string value) -#image_cache_subdirectory_name=_base - -# Should unused base images be removed? (boolean value) -#remove_unused_base_images=true - -# Unused unresized base images younger than this will not be -# removed (integer value) -#remove_unused_original_minimum_age_seconds=86400 - - -# -# Options defined in nova.virt.images -# - -# Force backing images to raw format (boolean value) -#force_raw_images=true - - -# -# Options defined in nova.virt.netutils -# - -# Template file for injected network (string value) -#injected_network_template=$pybasedir/nova/virt/interfaces.template - - -# -# Options defined in nova.vnc -# - -# Location of VNC console proxy, in the form -# "http://127.0.0.1:6080/vnc_auto.html" (string value) -novncproxy_base_url=http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:6080/vnc_auto.html - -# Location of nova xvp VNC console proxy, in the form -# "http://127.0.0.1:6081/console" (string value) -#xvpvncproxy_base_url=http://127.0.0.1:6081/console - -# IP address on which instance vncservers should listen -# (string value) -vncserver_listen=0.0.0.0 - -# The address to which proxy clients (like nova-xvpvncproxy) -# should connect (string value) -vncserver_proxyclient_address={{ MANAGEMENT_INTERFACE_IP_ADDRESS }} - -# Enable VNC related features (boolean value) -vnc_enabled=true - -# Keymap for VNC (string value) -vnc_keymap=en-us - - -# -# Options defined in nova.vnc.xvp_proxy -# - -# Port that the XCP VNC proxy should bind to (integer value) -#xvpvncproxy_port=6081 - -# Address that the XCP VNC proxy should bind to (string value) -#xvpvncproxy_host=0.0.0.0 - - -# -# Options defined in nova.volume -# - -# The full class name of the volume API class to use (string -# value) -#volume_api_class=nova.volume.cinder.API - - -[baremetal] - -# -# Options defined in nova.virt.baremetal.db.api -# - -# The backend to use for bare-metal database (string value) -#db_backend=sqlalchemy - - -# -# Options defined in nova.virt.baremetal.db.sqlalchemy.session -# - -# The SQLAlchemy connection string used to connect to the -# bare-metal database (string value) -#sql_connection=sqlite:///$state_path/baremetal_nova.sqlite - - -# -# Options defined in nova.virt.baremetal.driver -# - -# Baremetal VIF driver. (string value) -#vif_driver=nova.virt.baremetal.vif_driver.BareMetalVIFDriver - -# Baremetal volume driver. (string value) -#volume_driver=nova.virt.baremetal.volume_driver.LibvirtVolumeDriver - -# A list of additional capabilities corresponding to -# flavor_extra_specs for this compute host to advertise. 
Valid -# entries are name=value pairs. For example, "key1:val1, -# key2:val2" (list value) -#flavor_extra_specs= - -# Baremetal driver back-end (pxe or tilera) (string value) -#driver=nova.virt.baremetal.pxe.PXE - -# Baremetal power management method (string value) -#power_manager=nova.virt.baremetal.ipmi.IPMI - -# Baremetal compute node's tftp root path (string value) -#tftp_root=/tftpboot - - -# -# Options defined in nova.virt.baremetal.ipmi -# - -# Path to baremetal terminal program (string value) -#terminal=shellinaboxd - -# Path to baremetal terminal SSL cert (PEM) (string value) -#terminal_cert_dir= - -# Path to the directory that stores pidfiles of baremetal_terminal -# (string value) -#terminal_pid_dir=$state_path/baremetal/console - -# Maximum number of retries for IPMI operations (integer -# value) -#ipmi_power_retry=10 - - -# -# Options defined in nova.virt.baremetal.pxe -# - -# Default kernel image ID used in deployment phase (string -# value) -#deploy_kernel= - -# Default ramdisk image ID used in deployment phase (string -# value) -#deploy_ramdisk= - -# Template file for injected network config (string value) -#net_config_template=$pybasedir/nova/virt/baremetal/net-dhcp.ubuntu.template - -# Additional append parameters for baremetal PXE boot (string -# value) -#pxe_append_params=nofb nomodeset vga=normal - -# Template file for PXE configuration (string value) -#pxe_config_template=$pybasedir/nova/virt/baremetal/pxe_config.template - -# If True, enable file injection for network info, files and -# admin password (boolean value) -#use_file_injection=false - -# Timeout for PXE deployments. Default: 0 (unlimited) (integer -# value) -#pxe_deploy_timeout=0 - -# If set, pass the network configuration details to the -# initramfs via cmdline. (boolean value) -#pxe_network_config=false - -# This gets passed to Neutron as the bootfile dhcp parameter. -# (string value) -#pxe_bootfile_name=pxelinux.0 - - -# -# Options defined in nova.virt.baremetal.tilera_pdu -# - -# IP address of tilera pdu (string value) -#tile_pdu_ip=10.0.100.1 - -# Management script for tilera pdu (string value) -#tile_pdu_mgr=/tftpboot/pdu_mgr - -# Power status of tilera PDU is OFF (integer value) -#tile_pdu_off=2 - -# Power status of tilera PDU is ON (integer value) -#tile_pdu_on=1 - -# Power status of tilera PDU (integer value) -#tile_pdu_status=9 - -# Wait time in seconds before checking the result after tilera -# power operations (integer value) -#tile_power_wait=9 - - -# -# Options defined in nova.virt.baremetal.virtual_power_driver -# - -# IP or name of virtual power host (string value) -#virtual_power_ssh_host= - -# Port to use for ssh to virtual power host (integer value) -#virtual_power_ssh_port=22 - -# Base command to use for virtual power (vbox, virsh) (string -# value) -#virtual_power_type=virsh - -# User to execute virtual power commands as (string value) -#virtual_power_host_user= - -# Password for virtual power host_user (string value) -#virtual_power_host_pass= - -# The ssh key for virtual power host_user (string value) -#virtual_power_host_key= - - -# -# Options defined in nova.virt.baremetal.volume_driver -# - -# Do not set this out of dev/test environments. If a node does -# not have a fixed PXE IP address, volumes are exported with a -# globally open ACL (boolean value) -#use_unsafe_iscsi=false - -# The iSCSI IQN prefix used in baremetal volume connections.
-# (string value) -#iscsi_iqn_prefix=iqn.2010-10.org.openstack.baremetal - - -[cells] - -# -# Options defined in nova.cells.manager -# - -# Cells communication driver to use (string value) -#driver=nova.cells.rpc_driver.CellsRPCDriver - -# Number of seconds after an instance was updated or deleted -# to continue to update cells (integer value) -#instance_updated_at_threshold=3600 - -# Number of instances to update per periodic task run (integer -# value) -#instance_update_num_instances=1 - - -# -# Options defined in nova.cells.messaging -# - -# Maximum number of hops for cells routing. (integer value) -#max_hop_count=10 - -# Cells scheduler to use (string value) -#scheduler=nova.cells.scheduler.CellsScheduler - - -# -# Options defined in nova.cells.opts -# - -# Enable cell functionality (boolean value) -#enable=false - -# The topic cells nodes listen on (string value) -#topic=cells - -# Manager for cells (string value) -#manager=nova.cells.manager.CellsManager - -# Name of this cell (string value) -#name=nova - -# Key/Multi-value list with the capabilities of the cell (list -# value) -#capabilities=hypervisor=xenserver;kvm,os=linux;windows - -# Seconds to wait for response from a call to a cell. (integer -# value) -#call_timeout=60 - -# Percentage of cell capacity to hold in reserve. Affects both -# memory and disk utilization (floating point value) -#reserve_percent=10.0 - -# Type of cell: api or compute (string value) -#cell_type=compute - -# Number of seconds after which a lack of capability and -# capacity updates signals the child cell is to be treated as -# a mute. (integer value) -#mute_child_interval=300 - -# Seconds between bandwidth updates for cells. (integer value) -#bandwidth_update_interval=600 - - -# -# Options defined in nova.cells.rpc_driver -# - -# Base queue name to use when communicating between cells. -# Various topics by message type will be appended to this. -# (string value) -#rpc_driver_queue_base=cells.intercell - - -# -# Options defined in nova.cells.scheduler -# - -# Filter classes the cells scheduler should use. An entry of -# "nova.cells.filters.all_filters" maps to all cells filters -# included with nova. (list value) -#scheduler_filter_classes=nova.cells.filters.all_filters - -# Weigher classes the cells scheduler should use. An entry of -# "nova.cells.weights.all_weighers" maps to all cell weighers -# included with nova. (list value) -#scheduler_weight_classes=nova.cells.weights.all_weighers - -# How many retries when no cells are available. (integer -# value) -#scheduler_retries=10 - -# How often to retry in seconds when no cells are available. -# (integer value) -#scheduler_retry_delay=2 - - -# -# Options defined in nova.cells.state -# - -# Interval, in seconds, for getting fresh cell information -# from the database. (integer value) -#db_check_interval=60 - -# Configuration file from which to read cells configuration. -# If given, overrides reading cells from the database. (string -# value) -#cells_config= - - -# -# Options defined in nova.cells.weights.mute_child -# - -# Multiplier used to weigh mute children. (The value should be -# negative.) (floating point value) -#mute_weight_multiplier=-10.0 - -# Weight value assigned to mute children. (The value should be -# positive.) (floating point value) -#mute_weight_value=1000.0 - - -# -# Options defined in nova.cells.weights.ram_by_instance_type -# - -# Multiplier used for weighing ram. Negative numbers mean to -# stack vs spread. 
(floating point value) -#ram_weight_multiplier=10.0 - - -# -# Options defined in nova.cells.weights.weight_offset -# - -# Multiplier used to weigh offset weigher. (floating point -# value) -#offset_weight_multiplier=1.0 - - -[cinder] - -# -# Options defined in nova.volume.cinder -# - -# Info to match when looking for cinder in the service -# catalog. Format is: separated values of the form: -# :: (string value) -# Deprecated group/name - [DEFAULT]/cinder_catalog_info -#catalog_info=volume:cinder:publicURL - -# Override service catalog lookup with template for cinder -# endpoint e.g. http://localhost:8776/v1/%(project_id)s -# (string value) -# Deprecated group/name - [DEFAULT]/cinder_endpoint_template -#endpoint_template= - -# Region name of this node (string value) -# Deprecated group/name - [DEFAULT]/os_region_name -#os_region_name= - -# Location of ca certificates file to use for cinder client -# requests. (string value) -# Deprecated group/name - [DEFAULT]/cinder_ca_certificates_file -#ca_certificates_file= - -# Number of cinderclient retries on failed http calls (integer -# value) -# Deprecated group/name - [DEFAULT]/cinder_http_retries -#http_retries=3 - -# HTTP inactivity timeout (in seconds) (integer value) -# Deprecated group/name - [DEFAULT]/cinder_http_timeout -#http_timeout= - -# Allow to perform insecure SSL requests to cinder (boolean -# value) -# Deprecated group/name - [DEFAULT]/cinder_api_insecure -#api_insecure=false - -# Allow attach between instance and volume in different -# availability zones. (boolean value) -# Deprecated group/name - [DEFAULT]/cinder_cross_az_attach -#cross_az_attach=true - - -[conductor] - -# -# Options defined in nova.conductor.api -# - -# Perform nova-conductor operations locally (boolean value) -use_local=true - -# The topic on which conductor nodes listen (string value) -#topic=conductor - -# Full class name for the Manager for conductor (string value) -#manager=nova.conductor.manager.ConductorManager - -# Number of workers for OpenStack Conductor service. The -# default will be the number of CPUs available. (integer -# value) -#workers= - - -[ephemeral_storage_encryption] - -# -# Options defined in nova.compute.api -# - -# Whether to encrypt ephemeral storage (boolean value) -#enabled=false - -# The cipher and mode to be used to encrypt ephemeral storage. -# Which ciphers are available ciphers depends on kernel -# support. See /proc/crypto for the list of available options. -# (string value) -#cipher=aes-xts-plain64 - -# The bit length of the encryption key to be used to encrypt -# ephemeral storage (in XTS mode only half of the bits are -# used for encryption key) (integer value) -#key_size=512 - - -[glance] - -# -# Options defined in nova.image.glance -# - -# Default glance hostname or IP address (string value) -# Deprecated group/name - [DEFAULT]/glance_host -host={{ CONTROLLER_HOST_ADDRESS }} - -# Default glance port (integer value) -# Deprecated group/name - [DEFAULT]/glance_port -port=9292 - -# Default protocol to use when connecting to glance. Set to -# https for SSL. (string value) -# Deprecated group/name - [DEFAULT]/glance_protocol -protocol=http - -# A list of the glance api servers available to nova. Prefix -# with https:// for ssl-based glance api servers. 
-# ([hostname|ip]:port) (list value) -# Deprecated group/name - [DEFAULT]/glance_api_servers -api_servers=$host:$port - -# Allow to perform insecure SSL (https) requests to glance -# (boolean value) -# Deprecated group/name - [DEFAULT]/glance_api_insecure -#api_insecure=false - -# Number of retries when downloading an image from glance -# (integer value) -# Deprecated group/name - [DEFAULT]/glance_num_retries -#num_retries=0 - -# A list of url scheme that can be downloaded directly via the -# direct_url. Currently supported schemes: [file]. (list -# value) -#allowed_direct_url_schemes= - - -[hyperv] - -# -# Options defined in nova.virt.hyperv.pathutils -# - -# The name of a Windows share name mapped to the -# "instances_path" dir and used by the resize feature to copy -# files to the target host. If left blank, an administrative -# share will be used, looking for the same "instances_path" -# used locally (string value) -#instances_path_share= - - -# -# Options defined in nova.virt.hyperv.utilsfactory -# - -# Force V1 WMI utility classes (boolean value) -#force_hyperv_utils_v1=false - -# Force V1 volume utility class (boolean value) -#force_volumeutils_v1=false - - -# -# Options defined in nova.virt.hyperv.vif -# - -# External virtual switch Name, if not provided, the first -# external virtual switch is used (string value) -#vswitch_name= - - -# -# Options defined in nova.virt.hyperv.vmops -# - -# Required for live migration among hosts with different CPU -# features (boolean value) -#limit_cpu_features=false - -# Sets the admin password in the config drive image (boolean -# value) -#config_drive_inject_password=false - -# Path of qemu-img command which is used to convert between -# different image types (string value) -#qemu_img_cmd=qemu-img.exe - -# Attaches the Config Drive image as a cdrom drive instead of -# a disk drive (boolean value) -#config_drive_cdrom=false - -# Enables metrics collections for an instance by using -# Hyper-V's metric APIs. Collected data can by retrieved by -# other apps and services, e.g.: Ceilometer. Requires Hyper-V -# / Windows Server 2012 and above (boolean value) -#enable_instance_metrics_collection=false - -# Enables dynamic memory allocation (ballooning) when set to a -# value greater than 1. The value expresses the ratio between -# the total RAM assigned to an instance and its startup RAM -# amount. For example a ratio of 2.0 for an instance with -# 1024MB of RAM implies 512MB of RAM allocated at startup -# (floating point value) -#dynamic_memory_ratio=1.0 - -# Number of seconds to wait for instance to shut down after -# soft reboot request is made. We fall back to hard reboot if -# instance does not shutdown within this window. (integer -# value) -#wait_soft_reboot_seconds=60 - - -# -# Options defined in nova.virt.hyperv.volumeops -# - -# The number of times to retry to attach a volume (integer -# value) -#volume_attach_retry_count=10 - -# Interval between volume attachment attempts, in seconds -# (integer value) -#volume_attach_retry_interval=5 - -# The number of times to retry checking for a disk mounted via -# iSCSI. (integer value) -#mounted_disk_query_retry_count=10 - -# Interval between checks for a mounted iSCSI disk, in -# seconds. 
(integer value) -#mounted_disk_query_retry_interval=5 - - -[image_file_url] - -# -# Options defined in nova.image.download.file -# - -# List of file systems that are configured in this file in the -# image_file_url: sections (list value) -#filesystems= - - -[ironic] - -# -# Options defined in nova.virt.ironic.driver -# - -# Version of Ironic API service endpoint. (integer value) -#api_version=1 - -# URL for Ironic API endpoint. (string value) -api_endpoint=http://{{ CONTROLLER_HOST_ADDRESS }}:6385/v1 - -# Ironic keystone admin name (string value) -admin_username={{ IRONIC_SERVICE_USER }} - -# Ironic keystone admin password. (string value) -admin_password={{ IRONIC_SERVICE_PASSWORD }} - -# Ironic keystone auth token. (string value) -#admin_auth_token= - -# Keystone public API endpoint. (string value) -admin_url=http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0 - -# Log level override for ironicclient. Set this in order to -# override the global "default_log_levels", "verbose", and -# "debug" settings. (string value) -#client_log_level= - -# Ironic keystone tenant name. (string value) -admin_tenant_name=service - -# How many retries when a request does conflict. (integer -# value) -#api_max_retries=60 - -# How often to retry in seconds when a request does conflict -# (integer value) -#api_retry_interval=2 - - -[keymgr] - -# -# Options defined in nova.keymgr -# - -# The full class name of the key manager API class (string -# value) -#api_class=nova.keymgr.conf_key_mgr.ConfKeyManager - - -# -# Options defined in nova.keymgr.conf_key_mgr -# - -# Fixed key returned by key manager, specified in hex (string -# value) -#fixed_key= - - -[keystone_authtoken] - -# -# Options defined in keystonemiddleware.auth_token -# - -# Prefix to prepend at the beginning of the path. Deprecated, -# use identity_uri. (string value) -#auth_admin_prefix= - -# Host providing the admin Identity API endpoint. Deprecated, -# use identity_uri. (string value) -#auth_host=127.0.0.1 - -# Port of the admin Identity API endpoint. Deprecated, use -# identity_uri. (integer value) -auth_port=35357 - -# Protocol of the admin Identity API endpoint (http or https). -# Deprecated, use identity_uri. (string value) -auth_protocol=http - -# Complete public Identity API endpoint (string value) -auth_uri=http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0 - -# Complete admin Identity API endpoint. This should specify -# the unversioned root endpoint e.g. https://localhost:35357/ -# (string value) -identity_uri=http://{{ CONTROLLER_HOST_ADDRESS }}:35357 - -# API version of the admin Identity API endpoint (string -# value) -auth_version=v2.0 - -# Do not handle authorization requests within the middleware, -# but delegate the authorization decision to downstream WSGI -# components (boolean value) -#delay_auth_decision=false - -# Request timeout value for communicating with Identity API -# server. (boolean value) -#http_connect_timeout= - -# How many times are we trying to reconnect when communicating -# with Identity API Server. (integer value) -#http_request_max_retries=3 - -# This option is deprecated and may be removed in a future -# release. Single shared secret with the Keystone -# configuration used for bootstrapping a Keystone -# installation, or otherwise bypassing the normal -# authentication process. This option should not be used, use -# `admin_user` and `admin_password` instead. 
(string value) -#admin_token= - -# Keystone account username (string value) -admin_user={{ NOVA_SERVICE_USER }} - -# Keystone account password (string value) -admin_password={{ NOVA_SERVICE_PASSWORD }} - -# Keystone service account tenant name to validate user tokens -# (string value) -admin_tenant_name=service - -# Env key for the swift cache (string value) -#cache= - -# Required if Keystone server requires client certificate -# (string value) -#certfile= - -# Required if Keystone server requires client certificate -# (string value) -#keyfile= - -# A PEM encoded Certificate Authority to use when verifying -# HTTPs connections. Defaults to system CAs. (string value) -#cafile= - -# Verify HTTPS connections. (boolean value) -#insecure=false - -# Directory used to cache files related to PKI tokens (string -# value) -#signing_dir= - -# Optionally specify a list of memcached server(s) to use for -# caching. If left undefined, tokens will instead be cached -# in-process. (list value) -# Deprecated group/name - [DEFAULT]/memcache_servers -#memcached_servers= - -# In order to prevent excessive effort spent validating -# tokens, the middleware caches previously-seen tokens for a -# configurable duration (in seconds). Set to -1 to disable -# caching completely. (integer value) -#token_cache_time=300 - -# Determines the frequency at which the list of revoked tokens -# is retrieved from the Identity service (in seconds). A high -# number of revocation events combined with a low cache -# duration may significantly reduce performance. (integer -# value) -#revocation_cache_time=10 - -# (optional) if defined, indicate whether token data should be -# authenticated or authenticated and encrypted. Acceptable -# values are MAC or ENCRYPT. If MAC, token data is -# authenticated (with HMAC) in the cache. If ENCRYPT, token -# data is encrypted and authenticated in the cache. If the -# value is not one of these options or empty, auth_token will -# raise an exception on initialization. (string value) -#memcache_security_strategy= - -# (optional, mandatory if memcache_security_strategy is -# defined) this string is used for key derivation. (string -# value) -#memcache_secret_key= - -# (optional) number of seconds memcached server is considered -# dead before it is tried again. (integer value) -#memcache_pool_dead_retry=300 - -# (optional) max total number of open connections to every -# memcached server. (integer value) -#memcache_pool_maxsize=10 - -# (optional) socket timeout in seconds for communicating with -# a memcache server. (integer value) -#memcache_pool_socket_timeout=3 - -# (optional) number of seconds a connection to memcached is -# held unused in the pool before it is closed. (integer value) -#memcache_pool_unused_timeout=60 - -# (optional) number of seconds that an operation will wait to -# get a memcache client connection from the pool. (integer -# value) -#memcache_pool_conn_get_timeout=10 - -# (optional) use the advanced (eventlet safe) memcache client -# pool. The advanced pool will only work under python 2.x. -# (boolean value) -#memcache_use_advanced_pool=false - -# (optional) indicate whether to set the X-Service-Catalog -# header. If False, middleware will not ask for service -# catalog on token validation and will not set the X-Service- -# Catalog header. (boolean value) -#include_service_catalog=true - -# Used to control the use and type of token binding. Can be -# set to: "disabled" to not check token binding. 
"permissive" -# (default) to validate binding information if the bind type -# is of a form known to the server and ignore it if not. -# "strict" like "permissive" but if the bind type is unknown -# the token will be rejected. "required" any form of token -# binding is needed to be allowed. Finally the name of a -# binding method that must be present in tokens. (string -# value) -#enforce_token_bind=permissive - -# If true, the revocation list will be checked for cached -# tokens. This requires that PKI tokens are configured on the -# Keystone server. (boolean value) -#check_revocations_for_cached=false - -# Hash algorithms to use for hashing PKI tokens. This may be a -# single algorithm or multiple. The algorithms are those -# supported by Python standard hashlib.new(). The hashes will -# be tried in the order given, so put the preferred one first -# for performance. The result of the first hash will be stored -# in the cache. This will typically be set to multiple values -# only while migrating from a less secure algorithm to a more -# secure one. Once all the old tokens are expired this option -# should be set to a single value for better performance. -# (list value) -#hash_algorithms=md5 - - -[libvirt] - -# -# Options defined in nova.virt.libvirt.driver -# - -# Rescue ami image. This will not be used if an image id is -# provided by the user. (string value) -#rescue_image_id= - -# Rescue aki image (string value) -#rescue_kernel_id= - -# Rescue ari image (string value) -#rescue_ramdisk_id= - -# Libvirt domain type (valid options are: kvm, lxc, qemu, uml, -# xen) (string value) -virt_type={{ NOVA_VIRT_TYPE }} - -# Override the default libvirt URI (which is dependent on -# virt_type) (string value) -#connection_uri= - -# Inject the admin password at boot time, without an agent. -# (boolean value) -#inject_password=false - -# Inject the ssh public key at boot time (boolean value) -#inject_key=false - -# The partition to inject to : -2 => disable, -1 => inspect -# (libguestfs only), 0 => not partitioned, >0 => partition -# number (integer value) -#inject_partition=-2 - -# Sync virtual and real mouse cursors in Windows VMs (boolean -# value) -#use_usb_tablet=true - -# Migration target URI (any included "%s" is replaced with the -# migration target hostname) (string value) -#live_migration_uri=qemu+tcp://%s/system - -# Migration flags to be set for live migration (string value) -#live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED - -# Migration flags to be set for block migration (string value) -#block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, VIR_MIGRATE_NON_SHARED_INC - -# Maximum bandwidth to be used during migration, in Mbps -# (integer value) -#live_migration_bandwidth=0 - -# Snapshot image format (valid options are : raw, qcow2, vmdk, -# vdi). Defaults to same as source image (string value) -#snapshot_image_format= - -# DEPRECATED. Libvirt handlers for remote volumes. This option -# is deprecated and will be removed in the Kilo release. 
(list -# value) -#volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver,aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver,glusterfs=nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver,fibre_channel=nova.virt.libvirt.volume.LibvirtFibreChannelVolumeDriver,scality=nova.virt.libvirt.volume.LibvirtScalityVolumeDriver - -# Override the default disk prefix for the devices attached to -# a server, which is dependent on virt_type. (valid options -# are: sd, xvd, uvd, vd) (string value) -#disk_prefix= - -# Number of seconds to wait for instance to shut down after -# soft reboot request is made. We fall back to hard reboot if -# instance does not shutdown within this window. (integer -# value) -#wait_soft_reboot_seconds=120 - -# Set to "host-model" to clone the host CPU feature flags; to -# "host-passthrough" to use the host CPU model exactly; to -# "custom" to use a named CPU model; to "none" to not set any -# CPU model. If virt_type="kvm|qemu", it will default to -# "host-model", otherwise it will default to "none" (string -# value) -#cpu_mode= - -# Set to a named libvirt CPU model (see names listed in -# /usr/share/libvirt/cpu_map.xml). Only has effect if -# cpu_mode="custom" and virt_type="kvm|qemu" (string value) -#cpu_model= - -# Location where libvirt driver will store snapshots before -# uploading them to image service (string value) -#snapshots_directory=$instances_path/snapshots - -# Location where the Xen hvmloader is kept (string value) -#xen_hvmloader_path=/usr/lib/xen/boot/hvmloader - -# Specific cachemodes to use for different disk types e.g: -# file=directsync,block=none (list value) -#disk_cachemodes= - -# A path to a device that will be used as source of entropy on -# the host. Permitted options are: /dev/random or /dev/hwrng -# (string value) -#rng_dev_path= - -# For qemu or KVM guests, set this option to specify a default -# machine type per host architecture. You can find a list of -# supported machine types in your environment by checking the -# output of the "virsh capabilities"command. The format of the -# value for this config option is host-arch=machine-type. For -# example: x86_64=machinetype1,armv7l=machinetype2 (list -# value) -#hw_machine_type= - -# The data source used to the populate the host "serial" UUID -# exposed to guest in the virtual BIOS. Permitted options are -# "hardware", "os", "none" or "auto" (default). (string value) -#sysinfo_serial=auto - -# A number of seconds to memory usage statistics period. Zero -# or negative value mean to disable memory usage statistics. -# (integer value) -#mem_stats_period_seconds=10 - -# List of uid targets and ranges.Syntax is guest-uid:host- -# uid:countMaximum of 5 allowed. (list value) -#uid_maps= - -# List of guid targets and ranges.Syntax is guest-gid:host- -# gid:countMaximum of 5 allowed. (list value) -#gid_maps= - - -# -# Options defined in nova.virt.libvirt.imagebackend -# - -# VM Images format. Acceptable values are: raw, qcow2, lvm, -# rbd, default. If default is specified, then use_cow_images -# flag is used instead of this one. (string value) -#images_type=default - -# LVM Volume Group that is used for VM images, when you -# specify images_type=lvm. 
(string value) -#images_volume_group= - -# Create sparse logical volumes (with virtualsize) if this -# flag is set to True. (boolean value) -#sparse_logical_volumes=false - -# Method used to wipe old volumes (valid options are: none, -# zero, shred) (string value) -#volume_clear=zero - -# Size in MiB to wipe at start of old volumes. 0 => all -# (integer value) -#volume_clear_size=0 - -# The RADOS pool in which rbd volumes are stored (string -# value) -#images_rbd_pool=rbd - -# Path to the ceph configuration file to use (string value) -#images_rbd_ceph_conf= - -# Discard option for nova managed disks (valid options are: -# ignore, unmap). Needs Libvirt (1.0.6), Qemu 1.5 (raw format) or -# Qemu 1.6 (qcow2 format) (string value) -#hw_disk_discard= - - -# -# Options defined in nova.virt.libvirt.imagecache -# - -# Allows image information files to be stored in non-standard -# locations (string value) -#image_info_filename_pattern=$instances_path/$image_cache_subdirectory_name/%(image)s.info - -# Should unused kernel images be removed? This is only safe to -# enable if all compute nodes have been updated to support -# this option. This will be enabled by default in future. -# (boolean value) -#remove_unused_kernels=false - -# Unused resized base images younger than this will not be -# removed (integer value) -#remove_unused_resized_minimum_age_seconds=3600 - -# Write a checksum for files in _base to disk (boolean value) -#checksum_base_images=false - -# How frequently to checksum base images (integer value) -#checksum_interval_seconds=3600 - - -# -# Options defined in nova.virt.libvirt.utils -# - -# Compress snapshot images when possible. This currently -# applies exclusively to qcow2 images (boolean value) -#snapshot_compression=false - - -# -# Options defined in nova.virt.libvirt.vif -# - -# Use virtio for bridge interfaces with KVM/QEMU (boolean -# value) -#use_virtio_for_bridges=true - - -# -# Options defined in nova.virt.libvirt.volume -# - -# Number of times to rescan iSCSI target to find volume -# (integer value) -#num_iscsi_scan_tries=5 - -# Number of times to rescan iSER target to find volume -# (integer value) -#num_iser_scan_tries=5 - -# The RADOS client name for accessing rbd volumes (string -# value) -#rbd_user= - -# The libvirt UUID of the secret for the rbd_user volumes -# (string value) -#rbd_secret_uuid= - -# Directory where the NFS volume is mounted on the compute -# node (string value) -#nfs_mount_point_base=$state_path/mnt - -# Mount options passed to the NFS client. See the nfs man -# page for details (string value) -#nfs_mount_options= - -# Number of times to rediscover AoE target to find volume -# (integer value) -#num_aoe_discover_tries=3 - -# Directory where the glusterfs volume is mounted on the -# compute node (string value) -#glusterfs_mount_point_base=$state_path/mnt - -# Use multipath connection of the iSCSI volume (boolean value) -#iscsi_use_multipath=false - -# Use multipath connection of the iSER volume (boolean value) -#iser_use_multipath=false - -# Path or URL to Scality SOFS configuration file (string -# value) -#scality_sofs_config= - -# Base dir where Scality SOFS shall be mounted (string value) -#scality_sofs_mount_point=$state_path/scality - -# Protocols listed here will be accessed directly from QEMU. -# Currently supported protocols: [gluster] (list value) -#qemu_allowed_storage_drivers= - - -[matchmaker_redis] - -# -# Options defined in oslo.messaging -# - -# Host to locate redis.
(string value) -#host=127.0.0.1 - -# Use this port to connect to redis host. (integer value) -#port=6379 - -# Password for Redis server (optional). (string value) -#password= - - -[matchmaker_ring] - -# -# Options defined in oslo.messaging -# - -# Matchmaker ring file (JSON). (string value) -# Deprecated group/name - [DEFAULT]/matchmaker_ringfile -#ringfile=/etc/oslo/matchmaker_ring.json - - -[metrics] - -# -# Options defined in nova.scheduler.weights.metrics -# - -# Multiplier used for weighing metrics. (floating point value) -#weight_multiplier=1.0 - -# How the metrics are going to be weighed. This should be in -# the form of "=, =, ...", where -# is one of the metrics to be weighed, and is -# the corresponding ratio. So for "name1=1.0, name2=-1.0" The -# final weight would be name1.value * 1.0 + name2.value * -# -1.0. (list value) -#weight_setting= - -# How to treat the unavailable metrics. When a metric is NOT -# available for a host, if it is set to be True, it would -# raise an exception, so it is recommended to use the -# scheduler filter MetricFilter to filter out those hosts. If -# it is set to be False, the unavailable metric would be -# treated as a negative factor in weighing process, the -# returned value would be set by the option -# weight_of_unavailable. (boolean value) -#required=true - -# The final weight value to be returned if required is set to -# False and any one of the metrics set by weight_setting is -# unavailable. (floating point value) -#weight_of_unavailable=-10000.0 - - -[neutron] - -# -# Options defined in nova.api.metadata.handler -# - -# Set flag to indicate Neutron will proxy metadata requests -# and resolve instance ids. (boolean value) -# Deprecated group/name - [DEFAULT]/service_neutron_metadata_proxy -service_metadata_proxy=True - -# Shared secret to validate proxies Neutron metadata requests -# (string value) -# Deprecated group/name - [DEFAULT]/neutron_metadata_proxy_shared_secret -metadata_proxy_shared_secret={{ METADATA_PROXY_SHARED_SECRET }} - - -# -# Options defined in nova.network.neutronv2.api -# - -# URL for connecting to neutron (string value) -# Deprecated group/name - [DEFAULT]/neutron_url -url=http://{{ CONTROLLER_HOST_ADDRESS }}:9696 - -# Timeout value for connecting to neutron in seconds (integer -# value) -# Deprecated group/name - [DEFAULT]/neutron_url_timeout -#url_timeout=30 - -# User id for connecting to neutron in admin context (string -# value) -#admin_user_id= - -# Username for connecting to neutron in admin context (string -# value) -# Deprecated group/name - [DEFAULT]/neutron_admin_username -admin_username={{ NEUTRON_SERVICE_USER }} - -# Password for connecting to neutron in admin context (string -# value) -# Deprecated group/name - [DEFAULT]/neutron_admin_password -admin_password={{ NEUTRON_SERVICE_PASSWORD }} - -# Tenant id for connecting to neutron in admin context (string -# value) -# Deprecated group/name - [DEFAULT]/neutron_admin_tenant_id -#admin_tenant_id= - -# Tenant name for connecting to neutron in admin context. This -# option will be ignored if neutron_admin_tenant_id is set. -# Note that with Keystone V3 tenant names are only unique -# within a domain. 
(string value) -# Deprecated group/name - [DEFAULT]/neutron_admin_tenant_name -admin_tenant_name=service - -# Region name for connecting to neutron in admin context -# (string value) -# Deprecated group/name - [DEFAULT]/neutron_region_name -#region_name= - -# Authorization URL for connecting to neutron in admin context -# (string value) -# Deprecated group/name - [DEFAULT]/neutron_admin_auth_url -admin_auth_url=http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0 - -# If set, ignore any SSL validation issues (boolean value) -# Deprecated group/name - [DEFAULT]/neutron_api_insecure -#api_insecure=false - -# Authorization strategy for connecting to neutron in admin -# context (string value) -# Deprecated group/name - [DEFAULT]/neutron_auth_strategy -auth_strategy=keystone - -# Name of Integration Bridge used by Open vSwitch (string -# value) -# Deprecated group/name - [DEFAULT]/neutron_ovs_bridge -#ovs_bridge=br-int - -# Number of seconds before querying neutron for extensions -# (integer value) -# Deprecated group/name - [DEFAULT]/neutron_extension_sync_interval -#extension_sync_interval=600 - -# Location of CA certificates file to use for neutron client -# requests. (string value) -# Deprecated group/name - [DEFAULT]/neutron_ca_certificates_file -#ca_certificates_file= - -# Allow an instance to have multiple vNICs attached to the -# same Neutron network. (boolean value) -#allow_duplicate_networks=false - - -[osapi_v3] - -# -# Options defined in nova.api.openstack -# - -# Whether the V3 API is enabled or not (boolean value) -#enabled=false - -# A list of v3 API extensions to never load. Specify the -# extension aliases here. (list value) -#extensions_blacklist= - -# If the list is not empty then a v3 API extension will only -# be loaded if it exists in this list. Specify the extension -# aliases here. (list value) -#extensions_whitelist= - - -[rdp] - -# -# Options defined in nova.rdp -# - -# Location of RDP html5 console proxy, in the form -# "http://127.0.0.1:6083/" (string value) -#html5_proxy_base_url=http://127.0.0.1:6083/ - -# Enable RDP related features (boolean value) -#enabled=false - - -[serial_console] - -# -# Options defined in nova.cmd.serialproxy -# - -# Host on which to listen for incoming requests (string value) -serialproxy_host=127.0.0.1 - -# Port on which to listen for incoming requests (integer -# value) -#serialproxy_port=6083 - - -# -# Options defined in nova.console.serial -# - -# Enable serial console related features (boolean value) -enabled=false - -# Range of TCP ports to use for serial ports on compute hosts -# (string value) -#port_range=10000:20000 - -# Location of serial console proxy. 
(string value) -#base_url=ws://127.0.0.1:6083/ - -# IP address on which instance serial console should listen -# (string value) -#listen=127.0.0.1 - -# The address to which proxy clients (like nova-serialproxy) -# should connect (string value) -#proxyclient_address=127.0.0.1 - - -[spice] - -# -# Options defined in nova.cmd.spicehtml5proxy -# - -# Host on which to listen for incoming requests (string value) -# Deprecated group/name - [DEFAULT]/spicehtml5proxy_host -#html5proxy_host=0.0.0.0 - -# Port on which to listen for incoming requests (integer -# value) -# Deprecated group/name - [DEFAULT]/spicehtml5proxy_port -#html5proxy_port=6082 - - -# -# Options defined in nova.spice -# - -# Location of spice HTML5 console proxy, in the form -# "http://127.0.0.1:6082/spice_auto.html" (string value) -#html5proxy_base_url=http://127.0.0.1:6082/spice_auto.html - -# IP address on which instance spice server should listen -# (string value) -#server_listen=127.0.0.1 - -# The address to which proxy clients (like nova- -# spicehtml5proxy) should connect (string value) -#server_proxyclient_address=127.0.0.1 - -# Enable spice related features (boolean value) -enabled=false - -# Enable spice guest agent support (boolean value) -#agent_enabled=true - -# Keymap for spice (string value) -#keymap=en-us - - -[ssl] - -# -# Options defined in nova.openstack.common.sslutils -# - -# CA certificate file to use to verify connecting clients. -# (string value) -#ca_file= - -# Certificate file to use when starting the server securely. -# (string value) -#cert_file= - -# Private key file to use when starting the server securely. -# (string value) -#key_file= - - -[trusted_computing] - -# -# Options defined in nova.scheduler.filters.trusted_filter -# - -# Attestation server HTTP (string value) -#attestation_server= - -# Attestation server Cert file for Identity verification -# (string value) -#attestation_server_ca_file= - -# Attestation server port (string value) -#attestation_port=8443 - -# Attestation web API URL (string value) -#attestation_api_url=/OpenAttestationWebServices/V1.0 - -# Attestation authorization blob - must change (string value) -#attestation_auth_blob= - -# Attestation status cache valid period length (integer value) -#attestation_auth_timeout=60 - -# Disable SSL cert verification for Attestation service -# (boolean value) -#attestation_insecure_ssl=false - - -[upgrade_levels] - -# -# Options defined in nova.baserpc -# - -# Set a version cap for messages sent to the base api in any -# service (string value) -#baseapi= - - -# -# Options defined in nova.cells.rpc_driver -# - -# Set a version cap for messages sent between cells services -# (string value) -#intercell= - - -# -# Options defined in nova.cells.rpcapi -# - -# Set a version cap for messages sent to local cells services -# (string value) -#cells= - - -# -# Options defined in nova.cert.rpcapi -# - -# Set a version cap for messages sent to cert services (string -# value) -#cert= - - -# -# Options defined in nova.compute.rpcapi -# - -# Set a version cap for messages sent to compute services. If -# you plan to do a live upgrade from havana to icehouse, you -# should set this option to "icehouse-compat" before beginning -# the live upgrade procedure. 
(string value) -#compute= - - -# -# Options defined in nova.conductor.rpcapi -# - -# Set a version cap for messages sent to conductor services -# (string value) -#conductor= - - -# -# Options defined in nova.console.rpcapi -# - -# Set a version cap for messages sent to console services -# (string value) -#console= - - -# -# Options defined in nova.consoleauth.rpcapi -# - -# Set a version cap for messages sent to consoleauth services -# (string value) -#consoleauth= - - -# -# Options defined in nova.network.rpcapi -# - -# Set a version cap for messages sent to network services -# (string value) -#network= - - -# -# Options defined in nova.scheduler.rpcapi -# - -# Set a version cap for messages sent to scheduler services -# (string value) -#scheduler= - - -[vmware] - -# -# Options defined in nova.virt.vmwareapi.driver -# - -# Hostname or IP address for connection to VMware VC host. -# (string value) -#host_ip= - -# Port for connection to VMware VC host. (integer value) -#host_port=443 - -# Username for connection to VMware VC host. (string value) -#host_username= - -# Password for connection to VMware VC host. (string value) -#host_password= - -# Name of a VMware Cluster ComputeResource. (multi valued) -#cluster_name= - -# Regex to match the name of a datastore. (string value) -#datastore_regex= - -# The interval used for polling of remote tasks. (floating -# point value) -#task_poll_interval=0.5 - -# The number of times we retry on failures, e.g., socket -# error, etc. (integer value) -#api_retry_count=10 - -# VNC starting port (integer value) -#vnc_port=5900 - -# Total number of VNC ports (integer value) -#vnc_port_total=10000 - -# Whether to use linked clone (boolean value) -#use_linked_clone=true - -# Optional VIM Service WSDL Location e.g -# http:///vimService.wsdl. Optional over-ride to -# default location for bug work-arounds (string value) -#wsdl_location= - - -# -# Options defined in nova.virt.vmwareapi.vif -# - -# Physical ethernet adapter name for vlan networking (string -# value) -#vlan_interface=vmnic0 - -# Name of Integration Bridge (string value) -#integration_bridge=br-int - - -# -# Options defined in nova.virt.vmwareapi.vim_util -# - -# The maximum number of ObjectContent data objects that should -# be returned in a single result. A positive value will cause -# the operation to suspend the retrieval when the count of -# objects reaches the specified maximum. The server may still -# limit the count to something less than the configured value. -# Any remaining objects may be retrieved with additional -# requests. (integer value) -#maximum_objects=100 - - -[xenserver] - -# -# Options defined in nova.virt.xenapi.agent -# - -# Number of seconds to wait for agent reply (integer value) -#agent_timeout=30 - -# Number of seconds to wait for agent to be fully operational -# (integer value) -#agent_version_timeout=300 - -# Number of seconds to wait for agent reply to resetnetwork -# request (integer value) -#agent_resetnetwork_timeout=60 - -# Specifies the path in which the XenAPI guest agent should be -# located. If the agent is present, network configuration is -# not injected into the image. Used if -# compute_driver=xenapi.XenAPIDriver and flat_injected=True -# (string value) -#agent_path=usr/sbin/xe-update-networking - -# Disables the use of the XenAPI agent in any image regardless -# of what image properties are present. 
(boolean value) -#disable_agent=false - -# Determines if the XenAPI agent should be used when the image -# used does not contain a hint to declare if the agent is -# present or not. The hint is a glance property -# "xenapi_use_agent" that has the value "True" or "False". -# Note that waiting for the agent when it is not present will -# significantly increase server boot times. (boolean value) -#use_agent_default=false - - -# -# Options defined in nova.virt.xenapi.client.session -# - -# Timeout in seconds for XenAPI login. (integer value) -#login_timeout=10 - -# Maximum number of concurrent XenAPI connections. Used only -# if compute_driver=xenapi.XenAPIDriver (integer value) -#connection_concurrent=5 - - -# -# Options defined in nova.virt.xenapi.driver -# - -# URL for connection to XenServer/Xen Cloud Platform. A -# special value of unix://local can be used to connect to the -# local unix socket. Required if -# compute_driver=xenapi.XenAPIDriver (string value) -#connection_url= - -# Username for connection to XenServer/Xen Cloud Platform. -# Used only if compute_driver=xenapi.XenAPIDriver (string -# value) -#connection_username=root - -# Password for connection to XenServer/Xen Cloud Platform. -# Used only if compute_driver=xenapi.XenAPIDriver (string -# value) -#connection_password= - -# The interval used for polling of coalescing vhds. Used only -# if compute_driver=xenapi.XenAPIDriver (floating point value) -#vhd_coalesce_poll_interval=5.0 - -# Ensure compute service is running on host XenAPI connects -# to. (boolean value) -#check_host=true - -# Max number of times to poll for VHD to coalesce. Used only -# if compute_driver=xenapi.XenAPIDriver (integer value) -#vhd_coalesce_max_attempts=20 - -# Base path to the storage repository (string value) -#sr_base_path=/var/run/sr-mount - -# The iSCSI Target Host (string value) -#target_host= - -# The iSCSI Target Port, default is port 3260 (string value) -#target_port=3260 - -# IQN Prefix (string value) -#iqn_prefix=iqn.2010-10.org.openstack - -# Used to enable the remapping of VBD dev (Works around an -# issue in Ubuntu Maverick) (boolean value) -#remap_vbd_dev=false - -# Specify prefix to remap VBD dev to (ex. /dev/xvdb -> -# /dev/sdb) (string value) -#remap_vbd_dev_prefix=sd - - -# -# Options defined in nova.virt.xenapi.image.bittorrent -# - -# Base URL for torrent files. (string value) -#torrent_base_url= - -# Probability that peer will become a seeder. (1.0 = 100%) -# (floating point value) -#torrent_seed_chance=1.0 - -# Number of seconds after downloading an image via BitTorrent -# that it should be seeded for other peers. (integer value) -#torrent_seed_duration=3600 - -# Cached torrent files not accessed within this number of -# seconds can be reaped (integer value) -#torrent_max_last_accessed=86400 - -# Beginning of port range to listen on (integer value) -#torrent_listen_port_start=6881 - -# End of port range to listen on (integer value) -#torrent_listen_port_end=6891 - -# Number of seconds a download can remain at the same progress -# percentage w/o being considered a stall (integer value) -#torrent_download_stall_cutoff=600 - -# Maximum number of seeder processes to run concurrently -# within a given dom0. 
(-1 = no limit) (integer value) -#torrent_max_seeder_processes_per_host=1 - - -# -# Options defined in nova.virt.xenapi.pool -# - -# To use for hosts with different CPUs (boolean value) -#use_join_force=true - - -# -# Options defined in nova.virt.xenapi.vif -# - -# Name of Integration Bridge used by Open vSwitch (string -# value) -#ovs_integration_bridge=xapi1 - - -# -# Options defined in nova.virt.xenapi.vm_utils -# - -# Cache glance images locally. `all` will cache all images, -# `some` will only cache images that have the image_property -# `cache_in_nova=True`, and `none` turns off caching entirely -# (string value) -#cache_images=all - -# Compression level for images, e.g., 9 for gzip -9. Range is -# 1-9, 9 being most compressed but most CPU intensive on dom0. -# (integer value) -#image_compression_level= - -# Default OS type (string value) -#default_os_type=linux - -# Time to wait for a block device to be created (integer -# value) -#block_device_creation_timeout=10 - -# Maximum size in bytes of kernel or ramdisk images (integer -# value) -#max_kernel_ramdisk_size=16777216 - -# Filter for finding the SR to be used to install guest -# instances on. To use the Local Storage in default -# XenServer/XCP installations set this flag to other-config -# :i18n-key=local-storage. To select an SR with a different -# matching criteria, you could set it to other- -# config:my_favorite_sr=true. On the other hand, to fall back -# on the Default SR, as displayed by XenCenter, set this flag -# to: default-sr:true (string value) -#sr_matching_filter=default-sr:true - -# Whether to use sparse_copy for copying data on a resize down -# (False will use standard dd). This speeds up resizes down -# considerably since large runs of zeros won't have to be -# rsynced (boolean value) -#sparse_copy=true - -# Maximum number of retries to unplug VBD (integer value) -#num_vbd_unplug_retries=10 - -# Whether or not to download images via Bit Torrent -# (all|some|none). (string value) -#torrent_images=none - -# Name of network to use for booting iPXE ISOs (string value) -#ipxe_network_name= - -# URL to the iPXE boot menu (string value) -#ipxe_boot_menu_url= - -# Name and optionally path of the tool used for ISO image -# creation (string value) -#ipxe_mkisofs_cmd=mkisofs - - -# -# Options defined in nova.virt.xenapi.vmops -# - -# Number of seconds to wait for instance to go to running -# state (integer value) -#running_timeout=60 - -# The XenAPI VIF driver using XenServer Network APIs. (string -# value) -#vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver - -# Dom0 plugin driver used to handle image uploads. (string -# value) -#image_upload_handler=nova.virt.xenapi.image.glance.GlanceStore - - -# -# Options defined in nova.virt.xenapi.volume_utils -# - -# Number of seconds to wait for an SR to settle if the VDI -# does not exist when first introduced (integer value) -#introduce_vdi_retry_wait=20 - - -[zookeeper] - -# -# Options defined in nova.servicegroup.drivers.zk -# - -# The ZooKeeper addresses for servicegroup service in the -# format of host1:port,host2:port,host3:port (string value) -#address= - -# The recv_timeout parameter for the zk session (integer -# value) -#recv_timeout=4000 - -# The prefix used in ZooKeeper to store ephemeral nodes -# (string value) -#sg_prefix=/servicegroups - -# Number of seconds to wait until retrying to join the session -# (integer value) -#sg_retry_interval=5 - -[database] - -# The SQLAlchemy connection string to use to connect to the -# database. 
(string value) -connection=postgresql://{{ NOVA_DB_USER }}:{{ NOVA_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/nova diff --git a/openstack/usr/share/openstack/nova/policy.json b/openstack/usr/share/openstack/nova/policy.json deleted file mode 100644 index cc5b8ea4..00000000 --- a/openstack/usr/share/openstack/nova/policy.json +++ /dev/null @@ -1,324 +0,0 @@ -{ - "context_is_admin": "role:admin", - "admin_or_owner": "is_admin:True or project_id:%(project_id)s", - "default": "rule:admin_or_owner", - - "cells_scheduler_filter:TargetCellFilter": "is_admin:True", - - "compute:create": "", - "compute:create:attach_network": "", - "compute:create:attach_volume": "", - "compute:create:forced_host": "is_admin:True", - "compute:get_all": "", - "compute:get_all_tenants": "", - "compute:start": "rule:admin_or_owner", - "compute:stop": "rule:admin_or_owner", - "compute:unlock_override": "rule:admin_api", - - "compute:shelve": "", - "compute:shelve_offload": "", - "compute:unshelve": "", - - "compute:volume_snapshot_create": "", - "compute:volume_snapshot_delete": "", - - "admin_api": "is_admin:True", - "compute:v3:servers:start": "rule:admin_or_owner", - "compute:v3:servers:stop": "rule:admin_or_owner", - "compute_extension:v3:os-access-ips:discoverable": "", - "compute_extension:v3:os-access-ips": "", - "compute_extension:accounts": "rule:admin_api", - "compute_extension:admin_actions": "rule:admin_api", - "compute_extension:admin_actions:pause": "rule:admin_or_owner", - "compute_extension:admin_actions:unpause": "rule:admin_or_owner", - "compute_extension:admin_actions:suspend": "rule:admin_or_owner", - "compute_extension:admin_actions:resume": "rule:admin_or_owner", - "compute_extension:admin_actions:lock": "rule:admin_or_owner", - "compute_extension:admin_actions:unlock": "rule:admin_or_owner", - "compute_extension:admin_actions:resetNetwork": "rule:admin_api", - "compute_extension:admin_actions:injectNetworkInfo": "rule:admin_api", - "compute_extension:admin_actions:createBackup": "rule:admin_or_owner", - "compute_extension:admin_actions:migrateLive": "rule:admin_api", - "compute_extension:admin_actions:resetState": "rule:admin_api", - "compute_extension:admin_actions:migrate": "rule:admin_api", - "compute_extension:v3:os-admin-actions": "rule:admin_api", - "compute_extension:v3:os-admin-actions:discoverable": "", - "compute_extension:v3:os-admin-actions:reset_network": "rule:admin_api", - "compute_extension:v3:os-admin-actions:inject_network_info": "rule:admin_api", - "compute_extension:v3:os-admin-actions:reset_state": "rule:admin_api", - "compute_extension:v3:os-admin-password": "", - "compute_extension:v3:os-admin-password:discoverable": "", - "compute_extension:aggregates": "rule:admin_api", - "compute_extension:v3:os-aggregates:discoverable": "", - "compute_extension:v3:os-aggregates:index": "rule:admin_api", - "compute_extension:v3:os-aggregates:create": "rule:admin_api", - "compute_extension:v3:os-aggregates:show": "rule:admin_api", - "compute_extension:v3:os-aggregates:update": "rule:admin_api", - "compute_extension:v3:os-aggregates:delete": "rule:admin_api", - "compute_extension:v3:os-aggregates:add_host": "rule:admin_api", - "compute_extension:v3:os-aggregates:remove_host": "rule:admin_api", - "compute_extension:v3:os-aggregates:set_metadata": "rule:admin_api", - "compute_extension:agents": "rule:admin_api", - "compute_extension:v3:os-agents": "rule:admin_api", - "compute_extension:v3:os-agents:discoverable": "", - "compute_extension:attach_interfaces": "", - 
"compute_extension:v3:os-attach-interfaces": "", - "compute_extension:v3:os-attach-interfaces:discoverable": "", - "compute_extension:baremetal_nodes": "rule:admin_api", - "compute_extension:cells": "rule:admin_api", - "compute_extension:v3:os-cells": "rule:admin_api", - "compute_extension:v3:os-cells:discoverable": "", - "compute_extension:certificates": "", - "compute_extension:v3:os-certificates:create": "", - "compute_extension:v3:os-certificates:show": "", - "compute_extension:v3:os-certificates:discoverable": "", - "compute_extension:cloudpipe": "rule:admin_api", - "compute_extension:cloudpipe_update": "rule:admin_api", - "compute_extension:console_output": "", - "compute_extension:v3:consoles:discoverable": "", - "compute_extension:v3:os-console-output:discoverable": "", - "compute_extension:v3:os-console-output": "", - "compute_extension:consoles": "", - "compute_extension:v3:os-remote-consoles": "", - "compute_extension:v3:os-remote-consoles:discoverable": "", - "compute_extension:createserverext": "", - "compute_extension:v3:os-create-backup:discoverable": "", - "compute_extension:v3:os-create-backup": "rule:admin_or_owner", - "compute_extension:deferred_delete": "", - "compute_extension:v3:os-deferred-delete": "", - "compute_extension:v3:os-deferred-delete:discoverable": "", - "compute_extension:disk_config": "", - "compute_extension:evacuate": "rule:admin_api", - "compute_extension:v3:os-evacuate": "rule:admin_api", - "compute_extension:v3:os-evacuate:discoverable": "", - "compute_extension:extended_server_attributes": "rule:admin_api", - "compute_extension:v3:os-extended-server-attributes": "rule:admin_api", - "compute_extension:v3:os-extended-server-attributes:discoverable": "", - "compute_extension:extended_status": "", - "compute_extension:v3:os-extended-status": "", - "compute_extension:v3:os-extended-status:discoverable": "", - "compute_extension:extended_availability_zone": "", - "compute_extension:v3:os-extended-availability-zone": "", - "compute_extension:v3:os-extended-availability-zone:discoverable": "", - "compute_extension:extended_ips": "", - "compute_extension:extended_ips_mac": "", - "compute_extension:extended_vif_net": "", - "compute_extension:v3:extension_info:discoverable": "", - "compute_extension:extended_volumes": "", - "compute_extension:v3:os-extended-volumes": "", - "compute_extension:v3:os-extended-volumes:swap": "", - "compute_extension:v3:os-extended-volumes:discoverable": "", - "compute_extension:v3:os-extended-volumes:attach": "", - "compute_extension:v3:os-extended-volumes:detach": "", - "compute_extension:fixed_ips": "rule:admin_api", - "compute_extension:flavor_access": "", - "compute_extension:flavor_access:addTenantAccess": "rule:admin_api", - "compute_extension:flavor_access:removeTenantAccess": "rule:admin_api", - "compute_extension:v3:flavor-access": "", - "compute_extension:v3:flavor-access:discoverable": "", - "compute_extension:v3:flavor-access:remove_tenant_access": "rule:admin_api", - "compute_extension:v3:flavor-access:add_tenant_access": "rule:admin_api", - "compute_extension:flavor_disabled": "", - "compute_extension:flavor_rxtx": "", - "compute_extension:v3:os-flavor-rxtx": "", - "compute_extension:v3:os-flavor-rxtx:discoverable": "", - "compute_extension:flavor_swap": "", - "compute_extension:flavorextradata": "", - "compute_extension:flavorextraspecs:index": "", - "compute_extension:flavorextraspecs:show": "", - "compute_extension:flavorextraspecs:create": "rule:admin_api", - "compute_extension:flavorextraspecs:update": 
"rule:admin_api", - "compute_extension:flavorextraspecs:delete": "rule:admin_api", - "compute_extension:v3:flavors:discoverable": "", - "compute_extension:v3:flavor-extra-specs:discoverable": "", - "compute_extension:v3:flavor-extra-specs:index": "", - "compute_extension:v3:flavor-extra-specs:show": "", - "compute_extension:v3:flavor-extra-specs:create": "rule:admin_api", - "compute_extension:v3:flavor-extra-specs:update": "rule:admin_api", - "compute_extension:v3:flavor-extra-specs:delete": "rule:admin_api", - "compute_extension:flavormanage": "rule:admin_api", - "compute_extension:v3:flavor-manage": "rule:admin_api", - "compute_extension:floating_ip_dns": "", - "compute_extension:floating_ip_pools": "", - "compute_extension:floating_ips": "", - "compute_extension:floating_ips_bulk": "rule:admin_api", - "compute_extension:fping": "", - "compute_extension:fping:all_tenants": "rule:admin_api", - "compute_extension:hide_server_addresses": "is_admin:False", - "compute_extension:v3:os-hide-server-addresses": "is_admin:False", - "compute_extension:v3:os-hide-server-addresses:discoverable": "", - "compute_extension:hosts": "rule:admin_api", - "compute_extension:v3:os-hosts": "rule:admin_api", - "compute_extension:v3:os-hosts:discoverable": "", - "compute_extension:hypervisors": "rule:admin_api", - "compute_extension:v3:os-hypervisors": "rule:admin_api", - "compute_extension:v3:os-hypervisors:discoverable": "", - "compute_extension:image_size": "", - "compute_extension:instance_actions": "", - "compute_extension:v3:os-server-actions": "", - "compute_extension:v3:os-server-actions:discoverable": "", - "compute_extension:instance_actions:events": "rule:admin_api", - "compute_extension:v3:os-server-actions:events": "rule:admin_api", - "compute_extension:instance_usage_audit_log": "rule:admin_api", - "compute_extension:v3:ips:discoverable": "", - "compute_extension:keypairs": "", - "compute_extension:keypairs:index": "", - "compute_extension:keypairs:show": "", - "compute_extension:keypairs:create": "", - "compute_extension:keypairs:delete": "", - "compute_extension:v3:keypairs:discoverable": "", - "compute_extension:v3:keypairs": "", - "compute_extension:v3:keypairs:index": "", - "compute_extension:v3:keypairs:show": "", - "compute_extension:v3:keypairs:create": "", - "compute_extension:v3:keypairs:delete": "", - "compute_extension:v3:os-lock-server:discoverable": "", - "compute_extension:v3:os-lock-server:lock": "rule:admin_or_owner", - "compute_extension:v3:os-lock-server:unlock": "rule:admin_or_owner", - "compute_extension:v3:os-migrate-server:discoverable": "", - "compute_extension:v3:os-migrate-server:migrate": "rule:admin_api", - "compute_extension:v3:os-migrate-server:migrate_live": "rule:admin_api", - "compute_extension:multinic": "", - "compute_extension:v3:os-multinic": "", - "compute_extension:v3:os-multinic:discoverable": "", - "compute_extension:networks": "rule:admin_api", - "compute_extension:networks:view": "", - "compute_extension:networks_associate": "rule:admin_api", - "compute_extension:v3:os-pause-server:discoverable": "", - "compute_extension:v3:os-pause-server:pause": "rule:admin_or_owner", - "compute_extension:v3:os-pause-server:unpause": "rule:admin_or_owner", - "compute_extension:v3:os-pci:pci_servers": "", - "compute_extension:v3:os-pci:discoverable": "", - "compute_extension:v3:os-pci:index": "rule:admin_api", - "compute_extension:v3:os-pci:detail": "rule:admin_api", - "compute_extension:v3:os-pci:show": "rule:admin_api", - "compute_extension:quotas:show": "", - 
"compute_extension:quotas:update": "rule:admin_api", - "compute_extension:quotas:delete": "rule:admin_api", - "compute_extension:v3:os-quota-sets:discoverable": "", - "compute_extension:v3:os-quota-sets:show": "", - "compute_extension:v3:os-quota-sets:update": "rule:admin_api", - "compute_extension:v3:os-quota-sets:delete": "rule:admin_api", - "compute_extension:v3:os-quota-sets:detail": "rule:admin_api", - "compute_extension:quota_classes": "", - "compute_extension:rescue": "", - "compute_extension:v3:os-rescue": "", - "compute_extension:v3:os-rescue:discoverable": "", - "compute_extension:v3:os-scheduler-hints:discoverable": "", - "compute_extension:security_group_default_rules": "rule:admin_api", - "compute_extension:security_groups": "", - "compute_extension:v3:os-security-groups": "", - "compute_extension:v3:os-security-groups:discoverable": "", - "compute_extension:server_diagnostics": "rule:admin_api", - "compute_extension:v3:os-server-diagnostics": "rule:admin_api", - "compute_extension:v3:os-server-diagnostics:discoverable": "", - "compute_extension:server_groups": "", - "compute_extension:server_password": "", - "compute_extension:v3:os-server-password": "", - "compute_extension:v3:os-server-password:discoverable": "", - "compute_extension:server_usage": "", - "compute_extension:v3:os-server-usage": "", - "compute_extension:v3:os-server-usage:discoverable": "", - "compute_extension:services": "rule:admin_api", - "compute_extension:v3:os-services": "rule:admin_api", - "compute_extension:v3:os-services:discoverable": "", - "compute_extension:v3:server-metadata:discoverable": "", - "compute_extension:v3:servers:discoverable": "", - "compute_extension:shelve": "", - "compute_extension:shelveOffload": "rule:admin_api", - "compute_extension:v3:os-shelve:shelve": "", - "compute_extension:v3:os-shelve:shelve:discoverable": "", - "compute_extension:v3:os-shelve:shelve_offload": "rule:admin_api", - "compute_extension:simple_tenant_usage:show": "rule:admin_or_owner", - "compute_extension:v3:os-suspend-server:discoverable": "", - "compute_extension:v3:os-suspend-server:suspend": "rule:admin_or_owner", - "compute_extension:v3:os-suspend-server:resume": "rule:admin_or_owner", - "compute_extension:simple_tenant_usage:list": "rule:admin_api", - "compute_extension:unshelve": "", - "compute_extension:v3:os-shelve:unshelve": "", - "compute_extension:users": "rule:admin_api", - "compute_extension:v3:os-user-data:discoverable": "", - "compute_extension:virtual_interfaces": "", - "compute_extension:virtual_storage_arrays": "", - "compute_extension:volumes": "", - "compute_extension:volume_attachments:index": "", - "compute_extension:volume_attachments:show": "", - "compute_extension:volume_attachments:create": "", - "compute_extension:volume_attachments:update": "", - "compute_extension:volume_attachments:delete": "", - "compute_extension:volumetypes": "", - "compute_extension:availability_zone:list": "", - "compute_extension:v3:os-availability-zone:list": "", - "compute_extension:v3:os-availability-zone:discoverable": "", - "compute_extension:availability_zone:detail": "rule:admin_api", - "compute_extension:v3:os-availability-zone:detail": "rule:admin_api", - "compute_extension:used_limits_for_admin": "rule:admin_api", - "compute_extension:migrations:index": "rule:admin_api", - "compute_extension:v3:os-migrations:index": "rule:admin_api", - "compute_extension:v3:os-migrations:discoverable": "", - "compute_extension:os-assisted-volume-snapshots:create": "rule:admin_api", - 
"compute_extension:os-assisted-volume-snapshots:delete": "rule:admin_api", - "compute_extension:console_auth_tokens": "rule:admin_api", - "compute_extension:v3:os-console-auth-tokens": "rule:admin_api", - "compute_extension:os-server-external-events:create": "rule:admin_api", - "compute_extension:v3:os-server-external-events:create": "rule:admin_api", - - "volume:create": "", - "volume:get_all": "", - "volume:get_volume_metadata": "", - "volume:get_snapshot": "", - "volume:get_all_snapshots": "", - - - "volume_extension:types_manage": "rule:admin_api", - "volume_extension:types_extra_specs": "rule:admin_api", - "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", - "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", - "volume_extension:volume_admin_actions:force_delete": "rule:admin_api", - - - "network:get_all": "", - "network:get": "", - "network:create": "", - "network:delete": "", - "network:associate": "", - "network:disassociate": "", - "network:get_vifs_by_instance": "", - "network:allocate_for_instance": "", - "network:deallocate_for_instance": "", - "network:validate_networks": "", - "network:get_instance_uuids_by_ip_filter": "", - "network:get_instance_id_by_floating_address": "", - "network:setup_networks_on_host": "", - "network:get_backdoor_port": "", - - "network:get_floating_ip": "", - "network:get_floating_ip_pools": "", - "network:get_floating_ip_by_address": "", - "network:get_floating_ips_by_project": "", - "network:get_floating_ips_by_fixed_address": "", - "network:allocate_floating_ip": "", - "network:deallocate_floating_ip": "", - "network:associate_floating_ip": "", - "network:disassociate_floating_ip": "", - "network:release_floating_ip": "", - "network:migrate_instance_start": "", - "network:migrate_instance_finish": "", - - "network:get_fixed_ip": "", - "network:get_fixed_ip_by_address": "", - "network:add_fixed_ip_to_instance": "", - "network:remove_fixed_ip_from_instance": "", - "network:add_network_to_project": "", - "network:get_instance_nw_info": "", - - "network:get_dns_domains": "", - "network:add_dns_entry": "", - "network:modify_dns_entry": "", - "network:delete_dns_entry": "", - "network:get_dns_entries_by_address": "", - "network:get_dns_entries_by_name": "", - "network:create_private_dns_domain": "", - "network:create_public_dns_domain": "", - "network:delete_dns_domain": "" -} diff --git a/openstack/usr/share/openstack/openvswitch.yml b/openstack/usr/share/openstack/openvswitch.yml deleted file mode 100644 index 47257f7f..00000000 --- a/openstack/usr/share/openstack/openvswitch.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- hosts: localhost - tasks: - - - name: Create openvswitch directories - file: path={{ item }} state=directory - with_items: - - /etc/openvswitch - - /var/run/openvswitch - - - shell: > - ovsdb-tool create /etc/openvswitch/conf.db /usr/share/openvswitch/vswitch.ovsschema - creates=/etc/openvswitch/conf.db - - # We enable the openvswitch-db-server in a different task to identify - # the first time we run this script by identifying when we enable the - # unit. - # - # We need to identify this to initialise the database. 
- - name: Enable openvswitch database service - service: name={{ item }} enabled=yes - with_items: - - openvswitch-db-server.service - register: openvswitch_db_enable - - - name: Start openvswitch database service - service: name={{ item }} state=started - with_items: - - openvswitch-db-server.service - - - name: initialise openvswitch-db - shell: ovs-vsctl --no-wait init - when: openvswitch_db_enable|changed - - - name: Enable and start Open vSwitch service - service: name={{ item }} enabled=yes state=started - with_items: - - openvswitch.service diff --git a/openstack/usr/share/openstack/postgres.yml b/openstack/usr/share/openstack/postgres.yml deleted file mode 100644 index 5ff9355e..00000000 --- a/openstack/usr/share/openstack/postgres.yml +++ /dev/null @@ -1,48 +0,0 @@ ---- -- hosts: localhost - vars_files: - - "/etc/openstack/postgres.conf" - tasks: - - - name: Create postgres user - user: - name: postgres - comment: PostgreSQL Server - shell: /sbin/nologin - home: /var/lib/pgsql - - - name: Create the postgres directories - file: - path: "{{ item }}" - state: directory - owner: postgres - group: postgres - with_items: - - /var/run/postgresql - - /var/lib/pgsql/data - - - name: Initialise postgres database - command: pg_ctl -D /var/lib/pgsql/data initdb - args: - creates: /var/lib/pgsql/data/base - sudo: yes - sudo_user: postgres - - - name: Add the configuration needed for postgres for Openstack - template: - src: /usr/share/openstack/postgres/{{ item }} - dest: /var/lib/pgsql/data/{{ item }} - owner: postgres - group: postgres - mode: 0600 - with_items: - - postgresql.conf - - pg_hba.conf - - - name: Enable and start postgres services - service: - name: "{{ item }}" - enabled: yes - state: started - with_items: - - postgres-server diff --git a/openstack/usr/share/openstack/postgres/pg_hba.conf b/openstack/usr/share/openstack/postgres/pg_hba.conf deleted file mode 100644 index 78186924..00000000 --- a/openstack/usr/share/openstack/postgres/pg_hba.conf +++ /dev/null @@ -1,5 +0,0 @@ -local all all trust -host all all 127.0.0.0/8 trust -host all all ::1/128 trust -host all all {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}/32 trust -host all all 0.0.0.0/0 md5 diff --git a/openstack/usr/share/openstack/postgres/postgresql.conf b/openstack/usr/share/openstack/postgres/postgresql.conf deleted file mode 100644 index 74153385..00000000 --- a/openstack/usr/share/openstack/postgres/postgresql.conf +++ /dev/null @@ -1,11 +0,0 @@ -listen_addresses = '{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}' -max_connections = 100 -shared_buffers = 128MB -log_timezone = 'UTC' -datestyle = 'iso, mdy' -timezone = 'UTC' -lc_messages = 'C' -lc_monetary = 'C' -lc_numeric = 'C' -lc_time = 'C' -default_text_search_config = 'pg_catalog.english' diff --git a/openstack/usr/share/openstack/rabbitmq/rabbitmq-env.conf b/openstack/usr/share/openstack/rabbitmq/rabbitmq-env.conf deleted file mode 100644 index d4c58dae..00000000 --- a/openstack/usr/share/openstack/rabbitmq/rabbitmq-env.conf +++ /dev/null @@ -1,3 +0,0 @@ -# Configure port node where rabbitmq-server will listen from. 
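Several of the files removed here (pg_hba.conf, postgresql.conf, and the rabbitmq files that follow) are Jinja2 templates that the playbooks render with the template module. As a minimal sketch of that substitution, done with the jinja2 library directly; the address value is invented for illustration:

    # Sketch: render one pg_hba.conf line the way the playbook's
    # template task would; the IP value here is hypothetical.
    from jinja2 import Template

    line = "host all all {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}/32 trust"
    print(Template(line).render(MANAGEMENT_INTERFACE_IP_ADDRESS="10.0.0.1"))
    # -> host all all 10.0.0.1/32 trust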
-NODE_PORT={{ RABBITMQ_PORT }} -CONFIG_FILE=/etc/rabbitmq/rabbitmq diff --git a/openstack/usr/share/openstack/rabbitmq/rabbitmq.config b/openstack/usr/share/openstack/rabbitmq/rabbitmq.config deleted file mode 100644 index 9b93881e..00000000 --- a/openstack/usr/share/openstack/rabbitmq/rabbitmq.config +++ /dev/null @@ -1,9 +0,0 @@ -%% -*- Rabbit configuration for Openstack in Baserock -[ - {rabbit, - [ - {default_user, <<"{{ RABBITMQ_USER }}">>}, - {default_pass, <<"{{ RABBITMQ_PASSWORD }}">>}, - {tcp_listeners, [{{ RABBITMQ_PORT }}]} - ]} -]. diff --git a/openstack/usr/share/openstack/swift-controller.yml b/openstack/usr/share/openstack/swift-controller.yml deleted file mode 100644 index 690de087..00000000 --- a/openstack/usr/share/openstack/swift-controller.yml +++ /dev/null @@ -1,52 +0,0 @@ ---- -- hosts: localhost - vars_files: - - swift-controller-vars.yml - vars: - - ring_name_port_map: - account: - port: 6002 - container: - port: 6001 - object: - port: 6000 - remote_user: root - tasks: - - user: - name: swift - comment: Swift user - - - file: - path: /etc/swift - owner: swift - group: swift - state: directory - - - template: - src: /usr/share/swift/etc/swift/proxy-server.j2 - dest: /etc/swift/proxy-server.conf - mode: 0644 - owner: swift - group: swift - - - keystone_user: - user: swift - password: "{{ SWIFT_ADMIN_PASSWORD }}" - tenant: service - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - - - keystone_user: - role: admin - user: swift - tenant: service - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" - - - keystone_service: - name: swift - type: object-store - description: OpenStack Object Storage - publicurl: http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:8080/v1/AUTH_%(tenant_id)s - internalurl: http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:8080/v1/AUTH_%(tenant_id)s - adminurl: http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:8080 - region: regionOne - token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}" diff --git a/openstack/usr/share/swift/etc/rsyncd.j2 b/openstack/usr/share/swift/etc/rsyncd.j2 deleted file mode 100644 index c0657665..00000000 --- a/openstack/usr/share/swift/etc/rsyncd.j2 +++ /dev/null @@ -1,23 +0,0 @@ -uid = swift -gid = swift -log file = /var/log/rsyncd.log -pid file = /var/run/rsyncd.pid -address = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} - -[account] -max connections = 2 -path = /srv/node/ -read only = false -lock file = /var/lock/account.lock - -[container] -max connections = 2 -path = /srv/node/ -read only = false -lock file = /var/lock/container.lock - -[object] -max connections = 2 -path = /srv/node/ -read only = false -lock file = /var/lock/object.lock diff --git a/openstack/usr/share/swift/etc/swift/proxy-server.j2 b/openstack/usr/share/swift/etc/swift/proxy-server.j2 deleted file mode 100644 index dda82d5a..00000000 --- a/openstack/usr/share/swift/etc/swift/proxy-server.j2 +++ /dev/null @@ -1,630 +0,0 @@ -[DEFAULT] -# bind_ip = 0.0.0.0 -bind_port = 8080 -# bind_timeout = 30 -# backlog = 4096 -swift_dir = /etc/swift -user = swift - -# Enables exposing configuration settings via HTTP GET /info. -# expose_info = true - -# Key to use for admin calls that are HMAC signed. Default is empty, -# which will disable admin calls to /info. -# admin_key = secret_admin_key -# -# Allows the ability to withhold sections from showing up in the public calls -# to /info. You can withhold subsections by separating the dict level with a -# ".". 
The following would cause the sections 'container_quotas' and 'tempurl'
-# to not be listed, and the key max_failed_deletes would be removed from
-# bulk_delete. Default is empty, allowing all registered features to be listed
-# via HTTP GET /info.
-# disallowed_sections = container_quotas, tempurl, bulk_delete.max_failed_deletes
-
-# Use an integer to override the number of pre-forked processes that will
-# accept connections. Should default to the number of effective cpu
-# cores in the system. It's worth noting that individual workers will
-# use many eventlet co-routines to service multiple concurrent requests.
-# workers = auto
-#
-# Maximum concurrent requests per worker
-# max_clients = 1024
-#
-# Set the following two lines to enable SSL. This is for testing only.
-# cert_file = /etc/swift/proxy.crt
-# key_file = /etc/swift/proxy.key
-#
-# expiring_objects_container_divisor = 86400
-# expiring_objects_account_name = expiring_objects
-#
-# You can specify default log routing here if you want:
-# log_name = swift
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_headers = false
-# log_address = /dev/log
-# The following caps the length of log lines to the value given; no limit if
-# set to 0, the default.
-# log_max_line_length = 0
-#
-# This optional suffix (default is empty) is appended to the swift transaction
-# id, allowing one to easily figure out which cluster an X-Trans-Id belongs to.
-# This is very useful when one is managing more than one swift cluster.
-# trans_id_suffix =
-#
-# comma separated list of functions to call to setup custom log handlers.
-# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
-# adapted_logger
-# log_custom_handlers =
-#
-# If set, log_udp_host will override log_address
-# log_udp_host =
-# log_udp_port = 514
-#
-# You can enable StatsD logging here:
-# log_statsd_host = localhost
-# log_statsd_port = 8125
-# log_statsd_default_sample_rate = 1.0
-# log_statsd_sample_rate_factor = 1.0
-# log_statsd_metric_prefix =
-#
-# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)
-# cors_allow_origin =
-# strict_cors_mode = True
-#
-# client_timeout = 60
-# eventlet_debug = false
-
-[pipeline:main]
-#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo proxy-logging proxy-server
-pipeline = authtoken cache healthcheck keystoneauth proxy-logging proxy-server
-
-[app:proxy-server]
-use = egg:swift#proxy
-# You can override the default log routing for this app here:
-# set log_name = proxy-server
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_address = /dev/log
-#
-# log_handoffs = true
-# recheck_account_existence = 60
-# recheck_container_existence = 60
-# object_chunk_size = 65536
-# client_chunk_size = 65536
-#
-# How long the proxy server will wait on responses from the a/c/o servers.
-# node_timeout = 10
-#
-# How long the proxy server will wait for an initial response and to read a
-# chunk of data from the object servers while serving GET / HEAD requests.
-# Timeouts from these requests can be recovered from so setting this to
-# something lower than node_timeout would provide quicker error recovery
-# while allowing for a longer timeout for non-recoverable requests (PUTs).
-# Defaults to node_timeout, should be overridden if node_timeout is set to a
-# high number to prevent client timeouts from firing before the proxy server
-# has a chance to retry.
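The disallowed_sections behaviour described earlier in this sample amounts to deleting dotted paths from a nested dict before serving /info. A rough sketch of those semantics, not Swift's actual implementation:

    # Sketch: strip dotted paths such as "bulk_delete.max_failed_deletes"
    # from a nested dict of /info data, per the disallowed_sections docs.
    def strip_sections(info, disallowed):
        for path in disallowed:
            keys = path.split('.')
            node = info
            for key in keys[:-1]:
                node = node.get(key, {})
                if not isinstance(node, dict):
                    break
            else:
                node.pop(keys[-1], None)
        return info

    info = {'container_quotas': {}, 'bulk_delete': {'max_failed_deletes': 1000}}
    print(strip_sections(info, ['container_quotas',
                                'bulk_delete.max_failed_deletes']))
    # -> {'bulk_delete': {}}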
-# recoverable_node_timeout = node_timeout
-#
-# conn_timeout = 0.5
-#
-# How long to wait for requests to finish after a quorum has been established.
-# post_quorum_timeout = 0.5
-#
-# How long without an error before a node's error count is reset. This will
-# also be how long before a node is reenabled after suppression is triggered.
-# error_suppression_interval = 60
-#
-# How many errors can accumulate before a node is temporarily ignored.
-# error_suppression_limit = 10
-#
-# If set to 'true' any authorized user may create and delete accounts; if
-# 'false' no one, even authorized, can.
-allow_account_management = true
-#
-# Set object_post_as_copy = false to turn on fast posts where only the metadata
-# changes are stored anew and the original data file is kept in place. This
-# makes for quicker posts; but since the container metadata isn't updated in
-# this mode, features like container sync won't be able to sync posts.
-# object_post_as_copy = true
-#
-# If set to 'true' authorized accounts that do not yet exist within the Swift
-# cluster will be automatically created.
-account_autocreate = true
-#
-# If set to a positive value, trying to create a container when the account
-# already has at least this many containers will result in a 403 Forbidden.
-# Note: This is a soft limit, meaning a user might exceed the cap for
-# recheck_account_existence before the 403s kick in.
-# max_containers_per_account = 0
-#
-# This is a comma separated list of account hashes that ignore the
-# max_containers_per_account cap.
-# max_containers_whitelist =
-#
-# Comma separated list of Host headers to which the proxy will deny requests.
-# deny_host_headers =
-#
-# Prefix used when automatically creating accounts.
-# auto_create_account_prefix = .
-#
-# Depth of the proxy put queue.
-# put_queue_depth = 10
-#
-# Storage nodes can be chosen at random (shuffle), by using timing
-# measurements (timing), or by using an explicit match (affinity).
-# Using timing measurements may allow for lower overall latency, while
-# using affinity allows for finer control. In both the timing and
-# affinity cases, equally-sorting nodes are still randomly chosen to
-# spread load.
-# The valid values for sorting_method are "affinity", "shuffle", and "timing".
-# sorting_method = shuffle
-#
-# If the "timing" sorting_method is used, the timings will only be valid for
-# the number of seconds configured by timing_expiry.
-# timing_expiry = 300
-#
-# The maximum time (seconds) that a large object connection is allowed to last.
-# max_large_object_get_time = 86400
-#
-# Set to the number of nodes to contact for a normal request. You can use
-# '* replicas' at the end to have it use the number given times the number of
-# replicas for the ring being used for the request.
-# request_node_count = 2 * replicas
-#
-# Which backend servers to prefer on reads. Format is r<N> for region
-# N or r<N>z<M> for region N, zone M. The value after the equals is
-# the priority; lower numbers are higher priority.
-#
-# Example: first read from region 1 zone 1, then region 1 zone 2, then
-# anything in region 2, then everything else:
-# read_affinity = r1z1=100, r1z2=200, r2=300
-# Default is empty, meaning no preference.
-# read_affinity =
-#
-# Which backend servers to prefer on writes. Format is r<N> for region
-# N or r<N>z<M> for region N, zone M. If this is set, then when
-# handling an object PUT request, some number (see setting
-# write_affinity_node_count) of local backend servers will be tried
-# before any nonlocal ones.
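To make the read_affinity format described just above concrete, here is a hedged sketch of parsing such a value and sorting candidate nodes by it; Swift's real parser lives in the proxy and differs in detail:

    # Sketch: parse "r1z1=100, r1z2=200, r2=300" into priorities and
    # sort nodes. Lower numbers win; unmatched nodes sort last.
    def parse_affinity(value):
        rules = {}
        for part in value.split(','):
            spec, prio = part.strip().split('=')
            rules[spec] = int(prio)
        return rules

    def priority(rules, node):
        region = 'r%d' % node['region']
        zone = region + 'z%d' % node['zone']
        # A region+zone rule is more specific than a region-only rule.
        return rules.get(zone, rules.get(region, float('inf')))

    rules = parse_affinity('r1z1=100, r1z2=200, r2=300')
    nodes = [{'region': 2, 'zone': 1}, {'region': 1, 'zone': 2},
             {'region': 1, 'zone': 1}, {'region': 3, 'zone': 1}]
    for node in sorted(nodes, key=lambda n: priority(rules, n)):
        print(node)
    # r1z1 first, then r1z2, then anything in r2, then the rest.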
-#
-# Example: try to write to regions 1 and 2 before writing to any other
-# nodes:
-# write_affinity = r1, r2
-# Default is empty, meaning no preference.
-# write_affinity =
-#
-# The number of local (as governed by the write_affinity setting)
-# nodes to attempt to contact first, before any non-local ones. You
-# can use '* replicas' at the end to have it use the number given
-# times the number of replicas for the ring being used for the
-# request.
-# write_affinity_node_count = 2 * replicas
-#
-# These are the headers whose values will only be shown to swift_owners. The
-# exact definition of a swift_owner is up to the auth system in use, but
-# usually indicates administrative responsibilities.
-# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-account-access-control
-
-[filter:tempauth]
-use = egg:swift#tempauth
-# You can override the default log routing for this filter here:
-# set log_name = tempauth
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-#
-# The reseller prefix will verify a token begins with this prefix before even
-# attempting to validate it. Also, with authorization, only Swift storage
-# accounts with this prefix will be authorized by this middleware. Useful if
-# multiple auth systems are in use for one Swift cluster.
-# reseller_prefix = AUTH
-#
-# The auth prefix will cause requests beginning with this prefix to be routed
-# to the auth subsystem, for granting tokens, etc.
-# auth_prefix = /auth/
-# token_life = 86400
-#
-# This allows middleware higher in the WSGI pipeline to override auth
-# processing, useful for middleware such as tempurl and formpost. If you know
-# you're not going to use such middleware and you want a bit of extra security,
-# you can set this to false.
-# allow_overrides = true
-#
-# This specifies what scheme to return with storage urls:
-# http, https, or default (chooses based on what the server is running as)
-# This can be useful with an SSL load balancer in front of a non-SSL server.
-# storage_url_scheme = default
-#
-# Lastly, you need to list all the accounts/users you want here. The format is:
-# user_<account>_<user> = <key> [group] [group] [...] [storage_url]
-# or if you want underscores in <account> or <user>, you can base64 encode them
-# (with no equal signs) and use this format:
-# user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
-# There are special groups of:
-# .reseller_admin = can do anything to any account for this auth
-# .admin = can do anything within the account
-# If neither of these groups are specified, the user can only access containers
-# that have been explicitly allowed for them by a .admin or .reseller_admin.
-# The trailing optional storage_url allows you to specify an alternate url to
-# hand back to the user upon authentication. If not specified, this defaults to
-# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
-# to what the requester would need to use to reach this host.
-# Here are example entries, required for running the tests:
-user_admin_admin = admin .admin .reseller_admin
-user_test_tester = testing .admin
-user_test2_tester2 = testing2 .admin
-user_test_tester3 = testing3
-
-# To enable Keystone authentication you need to have the auth token
-# middleware configured first. Here is an example below; please
-# refer to keystone's documentation for details about the
-# different settings.
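The tempauth credential lines documented above follow a fixed shape. A small illustrative parser for that shape (the user64_ base64 variant is not handled here):

    # Sketch: split one tempauth line of the documented form
    # user_<account>_<user> = <key> [group] ... [storage_url].
    # Purely illustrative; tempauth does this parsing itself.
    def parse_tempauth(name, value):
        _, account, user = name.split('_', 2)
        parts = value.split()
        key, rest = parts[0], parts[1:]
        url = rest.pop() if rest and rest[-1].startswith('http') else None
        return {'account': account, 'user': user, 'key': key,
                'groups': rest, 'storage_url': url}

    print(parse_tempauth('user_test_tester', 'testing .admin'))
    # {'account': 'test', 'user': 'tester', 'key': 'testing',
    #  'groups': ['.admin'], 'storage_url': None}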
-# -# You'll need to have as well the keystoneauth middleware enabled -# and have it in your main pipeline so instead of having tempauth in -# there you can change it to: authtoken keystoneauth -# -[filter:authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory -# auth_host = keystonehost -# auth_port = 35357 -# auth_protocol = http -# auth_uri = http://keystonehost:5000/ -#auth_uri = http://controller:5000/v2.0 -auth_uri = http://127.0.0.1:5000/v2.0 -identity_uri = http://127.0.0.1:35357 -admin_tenant_name = service -admin_user = swift -admin_password = {{ SWIFT_ADMIN_PASSWORD }} -delay_auth_decision = 1 -# cache = swift.cache -# include_service_catalog = False -# -[filter:keystoneauth] -use = egg:swift#keystoneauth -# Operator roles is the role which user would be allowed to manage a -# tenant and be able to create container or give ACL to others. -# operator_roles = admin, swiftoperator -operator_roles = admin, _member_ -# The reseller admin role has the ability to create and delete accounts -# reseller_admin_role = ResellerAdmin -# For backwards compatibility, keystoneauth will match names in cross-tenant -# access control lists (ACLs) when both the requesting user and the tenant -# are in the default domain i.e the domain to which existing tenants are -# migrated. The default_domain_id value configured here should be the same as -# the value used during migration of tenants to keystone domains. -# default_domain_id = default -# For a new installation, or an installation in which keystone projects may -# move between domains, you should disable backwards compatible name matching -# in ACLs by setting allow_names_in_acls to false: -# allow_names_in_acls = true - -[filter:healthcheck] -use = egg:swift#healthcheck -# An optional filesystem path, which if present, will cause the healthcheck -# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE". -# This facility may be used to temporarily remove a Swift node from a load -# balancer pool during maintenance or upgrade (remove the file to allow the -# node back into the load balancer pool). -# disable_path = - -[filter:cache] -use = egg:swift#memcache -# You can override the default log routing for this filter here: -# set log_name = cache -# set log_facility = LOG_LOCAL0 -# set log_level = INFO -# set log_headers = false -# set log_address = /dev/log -# -# If not set here, the value for memcache_servers will be read from -# memcache.conf (see memcache.conf-sample) or lacking that file, it will -# default to the value below. You can specify multiple servers separated with -# commas, as in: 10.1.2.3:11211,10.1.2.4:11211 -memcache_servers = 127.0.0.1:11211 -# -# Sets how memcache values are serialized and deserialized: -# 0 = older, insecure pickle serialization -# 1 = json serialization but pickles can still be read (still insecure) -# 2 = json serialization only (secure and the default) -# If not set here, the value for memcache_serialization_support will be read -# from /etc/swift/memcache.conf (see memcache.conf-sample). -# To avoid an instant full cache flush, existing installations should -# upgrade with 0, then set to 1 and reload, then after some time (24 hours) -# set to 2 and reload. -# In the future, the ability to use pickle serialization will be removed. 
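A sketch of the three memcache_serialization_support modes described above, showing what is written and what can still be read back during an upgrade; this is not the swift.common.memcached code itself:

    # Mode 0: pickle (old, insecure); mode 1: write JSON but still read
    # pickle; mode 2: JSON only (secure, the default).
    import json
    import pickle

    def serialize(value, mode):
        if mode == 0:
            return pickle.dumps(value)          # old, insecure
        return json.dumps(value).encode()       # modes 1 and 2 write JSON

    def deserialize(blob, mode):
        if mode == 1:
            try:
                return json.loads(blob)
            except ValueError:
                return pickle.loads(blob)       # readable during upgrade
        if mode == 2:
            return json.loads(blob)             # secure: JSON only
        return pickle.loads(blob)

    blob = serialize({'x': 1}, 0)
    print(deserialize(blob, 1))                 # pickle still readable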
-# memcache_serialization_support = 2 -# -# Sets the maximum number of connections to each memcached server per worker -# memcache_max_connections = 2 - -[filter:ratelimit] -use = egg:swift#ratelimit -# You can override the default log routing for this filter here: -# set log_name = ratelimit -# set log_facility = LOG_LOCAL0 -# set log_level = INFO -# set log_headers = false -# set log_address = /dev/log -# -# clock_accuracy should represent how accurate the proxy servers' system clocks -# are with each other. 1000 means that all the proxies' clock are accurate to -# each other within 1 millisecond. No ratelimit should be higher than the -# clock accuracy. -# clock_accuracy = 1000 -# -# max_sleep_time_seconds = 60 -# -# log_sleep_time_seconds of 0 means disabled -# log_sleep_time_seconds = 0 -# -# allows for slow rates (e.g. running up to 5 sec's behind) to catch up. -# rate_buffer_seconds = 5 -# -# account_ratelimit of 0 means disabled -# account_ratelimit = 0 - -# these are comma separated lists of account names -# account_whitelist = a,b -# account_blacklist = c,d - -# with container_limit_x = r -# for containers of size x limit write requests per second to r. The container -# rate will be linearly interpolated from the values given. With the values -# below, a container of size 5 will get a rate of 75. -# container_ratelimit_0 = 100 -# container_ratelimit_10 = 50 -# container_ratelimit_50 = 20 - -# Similarly to the above container-level write limits, the following will limit -# container GET (listing) requests. -# container_listing_ratelimit_0 = 100 -# container_listing_ratelimit_10 = 50 -# container_listing_ratelimit_50 = 20 - -[filter:domain_remap] -use = egg:swift#domain_remap -# You can override the default log routing for this filter here: -# set log_name = domain_remap -# set log_facility = LOG_LOCAL0 -# set log_level = INFO -# set log_headers = false -# set log_address = /dev/log -# -# storage_domain = example.com -# path_root = v1 -# reseller_prefixes = AUTH - -[filter:catch_errors] -use = egg:swift#catch_errors -# You can override the default log routing for this filter here: -# set log_name = catch_errors -# set log_facility = LOG_LOCAL0 -# set log_level = INFO -# set log_headers = false -# set log_address = /dev/log - -[filter:cname_lookup] -# Note: this middleware requires python-dnspython -use = egg:swift#cname_lookup -# You can override the default log routing for this filter here: -# set log_name = cname_lookup -# set log_facility = LOG_LOCAL0 -# set log_level = INFO -# set log_headers = false -# set log_address = /dev/log -# -# Specify the storage_domain that match your cloud, multiple domains -# can be specified separated by a comma -# storage_domain = example.com -# -# lookup_depth = 1 - -# Note: Put staticweb just after your auth filter(s) in the pipeline -[filter:staticweb] -use = egg:swift#staticweb - -# Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline -[filter:tempurl] -use = egg:swift#tempurl -# The methods allowed with Temp URLs. -# methods = GET HEAD PUT POST DELETE -# -# The headers to remove from incoming requests. Simply a whitespace delimited -# list of header names and names can optionally end with '*' to indicate a -# prefix match. incoming_allow_headers is a list of exceptions to these -# removals. -# incoming_remove_headers = x-timestamp -# -# The headers allowed as exceptions to incoming_remove_headers. Simply a -# whitespace delimited list of header names and names can optionally end with -# '*' to indicate a prefix match. 
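The container_ratelimit interpolation documented in [filter:ratelimit] above can be checked with a few lines of arithmetic; with the sample values a container of size 5 does indeed get a rate of 75:

    # Sketch: linearly interpolate the allowed write rate between the
    # configured container-size thresholds.
    def container_ratelimit(size, limits):
        # limits: sorted list of (container_size, max_rate) pairs
        for (s1, r1), (s2, r2) in zip(limits, limits[1:]):
            if s1 <= size <= s2:
                return r1 + (r2 - r1) * (size - s1) / float(s2 - s1)
        return limits[-1][1] if size > limits[-1][0] else limits[0][1]

    limits = [(0, 100), (10, 50), (50, 20)]
    print(container_ratelimit(5, limits))   # -> 75.0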
-# incoming_allow_headers = -# -# The headers to remove from outgoing responses. Simply a whitespace delimited -# list of header names and names can optionally end with '*' to indicate a -# prefix match. outgoing_allow_headers is a list of exceptions to these -# removals. -# outgoing_remove_headers = x-object-meta-* -# -# The headers allowed as exceptions to outgoing_remove_headers. Simply a -# whitespace delimited list of header names and names can optionally end with -# '*' to indicate a prefix match. -# outgoing_allow_headers = x-object-meta-public-* - -# Note: Put formpost just before your auth filter(s) in the pipeline -[filter:formpost] -use = egg:swift#formpost - -# Note: Just needs to be placed before the proxy-server in the pipeline. -[filter:name_check] -use = egg:swift#name_check -# forbidden_chars = '"`<> -# maximum_length = 255 -# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$ - -[filter:list-endpoints] -use = egg:swift#list_endpoints -# list_endpoints_path = /endpoints/ - -[filter:proxy-logging] -use = egg:swift#proxy_logging -# If not set, logging directives from [DEFAULT] without "access_" will be used -# access_log_name = swift -# access_log_facility = LOG_LOCAL0 -# access_log_level = INFO -# access_log_address = /dev/log -# -# If set, access_log_udp_host will override access_log_address -# access_log_udp_host = -# access_log_udp_port = 514 -# -# You can use log_statsd_* from [DEFAULT] or override them here: -# access_log_statsd_host = localhost -# access_log_statsd_port = 8125 -# access_log_statsd_default_sample_rate = 1.0 -# access_log_statsd_sample_rate_factor = 1.0 -# access_log_statsd_metric_prefix = -# access_log_headers = false -# -# If access_log_headers is True and access_log_headers_only is set only -# these headers are logged. Multiple headers can be defined as comma separated -# list like this: access_log_headers_only = Host, X-Object-Meta-Mtime -# access_log_headers_only = -# -# By default, the X-Auth-Token is logged. To obscure the value, -# set reveal_sensitive_prefix to the number of characters to log. -# For example, if set to 12, only the first 12 characters of the -# token appear in the log. An unauthorized access of the log file -# won't allow unauthorized usage of the token. However, the first -# 12 or so characters is unique enough that you can trace/debug -# token usage. Set to 0 to suppress the token completely (replaced -# by '...' in the log). -# Note: reveal_sensitive_prefix will not affect the value -# logged with access_log_headers=True. -# reveal_sensitive_prefix = 16 -# -# What HTTP methods are allowed for StatsD logging (comma-sep); request methods -# not in this list will have "BAD_METHOD" for the portion of the metric. -# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS -# -# Note: The double proxy-logging in the pipeline is not a mistake. The -# left-most proxy-logging is there to log requests that were handled in -# middleware and never made it through to the right-most middleware (and -# proxy server). Double logging is prevented for normal requests. See -# proxy-logging docs. - -# Note: Put before both ratelimit and auth in the pipeline. -[filter:bulk] -use = egg:swift#bulk -# max_containers_per_extraction = 10000 -# max_failed_extractions = 1000 -# max_deletes_per_request = 10000 -# max_failed_deletes = 1000 - -# In order to keep a connection active during a potentially long bulk request, -# Swift may return whitespace prepended to the actual response body. 
This
-# whitespace will be yielded no more than every yield_frequency seconds.
-# yield_frequency = 10
-
-# Note: The following parameter is used during a bulk delete of objects and
-# their container. This would frequently fail because it is very likely that
-# not all replicated objects have been deleted by the time the middleware gets
-# a successful response. The number of retries can be configured; the number
-# of seconds to wait between each retry will be 1.5**retry
-
-# delete_container_retry_count = 0
-
-# Note: Put after auth in the pipeline.
-[filter:container-quotas]
-use = egg:swift#container_quotas
-
-# Note: Put after auth and staticweb in the pipeline.
-[filter:slo]
-use = egg:swift#slo
-# max_manifest_segments = 1000
-# max_manifest_size = 2097152
-# min_segment_size = 1048576
-# Start rate-limiting SLO segment serving after the Nth segment of a
-# segmented object.
-# rate_limit_after_segment = 10
-#
-# Once segment rate-limiting kicks in for an object, limit segments served
-# to N per second. 0 means no rate-limiting.
-# rate_limit_segments_per_sec = 0
-#
-# Time limit on GET requests (seconds)
-# max_get_time = 86400
-
-# Note: Put after auth and staticweb in the pipeline.
-# If you don't put it in the pipeline, it will be inserted for you.
-[filter:dlo]
-use = egg:swift#dlo
-# Start rate-limiting DLO segment serving after the Nth segment of a
-# segmented object.
-# rate_limit_after_segment = 10
-#
-# Once segment rate-limiting kicks in for an object, limit segments served
-# to N per second. 0 means no rate-limiting.
-# rate_limit_segments_per_sec = 1
-#
-# Time limit on GET requests (seconds)
-# max_get_time = 86400
-
-[filter:account-quotas]
-use = egg:swift#account_quotas
-
-[filter:gatekeeper]
-use = egg:swift#gatekeeper
-# You can override the default log routing for this filter here:
-# set log_name = gatekeeper
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-
-[filter:container_sync]
-use = egg:swift#container_sync
-# Set this to false if you want to disallow any full url values to be set for
-# any new X-Container-Sync-To headers. This will keep any new full urls from
-# coming in, but won't change any existing values already in the cluster.
-# Updating those will have to be done manually, as knowing what the true realm
-# endpoint should be cannot always be guessed.
-# allow_full_urls = true
-# Set this to specify this cluster's //realm/cluster as "current" in /info
-# current = //REALM/CLUSTER
-
-# Note: Put it at the beginning of the pipeline to profile all middleware. But
-# it is safer to put this after catch_errors, gatekeeper and healthcheck.
-[filter:xprofile]
-use = egg:swift#xprofile
-# This option enables you to switch profilers; the profiler should inherit
-# from the python standard profiler. Currently the supported values are
-# 'cProfile', 'eventlet.green.profile' etc.
-# profile_module = eventlet.green.profile
-#
-# This prefix will be used to combine process ID and timestamp to name the
-# profile data file. Make sure the executing user has permission to write
-# into this path (missing path segments will be created, if necessary).
-# If you enable profiling in more than one type of daemon, you must override
-# it with a unique value like: /var/log/swift/profile/proxy.profile
-# log_filename_prefix = /tmp/log/swift/profile/default.profile
-#
-# the profile data will be dumped to local disk based on above naming rule
-# in this interval.
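A sketch of the container-delete retry behaviour noted in the bulk filter above, using the stated 1.5**retry backoff; try_delete stands in for the real delete call:

    # Sketch: retry a container delete up to retry_count times, sleeping
    # 1.5**retry seconds between attempts (1.0s, 1.5s, 2.25s, ...).
    import time

    def delete_container_with_retries(try_delete, retry_count):
        for retry in range(retry_count + 1):
            if try_delete():
                return True
            if retry < retry_count:
                time.sleep(1.5 ** retry)
        return False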
-# dump_interval = 5.0 -# -# Be careful, this option will enable profiler to dump data into the file with -# time stamp which means there will be lots of files piled up in the directory. -# dump_timestamp = false -# -# This is the path of the URL to access the mini web UI. -# path = /__profile__ -# -# Clear the data when the wsgi server shutdown. -# flush_at_shutdown = false -# -# unwind the iterator of applications -# unwind = false diff --git a/swift/etc/ntp.conf b/swift/etc/ntp.conf deleted file mode 100644 index 54522871..00000000 --- a/swift/etc/ntp.conf +++ /dev/null @@ -1,25 +0,0 @@ -{% if SWIFT_CONTROLLER is undefined or SWIFT_CONTROLLER == "False" %} -server {{ CONTROLLER_HOST_ADDRESS }} iburst -{% else %} -# We use iburst here to reduce the potential initial delay to set the clock -server 0.pool.ntp.org iburst -server 1.pool.ntp.org iburst -server 2.pool.ntp.org iburst -server 3.pool.ntp.org iburst - -# kod - notify client when packets are denied service, -# rather than just dropping the packets -# -# nomodify - deny queries which attempt to modify the state of the server -# -# notrap - decline to provide mode 6 control message trap service to -# matching hosts -# -# see ntp.conf(5) for more details -restrict -4 default kod notrap nomodify -restrict -6 default kod notrap nomodify -{% endif %} - -# The default rlimit isn't enough in some cases -# so we set a higher limit here -rlimit memlock 256 diff --git a/swift/manifest b/swift/manifest deleted file mode 100644 index 7fd76206..00000000 --- a/swift/manifest +++ /dev/null @@ -1,15 +0,0 @@ -0040755 0 0 /usr/share -0040755 0 0 /usr/share/swift -0100644 0 0 /usr/share/swift/hosts -0100644 0 0 /usr/share/swift/swift-storage.yml -0040755 0 0 /usr/share/swift/etc -0040755 0 0 /usr/share/swift/etc/swift -0100644 0 0 /usr/share/swift/etc/swift/account-server.j2 -0100644 0 0 /usr/share/swift/etc/swift/swift.j2 -0100644 0 0 /usr/share/swift/etc/swift/object-server.j2 -0100644 0 0 /usr/share/swift/etc/swift/container-server.j2 -0100644 0 0 /usr/share/swift/etc/rsyncd.j2 -0100644 0 0 /usr/lib/systemd/system/swift-storage-setup.service -0100644 0 0 /usr/lib/systemd/system/swift-storage.service -template overwrite 0100644 0 0 /etc/ntp.conf -overwrite 0100644 0 0 /usr/lib/systemd/system/rsync.service diff --git a/swift/usr/lib/systemd/system/rsync.service b/swift/usr/lib/systemd/system/rsync.service deleted file mode 100644 index babcfb46..00000000 --- a/swift/usr/lib/systemd/system/rsync.service +++ /dev/null @@ -1,11 +0,0 @@ -[Unit] -Description=fast remote file copy program daemon -After=swift-storage-setup.service -ConditionPathExists=/etc/rsyncd.conf - -[Service] -ExecStart=/usr/bin/rsync --daemon --no-detach -Restart=on-failure - -[Install] -WantedBy=multi-user.target diff --git a/swift/usr/lib/systemd/system/swift-storage-setup.service b/swift/usr/lib/systemd/system/swift-storage-setup.service deleted file mode 100644 index 3df31163..00000000 --- a/swift/usr/lib/systemd/system/swift-storage-setup.service +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=Run openstack-swift-storage-setup (once) -After=local-fs.target postgres-server-setup.service - -[Service] -Type=oneshot -RemainAfterExit=yes -ExecStart=/usr/bin/ansible-playbook -v -i /usr/share/swift/hosts /usr/share/swift/swift-storage.yml -Restart=no - -[Install] -WantedBy=multi-user.target diff --git a/swift/usr/lib/systemd/system/swift-storage.service b/swift/usr/lib/systemd/system/swift-storage.service deleted file mode 100644 index dc41d3bc..00000000 --- 
a/swift/usr/lib/systemd/system/swift-storage.service +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=OpenStack Swift Storage -After=syslog.target network.target swift-storage-setup.service - -[Service] -Type=forking -Restart=on-failure -ExecStart=/usr/bin/swift-init all start -ExecStop=/usr/bin/swift-init all stop - -[Install] -WantedBy=multi-user.target diff --git a/swift/usr/share/swift/etc/rsyncd.j2 b/swift/usr/share/swift/etc/rsyncd.j2 deleted file mode 100644 index c0657665..00000000 --- a/swift/usr/share/swift/etc/rsyncd.j2 +++ /dev/null @@ -1,23 +0,0 @@ -uid = swift -gid = swift -log file = /var/log/rsyncd.log -pid file = /var/run/rsyncd.pid -address = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} - -[account] -max connections = 2 -path = /srv/node/ -read only = false -lock file = /var/lock/account.lock - -[container] -max connections = 2 -path = /srv/node/ -read only = false -lock file = /var/lock/container.lock - -[object] -max connections = 2 -path = /srv/node/ -read only = false -lock file = /var/lock/object.lock diff --git a/swift/usr/share/swift/etc/swift/account-server.j2 b/swift/usr/share/swift/etc/swift/account-server.j2 deleted file mode 100644 index d977e295..00000000 --- a/swift/usr/share/swift/etc/swift/account-server.j2 +++ /dev/null @@ -1,192 +0,0 @@ -[DEFAULT] -# bind_ip = 0.0.0.0 -bind_ip = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} -bind_port = 6002 -# bind_timeout = 30 -# backlog = 4096 -user = swift -swift_dir = /etc/swift -devices = /srv/node -# mount_check = true -# disable_fallocate = false -# -# Use an integer to override the number of pre-forked processes that will -# accept connections. -# workers = auto -# -# Maximum concurrent requests per worker -# max_clients = 1024 -# -# You can specify default log routing here if you want: -# log_name = swift -# log_facility = LOG_LOCAL0 -# log_level = INFO -# log_address = /dev/log -# The following caps the length of log lines to the value given; no limit if -# set to 0, the default. -# log_max_line_length = 0 -# -# comma separated list of functions to call to setup custom log handlers. -# functions get passed: conf, name, log_to_console, log_route, fmt, logger, -# adapted_logger -# log_custom_handlers = -# -# If set, log_udp_host will override log_address -# log_udp_host = -# log_udp_port = 514 -# -# You can enable StatsD logging here: -# log_statsd_host = localhost -# log_statsd_port = 8125 -# log_statsd_default_sample_rate = 1.0 -# log_statsd_sample_rate_factor = 1.0 -# log_statsd_metric_prefix = -# -# If you don't mind the extra disk space usage in overhead, you can turn this -# on to preallocate disk space with SQLite databases to decrease fragmentation. -# db_preallocation = off -# -# eventlet_debug = false -# -# You can set fallocate_reserve to the number of bytes you'd like fallocate to -# reserve, whether there is space for the given file size or not. -# fallocate_reserve = 0 - -[pipeline:main] -pipeline = healthcheck recon account-server - -[app:account-server] -use = egg:swift#account -# You can override the default log routing for this app here: -# set log_name = account-server -# set log_facility = LOG_LOCAL0 -# set log_level = INFO -# set log_requests = true -# set log_address = /dev/log -# -# auto_create_account_prefix = . -# -# Configure parameter for creating specific server -# To handle all verbs, including replication verbs, do not specify -# "replication_server" (this is the default). To only handle replication, -# set to a True value (e.g. "True" or "1"). 
To handle only non-replication
-# verbs, set to "False". Unless you have a separate replication network, you
-# should not specify any value for "replication_server".
-# replication_server = false
-
-[filter:healthcheck]
-use = egg:swift#healthcheck
-# An optional filesystem path, which if present, will cause the healthcheck
-# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
-# disable_path =
-
-[filter:recon]
-use = egg:swift#recon
-recon_cache_path = /var/cache/swift
-
-[account-replicator]
-# You can override the default log routing for this app here (don't use set!):
-# log_name = account-replicator
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# vm_test_mode = no
-# per_diff = 1000
-# max_diffs = 100
-# concurrency = 8
-# interval = 30
-#
-# How long without an error before a node's error count is reset. This will
-# also be how long before a node is reenabled after suppression is triggered.
-# error_suppression_interval = 60
-#
-# How many errors can accumulate before a node is temporarily ignored.
-# error_suppression_limit = 10
-#
-# node_timeout = 10
-# conn_timeout = 0.5
-#
-# The replicator also performs reclamation
-# reclaim_age = 604800
-#
-# Time in seconds to wait between replication passes
-# Note: if the parameter 'interval' is defined then it will be used in place
-# of run_pause.
-# run_pause = 30
-#
-# recon_cache_path = /var/cache/swift
-
-[account-auditor]
-# You can override the default log routing for this app here (don't use set!):
-# log_name = account-auditor
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# Will audit each account at most once per interval
-# interval = 1800
-#
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# accounts_per_second = 200
-# recon_cache_path = /var/cache/swift
-
-[account-reaper]
-# You can override the default log routing for this app here (don't use set!):
-# log_name = account-reaper
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# concurrency = 25
-# interval = 3600
-# node_timeout = 10
-# conn_timeout = 0.5
-#
-# Normally, the reaper begins deleting account information for deleted accounts
-# immediately; you can set this to delay its work however. The value is in
-# seconds; 2592000 = 30 days for example.
-# delay_reaping = 0
-#
-# If the account fails to be reaped due to a persistent error, the
-# account reaper will log a message such as:
-# Account <name> has not been reaped since <time>
-# You can search logs for this message if space is not being reclaimed
-# after you delete account(s).
-# Default is 2592000 seconds (30 days). This is in addition to any time
-# requested by delay_reaping.
-# reap_warn_after = 2592000
-
-# Note: Put it at the beginning of the pipeline to profile all middleware. But
-# it is safer to put this after healthcheck.
-[filter:xprofile]
-use = egg:swift#xprofile
-# This option enables you to switch profilers; the profiler should inherit
-# from the python standard profiler. Currently the supported values are
-# 'cProfile', 'eventlet.green.profile' etc.
-# profile_module = eventlet.green.profile
-#
-# This prefix will be used to combine process ID and timestamp to name the
-# profile data file. Make sure the executing user has permission to write
-# into this path (missing path segments will be created, if necessary).
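A sketch of the replication_server switch documented above, classifying request methods the way the three settings describe; REPLICATE is the replication verb in question:

    # Unset: handle everything; "true": replication verbs only;
    # "false": everything except replication verbs.
    REPLICATION_VERBS = {'REPLICATE'}

    def method_allowed(method, replication_server):
        if replication_server is None:
            return True
        if replication_server:
            return method in REPLICATION_VERBS
        return method not in REPLICATION_VERBS

    for setting in (None, True, False):
        print(setting, method_allowed('GET', setting),
              method_allowed('REPLICATE', setting))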
-# If you enable profiling in more than one type of daemon, you must override -# it with an unique value like: /var/log/swift/profile/account.profile -# log_filename_prefix = /tmp/log/swift/profile/default.profile -# -# the profile data will be dumped to local disk based on above naming rule -# in this interval. -# dump_interval = 5.0 -# -# Be careful, this option will enable profiler to dump data into the file with -# time stamp which means there will be lots of files piled up in the directory. -# dump_timestamp = false -# -# This is the path of the URL to access the mini web UI. -# path = /__profile__ -# -# Clear the data when the wsgi server shutdown. -# flush_at_shutdown = false -# -# unwind the iterator of applications -# unwind = false diff --git a/swift/usr/share/swift/etc/swift/container-server.j2 b/swift/usr/share/swift/etc/swift/container-server.j2 deleted file mode 100644 index d226d016..00000000 --- a/swift/usr/share/swift/etc/swift/container-server.j2 +++ /dev/null @@ -1,203 +0,0 @@ -[DEFAULT] -# bind_ip = 0.0.0.0 -bind_ip = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} -bind_port = 6001 -# bind_timeout = 30 -# backlog = 4096 -user = swift -swift_dir = /etc/swift -devices = /srv/node -# mount_check = true -# disable_fallocate = false -# -# Use an integer to override the number of pre-forked processes that will -# accept connections. -# workers = auto -# -# Maximum concurrent requests per worker -# max_clients = 1024 -# -# This is a comma separated list of hosts allowed in the X-Container-Sync-To -# field for containers. This is the old-style of using container sync. It is -# strongly recommended to use the new style of a separate -# container-sync-realms.conf -- see container-sync-realms.conf-sample -# allowed_sync_hosts = 127.0.0.1 -# -# You can specify default log routing here if you want: -# log_name = swift -# log_facility = LOG_LOCAL0 -# log_level = INFO -# log_address = /dev/log -# The following caps the length of log lines to the value given; no limit if -# set to 0, the default. -# log_max_line_length = 0 -# -# comma separated list of functions to call to setup custom log handlers. -# functions get passed: conf, name, log_to_console, log_route, fmt, logger, -# adapted_logger -# log_custom_handlers = -# -# If set, log_udp_host will override log_address -# log_udp_host = -# log_udp_port = 514 -# -# You can enable StatsD logging here: -# log_statsd_host = localhost -# log_statsd_port = 8125 -# log_statsd_default_sample_rate = 1.0 -# log_statsd_sample_rate_factor = 1.0 -# log_statsd_metric_prefix = -# -# If you don't mind the extra disk space usage in overhead, you can turn this -# on to preallocate disk space with SQLite databases to decrease fragmentation. -# db_preallocation = off -# -# eventlet_debug = false -# -# You can set fallocate_reserve to the number of bytes you'd like fallocate to -# reserve, whether there is space for the given file size or not. -# fallocate_reserve = 0 - -[pipeline:main] -pipeline = healthcheck recon container-server - -[app:container-server] -use = egg:swift#container -# You can override the default log routing for this app here: -# set log_name = container-server -# set log_facility = LOG_LOCAL0 -# set log_level = INFO -# set log_requests = true -# set log_address = /dev/log -# -# node_timeout = 3 -# conn_timeout = 0.5 -# allow_versions = false -# auto_create_account_prefix = . -# -# Configure parameter for creating specific server -# To handle all verbs, including replication verbs, do not specify -# "replication_server" (this is the default). 
To only handle replication, -# set to a True value (e.g. "True" or "1"). To handle only non-replication -# verbs, set to "False". Unless you have a separate replication network, you -# should not specify any value for "replication_server". -# replication_server = false - -[filter:healthcheck] -use = egg:swift#healthcheck -# An optional filesystem path, which if present, will cause the healthcheck -# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE" -# disable_path = - -[filter:recon] -use = egg:swift#recon -recon_cache_path = /var/cache/swift - -[container-replicator] -# You can override the default log routing for this app here (don't use set!): -# log_name = container-replicator -# log_facility = LOG_LOCAL0 -# log_level = INFO -# log_address = /dev/log -# -# vm_test_mode = no -# per_diff = 1000 -# max_diffs = 100 -# concurrency = 8 -# interval = 30 -# node_timeout = 10 -# conn_timeout = 0.5 -# -# The replicator also performs reclamation -# reclaim_age = 604800 -# -# Time in seconds to wait between replication passes -# Note: if the parameter 'interval' is defined then it will be used in place -# of run_pause. -# run_pause = 30 -# -# recon_cache_path = /var/cache/swift - -[container-updater] -# You can override the default log routing for this app here (don't use set!): -# log_name = container-updater -# log_facility = LOG_LOCAL0 -# log_level = INFO -# log_address = /dev/log -# -# interval = 300 -# concurrency = 4 -# node_timeout = 3 -# conn_timeout = 0.5 -# -# slowdown will sleep that amount between containers -# slowdown = 0.01 -# -# Seconds to suppress updating an account that has generated an error -# account_suppression_time = 60 -# -# recon_cache_path = /var/cache/swift - -[container-auditor] -# You can override the default log routing for this app here (don't use set!): -# log_name = container-auditor -# log_facility = LOG_LOCAL0 -# log_level = INFO -# log_address = /dev/log -# -# Will audit each container at most once per interval -# interval = 1800 -# -# containers_per_second = 200 -# recon_cache_path = /var/cache/swift - -[container-sync] -# You can override the default log routing for this app here (don't use set!): -# log_name = container-sync -# log_facility = LOG_LOCAL0 -# log_level = INFO -# log_address = /dev/log -# -# If you need to use an HTTP Proxy, set it here; defaults to no proxy. -# You can also set this to a comma separated list of HTTP Proxies and they will -# be randomly used (simple load balancing). -# sync_proxy = http://10.1.1.1:8888,http://10.1.1.2:8888 -# -# Will sync each container at most once per interval -# interval = 300 -# -# Maximum amount of time to spend syncing each container per pass -# container_time = 60 - -# Note: Put it at the beginning of the pipeline to profile all middleware. But -# it is safer to put this after healthcheck. -[filter:xprofile] -use = egg:swift#xprofile -# This option enable you to switch profilers which should inherit from python -# standard profiler. Currently the supported value can be 'cProfile', -# 'eventlet.green.profile' etc. -# profile_module = eventlet.green.profile -# -# This prefix will be used to combine process ID and timestamp to name the -# profile data file. Make sure the executing user has permission to write -# into this path (missing path segments will be created, if necessary). 
-# If you enable profiling in more than one type of daemon, you must override -# it with an unique value like: /var/log/swift/profile/container.profile -# log_filename_prefix = /tmp/log/swift/profile/default.profile -# -# the profile data will be dumped to local disk based on above naming rule -# in this interval. -# dump_interval = 5.0 -# -# Be careful, this option will enable profiler to dump data into the file with -# time stamp which means there will be lots of files piled up in the directory. -# dump_timestamp = false -# -# This is the path of the URL to access the mini web UI. -# path = /__profile__ -# -# Clear the data when the wsgi server shutdown. -# flush_at_shutdown = false -# -# unwind the iterator of applications -# unwind = false diff --git a/swift/usr/share/swift/etc/swift/object-server.j2 b/swift/usr/share/swift/etc/swift/object-server.j2 deleted file mode 100644 index 66990be9..00000000 --- a/swift/usr/share/swift/etc/swift/object-server.j2 +++ /dev/null @@ -1,283 +0,0 @@ -[DEFAULT] -# bind_ip = 0.0.0.0 -bind_ip = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }} -bind_port = 6000 -# bind_timeout = 30 -# backlog = 4096 -user = swift -swift_dir = /etc/swift -devices = /srv/node -# mount_check = true -# disable_fallocate = false -# expiring_objects_container_divisor = 86400 -# expiring_objects_account_name = expiring_objects -# -# Use an integer to override the number of pre-forked processes that will -# accept connections. -# workers = auto -# -# Maximum concurrent requests per worker -# max_clients = 1024 -# -# You can specify default log routing here if you want: -# log_name = swift -# log_facility = LOG_LOCAL0 -# log_level = INFO -# log_address = /dev/log -# The following caps the length of log lines to the value given; no limit if -# set to 0, the default. -# log_max_line_length = 0 -# -# comma separated list of functions to call to setup custom log handlers. -# functions get passed: conf, name, log_to_console, log_route, fmt, logger, -# adapted_logger -# log_custom_handlers = -# -# If set, log_udp_host will override log_address -# log_udp_host = -# log_udp_port = 514 -# -# You can enable StatsD logging here: -# log_statsd_host = localhost -# log_statsd_port = 8125 -# log_statsd_default_sample_rate = 1.0 -# log_statsd_sample_rate_factor = 1.0 -# log_statsd_metric_prefix = -# -# eventlet_debug = false -# -# You can set fallocate_reserve to the number of bytes you'd like fallocate to -# reserve, whether there is space for the given file size or not. -# fallocate_reserve = 0 -# -# Time to wait while attempting to connect to another backend node. -# conn_timeout = 0.5 -# Time to wait while sending each chunk of data to another backend node. -# node_timeout = 3 -# Time to wait while receiving each chunk of data from a client or another -# backend node. 
-
-[pipeline:main]
-pipeline = healthcheck recon object-server
-
-[app:object-server]
-use = egg:swift#object
-# You can override the default log routing for this app here:
-# set log_name = object-server
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_requests = true
-# set log_address = /dev/log
-#
-# max_upload_time = 86400
-# slow = 0
-#
-# Objects smaller than this are not evicted from the buffer cache once read
-# keep_cache_size = 5424880
-#
-# If true, objects for authenticated GET requests may be kept in buffer cache
-# if small enough
-# keep_cache_private = false
-#
-# on PUTs, sync data every n MB
-# mb_per_sync = 512
-#
-# Comma separated list of headers that can be set in metadata on an object.
-# This list is in addition to X-Object-Meta-* headers and cannot include
-# Content-Type, etag, Content-Length, or deleted
-# allowed_headers = Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object
-#
-# auto_create_account_prefix = .
-#
-# A value of 0 means "don't use thread pools". A reasonable starting point is
-# 4.
-# threads_per_disk = 0
-#
-# Configure which verbs this server handles.
-# To handle all verbs, including replication verbs, do not specify
-# "replication_server" (this is the default). To only handle replication,
-# set to a True value (e.g. "True" or "1"). To handle only non-replication
-# verbs, set to "False". Unless you have a separate replication network, you
-# should not specify any value for "replication_server".
-# replication_server = false
-#
-# Set to restrict the number of concurrent incoming REPLICATION requests
-# Set to 0 for unlimited
-# Note that REPLICATION is currently an ssync only item
-# replication_concurrency = 4
-#
-# Restricts incoming REPLICATION requests to one per device,
-# replication_concurrency permitting. This can help control I/O to each
-# device, but you may wish to set this to False to allow multiple REPLICATION
-# requests (up to the above replication_concurrency setting) per device.
-# replication_one_per_device = True
-#
-# Number of seconds to wait for an existing replication device lock before
-# giving up.
-# replication_lock_timeout = 15
-#
-# These next two settings control when the REPLICATION subrequest handler will
-# abort an incoming REPLICATION attempt. An abort will occur if there are at
-# least threshold number of failures and the value of failures / successes
-# exceeds the ratio. The defaults of 100 and 1.0 mean that at least 100
-# failures have to occur and there have to be more failures than successes for
-# an abort to occur.
-# replication_failure_threshold = 100
-# replication_failure_ratio = 1.0
-#
-# Use splice() for zero-copy object GETs. This requires Linux kernel
-# version 3.0 or greater. If you set "splice = yes" but the kernel
-# does not support it, error messages will appear in the object server
-# logs at startup, but your object servers should continue to function.
-#
-# splice = no
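The abort rule described in those comments reduces to a two-part predicate; the sketch below simply restates the comment in Python, and is not Swift's implementation:

    # Restates the REPLICATION abort rule: abort once failures reach the
    # threshold AND failures/successes exceeds the ratio.
    def should_abort(failures, successes, threshold=100, ratio=1.0):
        if failures < threshold:
            return False
        if successes == 0:
            return True  # nothing succeeded; any failures exceed the ratio
        return failures / float(successes) > ratio

    print(should_abort(100, 99))   # True: threshold met, more failures than successes
    print(should_abort(100, 100))  # False: ratio of 1.0 not exceeded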
-
-[filter:healthcheck]
-use = egg:swift#healthcheck
-# An optional filesystem path, which if present, will cause the healthcheck
-# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
-# disable_path =
-
-[filter:recon]
-use = egg:swift#recon
-recon_cache_path = /var/cache/swift
-#recon_lock_path = /var/lock
-
-[object-replicator]
-# You can override the default log routing for this app here (don't use set!):
-# log_name = object-replicator
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# vm_test_mode = no
-# daemonize = on
-# run_pause = 30
-# concurrency = 1
-# stats_interval = 300
-#
-# The sync method to use; default is rsync but you can use ssync to try the
-# EXPERIMENTAL all-swift-code-no-rsync-callouts method. Once ssync is verified
-# as having performance comparable to, or better than, rsync, we plan to
-# deprecate rsync so we can move on with more features for replication.
-# sync_method = rsync
-#
-# max duration of a partition rsync
-# rsync_timeout = 900
-#
-# bandwidth limit for rsync in kB/s. 0 means unlimited
-# rsync_bwlimit = 0
-#
-# passed to rsync for io op timeout
-# rsync_io_timeout = 30
-#
-# node_timeout =
-# max duration of an http request; this is for REPLICATE finalization calls and
-# so should be longer than node_timeout
-# http_timeout = 60
-#
-# attempts to kill all workers if nothing replicates for lockup_timeout seconds
-# lockup_timeout = 1800
-#
-# The replicator also performs reclamation
-# reclaim_age = 604800
-#
-# ring_check_interval = 15
-# recon_cache_path = /var/cache/swift
-#
-# limits how long rsync error log lines are
-# 0 means to log the entire line
-# rsync_error_log_line_length = 0
-#
-# handoffs_first and handoff_delete are options for a special case
-# such as disk full in the cluster. These two options SHOULD NOT BE
-# CHANGED, except in such extreme situations (e.g. disks filled up
-# or are about to fill up; in any case, DO NOT let your drives fill up).
-# handoffs_first is the flag to replicate handoffs prior to canonical
-# partitions. It forces handoffs to be synced and deleted quickly.
-# If set to a True value (e.g. "True" or "1"), partitions
-# that are not supposed to be on the node will be replicated first.
-# handoffs_first = False
-#
-# handoff_delete is the number of replicas which must be ensured in swift.
-# If it is set lower than the number of replicas, the object-replicator
-# may delete local handoffs even though not all replicas are ensured in the
-# cluster. The object-replicator will remove local handoff partition
-# directories after syncing a partition when the number of successful
-# responses is greater than or equal to this number. By default (auto),
-# handoff partitions will only be removed once they have successfully
-# replicated to all the canonical nodes.
-# handoff_delete = auto
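Read literally, that handoff_delete rule is a small predicate; the sketch below restates it in Python purely as an illustration, not as Swift's implementation:

    # Restates the handoff_delete rule: a local handoff partition may be
    # removed once enough canonical nodes have responded successfully.
    def may_remove_handoff(successes, replica_count, handoff_delete='auto'):
        if handoff_delete == 'auto':
            required = replica_count  # default: every canonical replica in place
        else:
            required = int(handoff_delete)
        return successes >= required

    print(may_remove_handoff(3, 3))                      # True with the default
    print(may_remove_handoff(2, 3, handoff_delete='2'))  # True, but riskier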
-
-[object-updater]
-# You can override the default log routing for this app here (don't use set!):
-# log_name = object-updater
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# interval = 300
-# concurrency = 1
-# node_timeout =
-# slowdown will sleep that amount between objects
-# slowdown = 0.01
-#
-# recon_cache_path = /var/cache/swift
-
-[object-auditor]
-# You can override the default log routing for this app here (don't use set!):
-# log_name = object-auditor
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# You can set the disk chunk size that the auditor uses, making it larger if
-# you like for more efficient local auditing of larger objects
-# disk_chunk_size = 65536
-# files_per_second = 20
-# concurrency = 1
-# bytes_per_second = 10000000
-# log_time = 3600
-# zero_byte_files_per_second = 50
-# recon_cache_path = /var/cache/swift
-
-# Takes a comma separated list of ints. If set, the object auditor will
-# increment a counter for every object whose size is less than or equal to
-# the given break points and report the result after a full scan.
-# object_size_stats =
-
-# Note: Put it at the beginning of the pipeline to profile all middleware. But
-# it is safer to put this after healthcheck.
-[filter:xprofile]
-use = egg:swift#xprofile
-# This option enables you to switch profilers, which should inherit from the
-# Python standard profiler. Currently the supported values are 'cProfile',
-# 'eventlet.green.profile', etc.
-# profile_module = eventlet.green.profile
-#
-# This prefix will be used to combine process ID and timestamp to name the
-# profile data file. Make sure the executing user has permission to write
-# into this path (missing path segments will be created, if necessary).
-# If you enable profiling in more than one type of daemon, you must override
-# it with a unique value like: /var/log/swift/profile/object.profile
-# log_filename_prefix = /tmp/log/swift/profile/default.profile
-#
-# the profile data will be dumped to local disk based on the above naming
-# rule at this interval.
-# dump_interval = 5.0
-#
-# Be careful: this option makes the profiler dump data into timestamped
-# files, which means lots of files will pile up in the directory.
-# dump_timestamp = false
-#
-# This is the path of the URL to access the mini web UI.
-# path = /__profile__
-#
-# Clear the data when the wsgi server shuts down.
-# flush_at_shutdown = false
-#
-# unwind the iterator of applications
-# unwind = false
diff --git a/swift/usr/share/swift/etc/swift/swift.j2 b/swift/usr/share/swift/etc/swift/swift.j2
deleted file mode 100644
index 6d76215a..00000000
--- a/swift/usr/share/swift/etc/swift/swift.j2
+++ /dev/null
@@ -1,118 +0,0 @@
-[swift-hash]
-
-# swift_hash_path_suffix and swift_hash_path_prefix are used as part of the
-# hashing algorithm when determining data placement in the cluster.
-# These values should remain secret and MUST NOT change
-# once a cluster has been deployed.
-
-swift_hash_path_suffix = {{ SWIFT_HASH_PATH_SUFFIX }}
-swift_hash_path_prefix = {{ SWIFT_HASH_PATH_PREFIX }}
-
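To make the role of these secrets concrete: Swift folds the prefix and suffix into the MD5 hash that places data on the ring, roughly as in the simplified sketch below. The real logic lives in swift.common.utils.hash_path; treat this as an approximation, not the exact implementation:

    # Simplified sketch of how the hash path secrets enter data placement.
    from hashlib import md5

    def hash_path(prefix, suffix, account, container=None, obj=None):
        # Only the account is mandatory; container and object narrow the path.
        parts = [p for p in (account, container, obj) if p is not None]
        key = prefix + '/' + '/'.join(parts) + suffix
        return md5(key.encode('utf-8')).hexdigest()

    # Changing either secret would relocate every object in the cluster,
    # which is why the values MUST NOT change once deployed.
    print(hash_path('prefix', 'suffix', 'AUTH_test', 'photos', 'cat.jpg'))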
-# storage policies are defined here and determine various characteristics
-# about how objects are stored and treated. Policies are specified by name on
-# a per container basis. Names are case-insensitive. The policy index is
-# specified in the section header and is used internally. The policy with
-# index 0 is always used for legacy containers and can be given a name for use
-# in metadata; however, the ring file name will always be 'object.ring.gz' for
-# backwards compatibility. If no policies are defined, a policy with index 0
-# will be automatically created for backwards compatibility and given the name
-# Policy-0. A default policy is used when creating new containers if no
-# policy is specified in the request. If no other policies are defined, the
-# policy with index 0 will be declared the default. If multiple policies are
-# defined, you must define a policy with index 0 and you must specify a
-# default. It is recommended you always define a section for
-# storage-policy:0.
-[storage-policy:0]
-name = Policy-0
-default = yes
-
-# The following section would declare a policy called 'silver'; the number of
-# replicas will be determined by how the ring is built. In this example the
-# 'silver' policy could have a lower or higher number of replicas than the
-# 'Policy-0' policy above. The ring filename will be 'object-1.ring.gz'. You
-# may only specify one storage policy section as the default. If you changed
-# this section to specify 'silver' as the default, a client creating a new
-# container without specifying a policy would get the 'silver' policy because
-# this config has specified it as the default. However, if a legacy container
-# (one created with a pre-policy version of swift) is accessed, it is known
-# implicitly to be assigned to the policy with index 0 as opposed to the
-# current default.
-#[storage-policy:1]
-#name = silver
-
-# The swift-constraints section sets the basic constraints on data
-# saved in the swift cluster. These constraints are automatically
-# published by the proxy server in responses to /info requests.
-
-[swift-constraints]
-
-# max_file_size is the largest "normal" object that can be saved in
-# the cluster. This is also the limit on the size of each segment of
-# a "large" object when using the large object manifest support.
-# This value is set in bytes. Setting it to lower than 1MiB will cause
-# some tests to fail. It is STRONGLY recommended to leave this value at
-# the default (5 * 2**30 + 2).
-
-#max_file_size = 5368709122
-
-
-# max_meta_name_length is the max number of bytes in the utf8 encoding
-# of the name portion of a metadata header.
-
-#max_meta_name_length = 128
-
-
-# max_meta_value_length is the max number of bytes in the utf8 encoding
-# of a metadata value
-
-#max_meta_value_length = 256
-
-
-# max_meta_count is the max number of metadata keys that can be stored
-# on a single account, container, or object
-
-#max_meta_count = 90
-
-
-# max_meta_overall_size is the max number of bytes in the utf8 encoding
-# of the metadata (keys + values)
-
-#max_meta_overall_size = 4096
-
-# max_header_size is the max number of bytes in the utf8 encoding of each
-# header. Using 8192 as default because eventlet uses 8192 as the max size
-# of a header line. This value may need to be increased when using identity
-# v3 API tokens including more than 7 catalog entries.
-# See also include_service_catalog in proxy-server.conf-sample
-# (documented in overview_auth.rst)
-
-#max_header_size = 8192
-
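Note that every limit in this section is a byte count over the UTF-8 encoding, not a character count; a short hypothetical check makes the difference visible:

    # The constraints are byte limits on the UTF-8 encoding, not character
    # counts; multi-byte characters use up the budget faster.
    def within_limit(name, max_bytes):
        return len(name.encode('utf-8')) <= max_bytes

    print(within_limit(u'a' * 256, 256))       # True: 256 ASCII chars = 256 bytes
    print(within_limit(u'\u00e9' * 256, 256))  # False: each 'e-acute' is 2 bytes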
-
-# max_object_name_length is the max number of bytes in the utf8 encoding
-# of an object name
-
-#max_object_name_length = 1024
-
-
-# container_listing_limit is the default (and max) number of items
-# returned for a container listing request
-
-#container_listing_limit = 10000
-
-
-# account_listing_limit is the default (and max) number of items returned
-# for an account listing request
-#account_listing_limit = 10000
-
-
-# max_account_name_length is the max number of bytes in the utf8 encoding
-# of an account name
-
-#max_account_name_length = 256
-
-
-# max_container_name_length is the max number of bytes in the utf8 encoding
-# of a container name
-
-#max_container_name_length = 256
diff --git a/swift/usr/share/swift/hosts b/swift/usr/share/swift/hosts
deleted file mode 100644
index 5b97818d..00000000
--- a/swift/usr/share/swift/hosts
+++ /dev/null
@@ -1 +0,0 @@
-localhost ansible_connection=local
diff --git a/swift/usr/share/swift/swift-storage.yml b/swift/usr/share/swift/swift-storage.yml
deleted file mode 100644
index 62a335ed..00000000
--- a/swift/usr/share/swift/swift-storage.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- hosts: localhost
-  vars_files:
-  - swift-storage-vars.yml
-  vars:
-    remote_user: root
-  tasks:
-  - user: name=swift comment="Swift user"
-  - file: path=/etc/swift owner=swift group=swift state=directory recurse=yes
-
-  - template: src=/usr/share/swift/etc/rsyncd.j2 dest=/etc/rsyncd.conf
-    mode=0644 owner=swift group=swift
-
-  - template: src=/usr/share/swift/etc/swift/{{ item }}.j2
-    dest=/etc/swift/{{ item }}.conf mode=0644 owner=swift group=swift
-    with_items:
-    - account-server
-    - container-server
-    - object-server
-    - swift
-
-  - file: path=/srv/node owner=swift group=swift state=directory recurse=yes
-  - file: path=/var/cache/swift owner=swift group=swift state=directory
-    recurse=yes
diff --git a/vagrant-files/home/vagrant/.ssh/authorized_keys b/vagrant-files/home/vagrant/.ssh/authorized_keys
deleted file mode 100644
index 18a9c00f..00000000
--- a/vagrant-files/home/vagrant/.ssh/authorized_keys
+++ /dev/null
@@ -1 +0,0 @@
-ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key
diff --git a/vagrant-files/manifest b/vagrant-files/manifest
deleted file mode 100644
index 67168341..00000000
--- a/vagrant-files/manifest
+++ /dev/null
@@ -1,4 +0,0 @@
-0040755 0 0 /home
-0040755 1000 0000 /home/vagrant
-0040700 1000 1000 /home/vagrant/.ssh
-0100600 1000 1000 /home/vagrant/.ssh/authorized_keys
-- cgit v1.2.1
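An aside on the vagrant-files/manifest deleted above: judging only from the four lines shown, each entry appears to hold an octal mode (including file-type bits), a uid, a gid, and a path. That reading is an assumption, and the parser below is a hypothetical sketch of it:

    # Hypothetical parser for the manifest format deleted above, assuming
    # each line is: octal mode (with file-type bits), uid, gid, path.
    import stat

    def parse_manifest_line(line):
        mode, uid, gid, path = line.split(None, 3)
        mode = int(mode, 8)
        kind = 'directory' if stat.S_ISDIR(mode) else 'file'
        return kind, stat.S_IMODE(mode), int(uid), int(gid), path

    print(parse_manifest_line('0100600 1000 1000 /home/vagrant/.ssh/authorized_keys'))
    # ('file', 384, 1000, 1000, '/home/vagrant/.ssh/authorized_keys')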