From ee53974ddc864535bbc99302d16da11287affe47 Mon Sep 17 00:00:00 2001
From: Richard Maw
Date: Mon, 23 Mar 2015 21:06:52 +0000
Subject: WIP: Add OpenStack initial configuration

TODO: Split this out into:

1. initial config (openstack/etc) default values
2+. as many individual changes to the initial config as can be
    extracted, including support scripts and configuration extensions

---
 apache-httpd-server.configure | 23 +
 openstack-cinder.configure | 55 +
 openstack-glance.configure | 49 +
 openstack-keystone.configure | 56 +
 openstack-neutron.configure | 67 +
 openstack-nova-compute.configure | 41 +
 openstack-nova.configure | 74 +
 openstack-rabbitmq.configure | 47 +
 openstack/etc/cinder/api-paste.ini | 60 +
 openstack/etc/cinder/cinder.conf | 2769 +++++++++++++++
 openstack/etc/cinder/policy.json | 80 +
 openstack/etc/cinder/rootwrap.conf | 27 +
 openstack/etc/cinder/rootwrap.d/volume.filters | 157 +
 openstack/etc/glance/glance-api-paste.ini | 72 +
 openstack/etc/glance/glance-api.conf | 691 ++++
 openstack/etc/glance/glance-cache.conf | 200 ++
 openstack/etc/glance/glance-registry-paste.ini | 25 +
 openstack/etc/glance/glance-registry.conf | 197 ++
 openstack/etc/glance/glance-scrubber.conf | 56 +
 openstack/etc/glance/logging.conf | 54 +
 openstack/etc/glance/policy.json | 32 +
 openstack/etc/glance/schema-image.json | 28 +
 openstack/etc/horizon/apache-horizon.conf | 32 +
 .../horizon/openstack_dashboard/local_settings.py | 549 +++
 openstack/etc/hosts | 2 +
 openstack/etc/keystone/keystone-paste.ini | 112 +
 openstack/etc/keystone/keystone.conf | 1384 ++++++++
 openstack/etc/keystone/logging.conf | 39 +
 openstack/etc/keystone/policy.json | 144 +
 openstack/etc/logrotate.d/openstack-glance-api | 7 +
 .../etc/logrotate.d/openstack-glance-registry | 7 +
 openstack/etc/logrotate.d/openstack-keystone | 8 +
 openstack/etc/neutron/api-paste.ini | 30 +
 openstack/etc/neutron/dhcp_agent.ini | 89 +
 openstack/etc/neutron/fwaas_driver.ini | 3 +
 openstack/etc/neutron/l3_agent.ini | 103 +
 openstack/etc/neutron/lbaas_agent.ini | 42 +
 openstack/etc/neutron/metadata_agent.ini | 60 +
 openstack/etc/neutron/metering_agent.ini | 18 +
 openstack/etc/neutron/neutron.conf | 642 ++++
 .../etc/neutron/plugins/bigswitch/restproxy.ini | 114 +
 .../neutron/plugins/bigswitch/ssl/ca_certs/README | 3 +
 .../plugins/bigswitch/ssl/host_certs/README | 6 +
 openstack/etc/neutron/plugins/brocade/brocade.ini | 29 +
 .../etc/neutron/plugins/cisco/cisco_cfg_agent.ini | 15 +
 .../etc/neutron/plugins/cisco/cisco_plugins.ini | 107 +
 .../neutron/plugins/cisco/cisco_router_plugin.ini | 76 +
 .../etc/neutron/plugins/cisco/cisco_vpn_agent.ini | 26 +
 .../etc/neutron/plugins/embrane/heleos_conf.ini | 41 +
 .../plugins/hyperv/hyperv_neutron_plugin.ini | 63 +
 .../neutron/plugins/ibm/sdnve_neutron_plugin.ini | 50 +
 .../plugins/linuxbridge/linuxbridge_conf.ini | 78 +
 .../etc/neutron/plugins/metaplugin/metaplugin.ini | 31 +
 openstack/etc/neutron/plugins/midonet/midonet.ini | 19 +
 openstack/etc/neutron/plugins/ml2/ml2_conf.ini | 85 +
 .../etc/neutron/plugins/ml2/ml2_conf_arista.ini | 100 +
 .../etc/neutron/plugins/ml2/ml2_conf_brocade.ini | 15 +
 .../etc/neutron/plugins/ml2/ml2_conf_cisco.ini | 118 +
 .../etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini | 52 +
 .../etc/neutron/plugins/ml2/ml2_conf_mlnx.ini | 4 +
 openstack/etc/neutron/plugins/ml2/ml2_conf_ncs.ini | 28 +
 openstack/etc/neutron/plugins/ml2/ml2_conf_odl.ini | 30 +
 openstack/etc/neutron/plugins/ml2/ml2_conf_ofa.ini | 13 +
 .../etc/neutron/plugins/ml2/ml2_conf_sriov.ini | 31 +
 openstack/etc/neutron/plugins/mlnx/mlnx_conf.ini | 79 +
 openstack/etc/neutron/plugins/nec/nec.ini | 63 +
 .../etc/neutron/plugins/nuage/nuage_plugin.ini | 41 +
 .../neutron/plugins/oneconvergence/nvsdplugin.ini | 35 +
 .../plugins/opencontrail/contrailplugin.ini | 26 +
 .../plugins/openvswitch/ovs_neutron_plugin.ini | 141 +
 .../etc/neutron/plugins/plumgrid/plumgrid.ini | 14 +
 openstack/etc/neutron/plugins/vmware/nsx.ini | 203 ++
 openstack/etc/neutron/policy.json | 138 +
 openstack/etc/neutron/rootwrap.conf | 34 +
 .../etc/neutron/rootwrap.d/cisco-apic.filters | 16 +
 openstack/etc/neutron/rootwrap.d/debug.filters | 14 +
 openstack/etc/neutron/rootwrap.d/dhcp.filters | 35 +
 .../etc/neutron/rootwrap.d/ipset-firewall.filters | 12 +
 .../neutron/rootwrap.d/iptables-firewall.filters | 21 +
 openstack/etc/neutron/rootwrap.d/l3.filters | 48 +
 .../etc/neutron/rootwrap.d/lbaas-haproxy.filters | 26 +
 .../neutron/rootwrap.d/linuxbridge-plugin.filters | 19 +
 .../etc/neutron/rootwrap.d/nec-plugin.filters | 12 +
 openstack/etc/neutron/rootwrap.d/ofagent.filters | 16 +
 .../neutron/rootwrap.d/openvswitch-plugin.filters | 22 +
 openstack/etc/neutron/rootwrap.d/vpnaas.filters | 13 +
 openstack/etc/neutron/vpn_agent.ini | 14 +
 openstack/etc/nova/api-paste.ini | 118 +
 openstack/etc/nova/cells.json | 26 +
 openstack/etc/nova/logging.conf | 81 +
 openstack/etc/nova/nova-compute.conf | 4 +
 openstack/etc/nova/nova.conf | 631 ++++
 openstack/etc/nova/nova.conf.example | 3698 ++++++++++++++++++++
 openstack/etc/nova/policy.json | 324 ++
 openstack/etc/nova/release.sample | 4 +
 openstack/etc/nova/rootwrap.conf | 27 +
 openstack/etc/nova/rootwrap.d/api-metadata.filters | 13 +
 .../nova/rootwrap.d/baremetal-compute-ipmi.filters | 9 +
 .../rootwrap.d/baremetal-deploy-helper.filters | 11 +
 openstack/etc/nova/rootwrap.d/compute.filters | 228 ++
 openstack/etc/nova/rootwrap.d/network.filters | 94 +
 openstack/etc/sysctl.conf | 3 +
 .../system/apache-httpd-server-setup.service | 11 +
 openstack/etc/systemd/system/apache-httpd.service | 16 +
 .../systemd/system/openstack-cinder-api.service | 11 +
 .../systemd/system/openstack-cinder-backup.service | 11 +
 .../system/openstack-cinder-scheduler.service | 11 +
 .../systemd/system/openstack-cinder-setup.service | 11 +
 .../systemd/system/openstack-cinder-volume.service | 11 +
 .../systemd/system/openstack-glance-api.service | 12 +
 .../system/openstack-glance-registry.service | 12 +
 .../systemd/system/openstack-glance-setup.service | 11 +
 .../systemd/system/openstack-horizon-setup.service | 11 +
 .../system/openstack-keystone-setup.service | 11 +
 .../etc/systemd/system/openstack-keystone.service | 12 +
 .../system/openstack-neutron-dhcp-agent.service | 14 +
 .../system/openstack-neutron-l3-agent.service | 15 +
 .../openstack-neutron-metadata-agent.service | 14 +
 ...-neutron-network-configuration-one-node.service | 13 +
 .../system/openstack-neutron-ovs-cleanup.service | 14 +
 ...nstack-neutron-plugin-openvswitch-agent.service | 14 +
 .../system/openstack-neutron-server.service | 15 +
 .../systemd/system/openstack-neutron-setup.service | 12 +
 .../etc/systemd/system/openstack-nova-api.service | 11 +
 .../etc/systemd/system/openstack-nova-cert.service | 11 +
 .../systemd/system/openstack-nova-compute.service | 12 +
 .../system/openstack-nova-conductor.service | 12 +
 .../system/openstack-nova-consoleauth.service | 11 +
 .../system/openstack-nova-novncproxy.service | 11 +
 .../system/openstack-nova-scheduler.service | 11 +
 .../systemd/system/openstack-nova-setup.service | 11 +
 .../system/openstack-rabbitmq-server.service | 16 +
 .../system/openstack-rabbitmq-setup.service | 12 +
 .../openvswitch-create-links-one-node.service | 14 +
 .../systemd/system/openvswitch-db-server.service | 11 +
 .../system/openvswitch-initialize-db.service | 10 +
 .../etc/systemd/system/openvswitch-setup.service | 11 +
 openstack/etc/systemd/system/openvswitch.service | 10 +
 .../etc/systemd/system/postgres-server.service | 26 +
 .../etc/systemd/system/postgres-setup.service | 11 +
 openstack/manifest | 187 +
 .../usr/share/openstack/apache-httpd-server-setup | 47 +
 .../share/openstack/create_openvswitch_veth_pairs | 32 +
 .../usr/share/openstack/openstack-cinder-setup | 132 +
 .../usr/share/openstack/openstack-glance-setup | 89 +
 .../usr/share/openstack/openstack-horizon-setup | 63 +
 .../usr/share/openstack/openstack-keystone-setup | 92 +
 ...tack-neutron-network-configuration-for-one-node | 56 +
 .../usr/share/openstack/openstack-neutron-setup | 95 +
 openstack/usr/share/openstack/openstack-nova-setup | 133 +
 .../usr/share/openstack/openstack-rabbitmq-setup | 66 +
 openstack/usr/share/openstack/openvswitch-setup | 44 +
 openstack/usr/share/openstack/postgres-setup | 42 +
 openvswitch.configure | 30 +
 postgres.configure | 23 +
 155 files changed, 17182 insertions(+)
 create mode 100644 apache-httpd-server.configure
 create mode 100644 openstack-cinder.configure
 create mode 100644 openstack-glance.configure
 create mode 100644 openstack-keystone.configure
 create mode 100644 openstack-neutron.configure
 create mode 100644 openstack-nova-compute.configure
 create mode 100644 openstack-nova.configure
 create mode 100644 openstack-rabbitmq.configure
 create mode 100644 openstack/etc/cinder/api-paste.ini
 create mode 100644 openstack/etc/cinder/cinder.conf
 create mode 100644 openstack/etc/cinder/policy.json
 create mode 100644 openstack/etc/cinder/rootwrap.conf
 create mode 100644 openstack/etc/cinder/rootwrap.d/volume.filters
 create mode 100644 openstack/etc/glance/glance-api-paste.ini
 create mode 100644 openstack/etc/glance/glance-api.conf
 create mode 100644 openstack/etc/glance/glance-cache.conf
 create mode 100644 openstack/etc/glance/glance-registry-paste.ini
 create mode 100644 openstack/etc/glance/glance-registry.conf
 create mode 100644 openstack/etc/glance/glance-scrubber.conf
 create mode 100644 openstack/etc/glance/logging.conf
 create mode 100644 openstack/etc/glance/policy.json
 create mode 100644 openstack/etc/glance/schema-image.json
 create mode 100644 openstack/etc/horizon/apache-horizon.conf
 create mode 100644 openstack/etc/horizon/openstack_dashboard/local_settings.py
 create mode 100644 openstack/etc/hosts
 create mode 100644 openstack/etc/keystone/keystone-paste.ini
 create mode 100644 openstack/etc/keystone/keystone.conf
 create mode 100644 openstack/etc/keystone/logging.conf
 create mode 100644 openstack/etc/keystone/policy.json
 create mode 100644 openstack/etc/logrotate.d/openstack-glance-api
 create mode 100644 openstack/etc/logrotate.d/openstack-glance-registry
 create mode 100644 openstack/etc/logrotate.d/openstack-keystone
 create mode 100644 openstack/etc/neutron/api-paste.ini
 create mode 100644 openstack/etc/neutron/dhcp_agent.ini
 create mode 100644 openstack/etc/neutron/fwaas_driver.ini
 create mode 100644 openstack/etc/neutron/l3_agent.ini
 create mode 100644 openstack/etc/neutron/lbaas_agent.ini
 create mode 100644 openstack/etc/neutron/metadata_agent.ini
 create mode 100644 openstack/etc/neutron/metering_agent.ini
 create mode 100644 openstack/etc/neutron/neutron.conf
 create mode 100644 openstack/etc/neutron/plugins/bigswitch/restproxy.ini
 create mode 100644 openstack/etc/neutron/plugins/bigswitch/ssl/ca_certs/README
 create mode 100644 openstack/etc/neutron/plugins/bigswitch/ssl/host_certs/README
 create mode 100644 openstack/etc/neutron/plugins/brocade/brocade.ini
 create mode 100644 openstack/etc/neutron/plugins/cisco/cisco_cfg_agent.ini
 create mode 100644 openstack/etc/neutron/plugins/cisco/cisco_plugins.ini
 create mode 100644 openstack/etc/neutron/plugins/cisco/cisco_router_plugin.ini
 create mode 100644 openstack/etc/neutron/plugins/cisco/cisco_vpn_agent.ini
 create mode 100644 openstack/etc/neutron/plugins/embrane/heleos_conf.ini
 create mode 100644 openstack/etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini
 create mode 100644 openstack/etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini
 create mode 100644 openstack/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini
 create mode 100644 openstack/etc/neutron/plugins/metaplugin/metaplugin.ini
 create mode 100644 openstack/etc/neutron/plugins/midonet/midonet.ini
 create mode 100644 openstack/etc/neutron/plugins/ml2/ml2_conf.ini
 create mode 100644 openstack/etc/neutron/plugins/ml2/ml2_conf_arista.ini
 create mode 100644 openstack/etc/neutron/plugins/ml2/ml2_conf_brocade.ini
 create mode 100644 openstack/etc/neutron/plugins/ml2/ml2_conf_cisco.ini
 create mode 100644 openstack/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini
 create mode 100644 openstack/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini
 create mode 100644 openstack/etc/neutron/plugins/ml2/ml2_conf_ncs.ini
 create mode 100644 openstack/etc/neutron/plugins/ml2/ml2_conf_odl.ini
 create mode 100644 openstack/etc/neutron/plugins/ml2/ml2_conf_ofa.ini
 create mode 100644 openstack/etc/neutron/plugins/ml2/ml2_conf_sriov.ini
 create mode 100644 openstack/etc/neutron/plugins/mlnx/mlnx_conf.ini
 create mode 100644 openstack/etc/neutron/plugins/nec/nec.ini
 create mode 100644 openstack/etc/neutron/plugins/nuage/nuage_plugin.ini
 create mode 100644 openstack/etc/neutron/plugins/oneconvergence/nvsdplugin.ini
 create mode 100644 openstack/etc/neutron/plugins/opencontrail/contrailplugin.ini
 create mode 100644 openstack/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
 create mode 100644 openstack/etc/neutron/plugins/plumgrid/plumgrid.ini
 create mode 100644 openstack/etc/neutron/plugins/vmware/nsx.ini
 create mode 100644 openstack/etc/neutron/policy.json
 create mode 100644 openstack/etc/neutron/rootwrap.conf
 create mode 100644 openstack/etc/neutron/rootwrap.d/cisco-apic.filters
 create mode 100644 openstack/etc/neutron/rootwrap.d/debug.filters
 create mode 100644 openstack/etc/neutron/rootwrap.d/dhcp.filters
 create mode 100644 openstack/etc/neutron/rootwrap.d/ipset-firewall.filters
 create mode 100644 openstack/etc/neutron/rootwrap.d/iptables-firewall.filters
 create mode 100644 openstack/etc/neutron/rootwrap.d/l3.filters
 create mode 100644 openstack/etc/neutron/rootwrap.d/lbaas-haproxy.filters
 create mode 100644 openstack/etc/neutron/rootwrap.d/linuxbridge-plugin.filters
 create mode 100644 openstack/etc/neutron/rootwrap.d/nec-plugin.filters
 create mode 100644 openstack/etc/neutron/rootwrap.d/ofagent.filters
 create mode 100644 openstack/etc/neutron/rootwrap.d/openvswitch-plugin.filters
 create mode 100644 openstack/etc/neutron/rootwrap.d/vpnaas.filters
 create mode 100644 openstack/etc/neutron/vpn_agent.ini
 create mode 100644 openstack/etc/nova/api-paste.ini
 create mode 100644 openstack/etc/nova/cells.json
 create mode 100644 openstack/etc/nova/logging.conf
 create mode 100644 openstack/etc/nova/nova-compute.conf
 create mode 100644 openstack/etc/nova/nova.conf
 create mode 100644 openstack/etc/nova/nova.conf.example
 create mode 100644 openstack/etc/nova/policy.json
 create mode 100644 openstack/etc/nova/release.sample
 create mode 100644 openstack/etc/nova/rootwrap.conf
 create mode 100644 openstack/etc/nova/rootwrap.d/api-metadata.filters
 create mode 100644 openstack/etc/nova/rootwrap.d/baremetal-compute-ipmi.filters
 create mode 100644 openstack/etc/nova/rootwrap.d/baremetal-deploy-helper.filters
 create mode 100644 openstack/etc/nova/rootwrap.d/compute.filters
 create mode 100644 openstack/etc/nova/rootwrap.d/network.filters
 create mode 100644 openstack/etc/sysctl.conf
 create mode 100644 openstack/etc/systemd/system/apache-httpd-server-setup.service
 create mode 100644 openstack/etc/systemd/system/apache-httpd.service
 create mode 100644 openstack/etc/systemd/system/openstack-cinder-api.service
 create mode 100644 openstack/etc/systemd/system/openstack-cinder-backup.service
 create mode 100644 openstack/etc/systemd/system/openstack-cinder-scheduler.service
 create mode 100644 openstack/etc/systemd/system/openstack-cinder-setup.service
 create mode 100644 openstack/etc/systemd/system/openstack-cinder-volume.service
 create mode 100644 openstack/etc/systemd/system/openstack-glance-api.service
 create mode 100644 openstack/etc/systemd/system/openstack-glance-registry.service
 create mode 100644 openstack/etc/systemd/system/openstack-glance-setup.service
 create mode 100644 openstack/etc/systemd/system/openstack-horizon-setup.service
 create mode 100644 openstack/etc/systemd/system/openstack-keystone-setup.service
 create mode 100644 openstack/etc/systemd/system/openstack-keystone.service
 create mode 100644 openstack/etc/systemd/system/openstack-neutron-dhcp-agent.service
 create mode 100644 openstack/etc/systemd/system/openstack-neutron-l3-agent.service
 create mode 100644 openstack/etc/systemd/system/openstack-neutron-metadata-agent.service
 create mode 100644 openstack/etc/systemd/system/openstack-neutron-network-configuration-one-node.service
 create mode 100644 openstack/etc/systemd/system/openstack-neutron-ovs-cleanup.service
 create mode 100644 openstack/etc/systemd/system/openstack-neutron-plugin-openvswitch-agent.service
 create mode 100644 openstack/etc/systemd/system/openstack-neutron-server.service
 create mode 100644 openstack/etc/systemd/system/openstack-neutron-setup.service
 create mode 100644 openstack/etc/systemd/system/openstack-nova-api.service
 create mode 100644 openstack/etc/systemd/system/openstack-nova-cert.service
 create mode 100644 openstack/etc/systemd/system/openstack-nova-compute.service
 create mode 100644 openstack/etc/systemd/system/openstack-nova-conductor.service
 create mode 100644 openstack/etc/systemd/system/openstack-nova-consoleauth.service
 create mode 100644 openstack/etc/systemd/system/openstack-nova-novncproxy.service
 create mode 100644 openstack/etc/systemd/system/openstack-nova-scheduler.service
 create mode 100644 openstack/etc/systemd/system/openstack-nova-setup.service
 create mode 100644 openstack/etc/systemd/system/openstack-rabbitmq-server.service
 create mode 100644 openstack/etc/systemd/system/openstack-rabbitmq-setup.service
 create mode 100644 openstack/etc/systemd/system/openvswitch-create-links-one-node.service
 create mode 100644 openstack/etc/systemd/system/openvswitch-db-server.service
 create mode 100644 openstack/etc/systemd/system/openvswitch-initialize-db.service
 create mode 100644 openstack/etc/systemd/system/openvswitch-setup.service
 create mode 100644 openstack/etc/systemd/system/openvswitch.service
 create mode 100644 openstack/etc/systemd/system/postgres-server.service
 create mode 100644 openstack/etc/systemd/system/postgres-setup.service
 create mode 100644 openstack/manifest
 create mode 100755 openstack/usr/share/openstack/apache-httpd-server-setup
 create mode 100644 openstack/usr/share/openstack/create_openvswitch_veth_pairs
 create mode 100644 openstack/usr/share/openstack/openstack-cinder-setup
 create mode 100644 openstack/usr/share/openstack/openstack-glance-setup
 create mode 100644 openstack/usr/share/openstack/openstack-horizon-setup
 create mode 100644 openstack/usr/share/openstack/openstack-keystone-setup
 create mode 100644 openstack/usr/share/openstack/openstack-neutron-network-configuration-for-one-node
 create mode 100644 openstack/usr/share/openstack/openstack-neutron-setup
 create mode 100644 openstack/usr/share/openstack/openstack-nova-setup
 create mode 100644 openstack/usr/share/openstack/openstack-rabbitmq-setup
 create mode 100644 openstack/usr/share/openstack/openvswitch-setup
 create mode 100644 openstack/usr/share/openstack/postgres-setup
 create mode 100644 openvswitch.configure
 create mode 100644 postgres.configure

diff --git a/apache-httpd-server.configure b/apache-httpd-server.configure
new file mode 100644
index 00000000..a2a5e2f9
--- /dev/null
+++ b/apache-httpd-server.configure
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+ln -sf "/etc/systemd/system/apache-httpd.service" \
+    "$ROOT/etc/systemd/system/multi-user.target.wants/apache-httpd.service"
diff --git a/openstack-cinder.configure b/openstack-cinder.configure
new file mode 100644
index 00000000..335ff512
--- /dev/null
+++ b/openstack-cinder.configure
@@ -0,0 +1,55 @@
+#!/bin/sh
+
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
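+
+# This configure extension runs at deployment time: "$1" is the mounted
+# target root filesystem, and the CINDER_* values substituted below are
+# supplied through the environment by the deployment tooling.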
+
+set -e
+
+ROOT="$1"
+
+##########################################################################
+# Substitutions in configuration files
+##########################################################################
+
+cat << EOF > "$ROOT"/etc/openstack-cinder-setup.sed
+s/##CINDER_USER##/$CINDER_USER/g
+s/##CINDER_PASSWORD##/$CINDER_PASSWORD/g
+s/##CINDER_PUBLIC_URL##/$CINDER_PUBLIC_URL/g
+s/##CINDER_INTERNAL_URL##/$CINDER_INTERNAL_URL/g
+s/##CINDER_ADMIN_URL##/$CINDER_ADMIN_URL/g
+s/##CINDER_USER_V2##/$CINDER_USER_V2/g
+s/##CINDER_PUBLIC_URL_V2##/$CINDER_PUBLIC_URL_V2/g
+s/##CINDER_INTERNAL_URL_V2##/$CINDER_INTERNAL_URL_V2/g
+s/##CINDER_ADMIN_URL_V2##/$CINDER_ADMIN_URL_V2/g
+s/##CINDER_DB_USER##/$CINDER_DB_USER/g
+s/##CINDER_DB_PASSWORD##/$CINDER_DB_PASSWORD/g
+EOF
+
+sed -f "$ROOT"/etc/openstack-cinder-setup.sed -i \
+    "$ROOT"/etc/cinder/cinder.conf \
+    "$ROOT"/usr/share/openstack/openstack-cinder-setup
+
+##########################################################################
+
+ln -sf "/etc/systemd/system/openstack-cinder-setup.service" \
+    "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-cinder-setup.service"
+
+##########################################################################
+# Add cinder to sudoers, controlling which commands it may run as root
+# using the openstack rootwrap.
+##########################################################################
+install -D -m 0440 /proc/self/fd/0 <<'EOF' "$ROOT"/etc/sudoers.d/cinder-rootwrap
+cinder ALL=(root) NOPASSWD: /usr/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *
+EOF
diff --git a/openstack-glance.configure b/openstack-glance.configure
new file mode 100644
index 00000000..6780a64a
--- /dev/null
+++ b/openstack-glance.configure
@@ -0,0 +1,49 @@
+#!/bin/sh
+
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
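+
+# Same pattern as openstack-cinder.configure above: generate a sed script
+# from the GLANCE_* environment variables, then rewrite the ##...##
+# placeholders in the staged configuration under "$ROOT".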
+
+set -e
+
+ROOT="$1"
+
+##########################################################################
+# Substitutions in configuration files
+##########################################################################
+
+cat << EOF > "$ROOT"/etc/openstack-glance-setup.sed
+s/##GLANCE_SERVICE_USER##/$GLANCE_SERVICE_USER/g
+s/##GLANCE_SERVICE_PASSWORD##/$GLANCE_SERVICE_PASSWORD/g
+s/##GLANCE_PUBLIC_URL##/$GLANCE_PUBLIC_URL/g
+s/##GLANCE_INTERNAL_URL##/$GLANCE_INTERNAL_URL/g
+s/##GLANCE_ADMIN_URL##/$GLANCE_ADMIN_URL/g
+s/##GLANCE_HOST##/$GLANCE_HOST/g
+s/##GLANCE_DB_USER##/$GLANCE_DB_USER/g
+s/##GLANCE_DB_PASSWORD##/$GLANCE_DB_PASSWORD/g
+EOF
+
+sed -f "$ROOT"/etc/openstack-glance-setup.sed -i \
+    "$ROOT"/etc/glance/glance-api.conf \
+    "$ROOT"/etc/glance/glance-registry.conf \
+    "$ROOT"/etc/glance/glance-scrubber.conf \
+    "$ROOT"/etc/glance/glance-cache.conf \
+    "$ROOT"/etc/cinder/cinder.conf \
+    "$ROOT"/etc/nova/nova.conf \
+    "$ROOT"/usr/share/openstack/openstack-glance-setup
+
+##########################################################################
+
+ln -s "/etc/systemd/system/openstack-glance-setup.service" \
+    "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-glance-setup.service"
diff --git a/openstack-keystone.configure b/openstack-keystone.configure
new file mode 100644
index 00000000..d2d1171e
--- /dev/null
+++ b/openstack-keystone.configure
@@ -0,0 +1,56 @@
+#!/bin/sh
+
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
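+
+# Keystone credentials and endpoints are referenced by most of the other
+# services, so the sed script below is applied to the glance, nova,
+# cinder and neutron configuration as well as to keystone.conf itself.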
+
+set -e
+
+ROOT="$1"
+
+##########################################################################
+# Substitutions in configuration files
+##########################################################################
+
+cat << EOF > "$ROOT"/etc/openstack-keystone-setup.sed
+s/##KEYSTONE_TEMPORARY_ADMIN_TOKEN##/$KEYSTONE_TEMPORARY_ADMIN_TOKEN/g
+s/##KEYSTONE_TEMPORARY_ADMIN_PASSWORD##/$KEYSTONE_TEMPORARY_ADMIN_PASSWORD/g
+s/##KEYSTONE_PUBLIC_URL##/$KEYSTONE_PUBLIC_URL/g
+s/##KEYSTONE_INTERNAL_URL##/$KEYSTONE_INTERNAL_URL/g
+s/##KEYSTONE_ADMIN_URL##/$KEYSTONE_ADMIN_URL/g
+s/##OPENSTACK_AUTH_HOST##/$OPENSTACK_AUTH_HOST/g
+s/##OPENSTACK_AUTH_PORT##/$OPENSTACK_AUTH_PORT/g
+s/##IDENTITY_URI##/$IDENTITY_URI/g
+s/##KEYSTONE_DB_USER##/$KEYSTONE_DB_USER/g
+s/##KEYSTONE_DB_PASSWORD##/$KEYSTONE_DB_PASSWORD/g
+EOF
+
+sed -f "$ROOT"/etc/openstack-keystone-setup.sed -i \
+    "$ROOT"/etc/keystone/keystone.conf \
+    "$ROOT"/etc/glance/glance-api.conf \
+    "$ROOT"/etc/glance/glance-registry.conf \
+    "$ROOT"/etc/nova/nova.conf \
+    "$ROOT"/etc/cinder/cinder.conf \
+    "$ROOT"/etc/neutron/neutron.conf \
+    "$ROOT"/etc/neutron/metadata_agent.ini \
+    "$ROOT"/usr/share/openstack/openstack-keystone-setup \
+    "$ROOT"/usr/share/openstack/openstack-glance-setup \
+    "$ROOT"/usr/share/openstack/openstack-nova-setup \
+    "$ROOT"/usr/share/openstack/openstack-neutron-setup \
+    "$ROOT"/usr/share/openstack/openstack-cinder-setup
+
+##########################################################################
+
+ln -s "/etc/systemd/system/openstack-keystone-setup.service" \
+    "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-keystone-setup.service"
diff --git a/openstack-neutron.configure b/openstack-neutron.configure
new file mode 100644
index 00000000..5f5754a9
--- /dev/null
+++ b/openstack-neutron.configure
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+# Copyright (C) 2014-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
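+
+# Unlike the other extensions this script is bash rather than sh, since
+# it uses an array to enumerate the systemd units it enables.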
+
+set -e
+
+ROOT="$1"
+
+##########################################################################
+# Substitutions in configuration files                                   #
+##########################################################################
+
+cat << EOF > "$ROOT"/etc/openstack-neutron-setup.sed
+s/##NEUTRON_SERVICE_USER##/$NEUTRON_SERVICE_USER/g
+s/##NEUTRON_SERVICE_PASSWORD##/$NEUTRON_SERVICE_PASSWORD/g
+s/##NEUTRON_DB_USER##/$NEUTRON_DB_USER/g
+s/##NEUTRON_DB_PASSWORD##/$NEUTRON_DB_PASSWORD/g
+s/##NEUTRON_PUBLIC_URL##/$NEUTRON_PUBLIC_URL/g
+s/##NEUTRON_INTERNAL_URL##/$NEUTRON_INTERNAL_URL/g
+s/##NEUTRON_ADMIN_URL##/$NEUTRON_ADMIN_URL/g
+s/##METADATA_PROXY_SHARED_SECRET##/$METADATA_PROXY_SHARED_SECRET/g
+EOF
+
+sed -f "$ROOT"/etc/openstack-neutron-setup.sed -i \
+    "$ROOT"/etc/neutron/neutron.conf \
+    "$ROOT"/etc/neutron/metadata_agent.ini \
+    "$ROOT"/etc/nova/nova.conf \
+    "$ROOT"/usr/share/openstack/openstack-neutron-setup
+
+##########################################################################
+# Create the links to enable the neutron systemd services                #
+##########################################################################
+services=("openstack-neutron-network-configuration-one-node.service" \
+          "openvswitch-create-links-one-node.service" \
+          "openstack-neutron-dhcp-agent.service" \
+          "openstack-neutron-l3-agent.service" \
+          "openstack-neutron-metadata-agent.service" \
+          "openstack-neutron-ovs-cleanup.service" \
+          "openstack-neutron-plugin-openvswitch-agent.service" \
+          "openstack-neutron-server.service" \
+          "openstack-neutron-setup.service")
+
+for service in "${services[@]}"; do
+    ln -sf "/etc/systemd/system/$service" \
+        "$ROOT/etc/systemd/system/multi-user.target.wants/$service"
+done
+
+##########################################################################
+# Add neutron to sudoers, controlling which commands it may run as root  #
+# using the openstack rootwrap.                                          #
+##########################################################################
+install -D -m 0440 /proc/self/fd/0 <<'EOF' "$ROOT"/etc/sudoers.d/neutron-rootwrap
+neutron ALL=(root) NOPASSWD: /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *
+EOF
diff --git a/openstack-nova-compute.configure b/openstack-nova-compute.configure
new file mode 100644
index 00000000..9487c759
--- /dev/null
+++ b/openstack-nova-compute.configure
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
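+
+# Compute-node variant of the nova extension below: it substitutes only
+# the service credentials and endpoint URLs, not the database settings.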
+
+set -e
+
+ROOT="$1"
+
+##########################################################################
+# Substitutions in configuration files
+##########################################################################
+
+cat << EOF > "$ROOT"/etc/openstack-nova-compute-setup.sed
+s/##NOVA_SERVICE_USER##/$NOVA_SERVICE_USER/g
+s/##NOVA_SERVICE_PASSWORD##/$NOVA_SERVICE_PASSWORD/g
+s/##NOVA_PUBLIC_URL##/$NOVA_PUBLIC_URL/g
+s/##NOVA_INTERNAL_URL##/$NOVA_INTERNAL_URL/g
+s/##NOVA_ADMIN_URL##/$NOVA_ADMIN_URL/g
+EOF
+
+sed -f "$ROOT"/etc/openstack-nova-compute-setup.sed -i \
+    "$ROOT"/etc/nova/nova.conf \
+    "$ROOT"/usr/share/openstack/openstack-nova-compute-setup
+
+##########################################################################
+
+ln -s "/etc/systemd/system/openstack-nova-compute-setup.service" \
+    "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-nova-compute-setup.service"
diff --git a/openstack-nova.configure b/openstack-nova.configure
new file mode 100644
index 00000000..efa6eb11
--- /dev/null
+++ b/openstack-nova.configure
@@ -0,0 +1,74 @@
+#!/bin/sh
+
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
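+
+# Full nova configuration for the controller node: endpoint and database
+# substitutions, enabling libvirt, and installing the rootwrap sudoers
+# entry.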
+
+set -e
+
+ROOT="$1"
+
+##########################################################################
+# Substitutions in configuration files
+##########################################################################
+
+cat << EOF > "$ROOT"/etc/openstack-nova-setup.sed
+s/##NOVA_SERVICE_USER##/$NOVA_SERVICE_USER/g
+s/##NOVA_SERVICE_PASSWORD##/$NOVA_SERVICE_PASSWORD/g
+s/##NOVA_PUBLIC_URL##/$NOVA_PUBLIC_URL/g
+s/##NOVA_INTERNAL_URL##/$NOVA_INTERNAL_URL/g
+s/##NOVA_ADMIN_URL##/$NOVA_ADMIN_URL/g
+s/##NOVA_HOST##/$NOVA_HOST/g
+s/##NOVA_REGION##/$NOVA_REGION/g
+s/##NOVA_NOVNCPROXY_BASE_URL##/$NOVA_NOVNCPROXY_BASE_URL/g
+s/##NOVA_DB_USER##/$NOVA_DB_USER/g
+s/##NOVA_DB_PASSWORD##/$NOVA_DB_PASSWORD/g
+EOF
+
+sed -f "$ROOT"/etc/openstack-nova-setup.sed -i \
+    "$ROOT"/etc/nova/nova.conf \
+    "$ROOT"/etc/neutron/neutron.conf \
+    "$ROOT"/etc/neutron/metadata_agent.ini \
+    "$ROOT"/usr/share/openstack/openstack-nova-setup
+
+##########################################################################
+
+ln -sf "/etc/systemd/system/openstack-nova-setup.service" \
+    "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-nova-setup.service"
+
+##########################################################################
+# Enable libvirtd and libvirt-guests services
+##########################################################################
+
+wants_dir="$ROOT"/usr/lib/systemd/system/multi-user.target.wants
+mkdir -p "$wants_dir"
+mkdir -p "$ROOT"/var/lock/subsys
+ln -sf ../libvirtd.service "$wants_dir/libvirtd.service"
+ln -sf ../libvirt-guests.service "$wants_dir/libvirt-guests.service"
+
+##########################################################################
+# Change the IP range of the internal libvirt network to avoid clashes
+# with the eth0 IP range
+##########################################################################
+
+sed -i "s/192\.168\.122\./192\.168\.1\./g" \
+    "$ROOT"/etc/libvirt/qemu/networks/default.xml
+
+##########################################################################
+# Add nova to sudoers, controlling which commands it may run as root
+# using the openstack rootwrap.
+##########################################################################
+install -D -m 0440 /proc/self/fd/0 <<'EOF' "$ROOT"/etc/sudoers.d/nova-rootwrap
+nova ALL=(root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf *
+EOF
diff --git a/openstack-rabbitmq.configure b/openstack-rabbitmq.configure
new file mode 100644
index 00000000..40c37698
--- /dev/null
+++ b/openstack-rabbitmq.configure
@@ -0,0 +1,47 @@
+#!/bin/sh
+
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
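+
+# The RabbitMQ connection details are needed by every service that talks
+# to the message broker, hence the long list of sed targets below.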
+
+set -e
+
+ROOT="$1"
+
+##########################################################################
+# Substitutions in configuration files
+##########################################################################
+
+cat << EOF > "$ROOT"/etc/openstack-rabbitmq-setup.sed
+s/##RABBITMQ_HOST##/$RABBITMQ_HOST/g
+s/##RABBITMQ_PORT##/$RABBITMQ_PORT/g
+s/##RABBITMQ_USER##/$RABBITMQ_USER/g
+s/##RABBITMQ_PASSWORD##/$RABBITMQ_PASSWORD/g
+EOF
+
+sed -f "$ROOT"/etc/openstack-rabbitmq-setup.sed -i \
+    "$ROOT"/etc/keystone/keystone.conf \
+    "$ROOT"/etc/nova/nova.conf \
+    "$ROOT"/etc/cinder/cinder.conf \
+    "$ROOT"/etc/neutron/neutron.conf \
+    "$ROOT"/etc/glance/glance-api.conf \
+    "$ROOT"/usr/share/openstack/openstack-rabbitmq-setup
+
+##########################################################################
+
+ln -s "/etc/systemd/system/openstack-rabbitmq-setup.service" \
+    "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-rabbitmq-setup.service"
+
+ln -s "/etc/systemd/system/openstack-rabbitmq-server.service" \
+    "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-rabbitmq-server.service"
diff --git a/openstack/etc/cinder/api-paste.ini b/openstack/etc/cinder/api-paste.ini
new file mode 100644
index 00000000..31619fc8
--- /dev/null
+++ b/openstack/etc/cinder/api-paste.ini
@@ -0,0 +1,60 @@
+#############
+# OpenStack #
+#############
+
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[filter:request_id]
+paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
diff --git a/openstack/etc/cinder/cinder.conf b/openstack/etc/cinder/cinder.conf
new file mode 100644
index 00000000..764c08bc
--- /dev/null
+++ b/openstack/etc/cinder/cinder.conf
@@ -0,0 +1,2769 @@
+[DEFAULT]
+
+state_path = /var/lib/cinder
+#
+# Options defined in oslo.messaging
+#
+
+#
Use durable queues in amqp. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size=30 + +# Qpid broker hostname. (string value) +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The number of prefetched messages held by receiver. (integer +# value) +#qpid_receiver_capacity=1 + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +rabbit_host=##RABBITMQ_HOST## + +# The RabbitMQ broker port where a single node is used. +# (integer value) +rabbit_port=##RABBITMQ_PORT## + +# RabbitMQ HA cluster host:port pairs. (list value) +#rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +#rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +rabbit_userid=##RABBITMQ_USER## + +# The RabbitMQ password. (string value) +rabbit_password=##RABBITMQ_PASSWORD## + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. 
(string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=cinder + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Size of RPC greenthread pool. (integer value) +#rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +#notification_driver= + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. (string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other +# drivers include qpid and zmq. (string value) +rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the +# transport_url option. (string value) +#control_exchange=openstack + + +# +# Options defined in cinder.exception +# + +# Make exception message format errors fatal. (boolean value) +#fatal_exception_format_errors=false + + +# +# Options defined in cinder.quota +# + +# Number of volumes allowed per project (integer value) +#quota_volumes=10 + +# Number of volume snapshots allowed per project (integer +# value) +#quota_snapshots=10 + +# Number of consistencygroups allowed per project (integer +# value) +#quota_consistencygroups=10 + +# Total amount of storage, in gigabytes, allowed for volumes +# and snapshots per project (integer value) +#quota_gigabytes=1000 + +# Number of volume backups allowed per project (integer value) +#quota_backups=10 + +# Total amount of storage, in gigabytes, allowed for backups +# per project (integer value) +#quota_backup_gigabytes=1000 + +# Number of seconds until a reservation expires (integer +# value) +#reservation_expire=86400 + +# Count of reservations until usage is refreshed (integer +# value) +#until_refresh=0 + +# Number of seconds between subsequent usage refreshes +# (integer value) +#max_age=0 + +# Default driver to use for quota checks (string value) +#quota_driver=cinder.quota.DbQuotaDriver + +# Enables or disables use of default quota class with default +# quota. (boolean value) +#use_default_quota_class=true + + +# +# Options defined in cinder.service +# + +# Interval, in seconds, between nodes reporting state to +# datastore (integer value) +#report_interval=10 + +# Interval, in seconds, between running periodic tasks +# (integer value) +#periodic_interval=60 + +# Range, in seconds, to randomly delay when starting the +# periodic task scheduler to reduce stampeding. 
(Disable by +# setting to 0) (integer value) +#periodic_fuzzy_delay=60 + +# IP address on which OpenStack Volume API listens (string +# value) +#osapi_volume_listen=0.0.0.0 + +# Port on which OpenStack Volume API listens (integer value) +#osapi_volume_listen_port=8776 + +# Number of workers for OpenStack Volume API service. The +# default is equal to the number of CPUs available. (integer +# value) +#osapi_volume_workers= + + +# +# Options defined in cinder.ssh_utils +# + +# Option to enable strict host key checking. When set to +# "True" Cinder will only connect to systems with a host key +# present in the configured "ssh_hosts_key_file". When set to +# "False" the host key will be saved upon first connection and +# used for subsequent connections. Default=False (boolean +# value) +#strict_ssh_host_key_policy=false + +# File containing SSH host keys for the systems with which +# Cinder needs to communicate. OPTIONAL: +# Default=$state_path/ssh_known_hosts (string value) +#ssh_hosts_key_file=$state_path/ssh_known_hosts + + +# +# Options defined in cinder.test +# + +# File name of clean sqlite db (string value) +#sqlite_clean_db=clean.sqlite + + +# +# Options defined in cinder.wsgi +# + +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large +# tokens (typically those generated by the Keystone v3 API +# with big service catalogs). (integer value) +#max_header_line=16384 + +# Sets the value of TCP_KEEPALIVE (True/False) for each server +# socket. (boolean value) +#tcp_keepalive=true + +# Sets the value of TCP_KEEPIDLE in seconds for each server +# socket. Not supported on OS X. (integer value) +#tcp_keepidle=600 + +# Sets the value of TCP_KEEPINTVL in seconds for each server +# socket. Not supported on OS X. (integer value) +#tcp_keepalive_interval= + +# Sets the value of TCP_KEEPCNT for each server socket. Not +# supported on OS X. (integer value) +#tcp_keepalive_count= + +# CA certificate file to use to verify connecting clients +# (string value) +#ssl_ca_file= + +# Certificate file to use when starting the server securely +# (string value) +#ssl_cert_file= + +# Private key file to use when starting the server securely +# (string value) +#ssl_key_file= + + +# +# Options defined in cinder.api.common +# + +# The maximum number of items that a collection resource +# returns in a single response (integer value) +#osapi_max_limit=1000 + +# Base URL that will be presented to users in links to the +# OpenStack Volume API (string value) +# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix +#osapi_volume_base_URL= + + +# +# Options defined in cinder.api.middleware.auth +# + +# Treat X-Forwarded-For as the canonical remote address. Only +# enable this if you have a sanitizing proxy. (boolean value) +#use_forwarded_for=false + + +# +# Options defined in cinder.api.middleware.sizelimit +# + +# Max size for body of a request (integer value) +#osapi_max_request_body_size=114688 + + +# +# Options defined in cinder.backup.driver +# + +# Backup metadata version to be used when backing up volume +# metadata. If this number is bumped, make sure the service +# doing the restore supports the new version. (integer value) +#backup_metadata_version=1 + + +# +# Options defined in cinder.backup.drivers.ceph +# + +# Ceph configuration file to use. (string value) +#backup_ceph_conf=/etc/ceph/ceph.conf + +# The Ceph user to connect with. Default here is to use the +# same user as for Cinder volumes. If not using cephx this +# should be set to None. 
(string value) +#backup_ceph_user=cinder + +# The chunk size, in bytes, that a backup is broken into +# before transfer to the Ceph object store. (integer value) +#backup_ceph_chunk_size=134217728 + +# The Ceph pool where volume backups are stored. (string +# value) +#backup_ceph_pool=backups + +# RBD stripe unit to use when creating a backup image. +# (integer value) +#backup_ceph_stripe_unit=0 + +# RBD stripe count to use when creating a backup image. +# (integer value) +#backup_ceph_stripe_count=0 + +# If True, always discard excess bytes when restoring volumes +# i.e. pad with zeroes. (boolean value) +#restore_discard_excess_bytes=true + + +# +# Options defined in cinder.backup.drivers.swift +# + +# The URL of the Swift endpoint (string value) +#backup_swift_url= + +# Info to match when looking for swift in the service catalog. +# Format is: separated values of the form: +# :: - Only used if +# backup_swift_url is unset (string value) +#swift_catalog_info=object-store:swift:publicURL + +# Swift authentication mechanism (string value) +#backup_swift_auth=per_user + +# Swift authentication version. Specify "1" for auth 1.0, or +# "2" for auth 2.0 (string value) +#backup_swift_auth_version=1 + +# Swift tenant/account name. Required when connecting to an +# auth 2.0 system (string value) +#backup_swift_tenant= + +# Swift user name (string value) +#backup_swift_user= + +# Swift key for authentication (string value) +#backup_swift_key= + +# The default Swift container to use (string value) +#backup_swift_container=volumebackups + +# The size in bytes of Swift backup objects (integer value) +#backup_swift_object_size=52428800 + +# The number of retries to make for Swift operations (integer +# value) +#backup_swift_retry_attempts=3 + +# The backoff time in seconds between Swift retries (integer +# value) +#backup_swift_retry_backoff=2 + +# Compression algorithm (None to disable) (string value) +#backup_compression_algorithm=zlib + + +# +# Options defined in cinder.backup.drivers.tsm +# + +# Volume prefix for the backup id when backing up to TSM +# (string value) +#backup_tsm_volume_prefix=backup + +# TSM password for the running username (string value) +#backup_tsm_password=password + +# Enable or Disable compression for backups (boolean value) +#backup_tsm_compression=true + + +# +# Options defined in cinder.backup.manager +# + +# Driver to use for backups. (string value) +# Deprecated group/name - [DEFAULT]/backup_service +#backup_driver=cinder.backup.drivers.swift + + +# +# Options defined in cinder.common.config +# + +# File name for the paste.deploy config for cinder-api (string +# value) +api_paste_config=api-paste.ini + +# Top-level directory for maintaining cinder's state (string +# value) +# Deprecated group/name - [DEFAULT]/pybasedir +#state_path=/var/lib/cinder + +# IP address of this host (string value) +my_ip=##GLANCE_HOST## + +# Default glance host name or IP (string value) +glance_host=$my_ip + +# Default glance port (integer value) +#glance_port=9292 + +# A list of the glance API servers available to cinder +# ([hostname|ip]:port) (list value) +#glance_api_servers=$glance_host:$glance_port + +# Version of the glance API to use (integer value) +#glance_api_version=1 + +# Number retries when downloading an image from glance +# (integer value) +#glance_num_retries=0 + +# Allow to perform insecure SSL (https) requests to glance +# (boolean value) +#glance_api_insecure=false + +# Enables or disables negotiation of SSL layer compression. 
In +# some cases disabling compression can improve data +# throughput, such as when high network bandwidth is available +# and you use compressed image formats like qcow2. (boolean +# value) +#glance_api_ssl_compression=false + +# Location of ca certificates file to use for glance client +# requests. (string value) +#glance_ca_certificates_file= + +# http/https timeout value for glance operations. If no value +# (None) is supplied here, the glanceclient default value is +# used. (integer value) +#glance_request_timeout= + +# The topic that scheduler nodes listen on (string value) +#scheduler_topic=cinder-scheduler + +# The topic that volume nodes listen on (string value) +#volume_topic=cinder-volume + +# The topic that volume backup nodes listen on (string value) +#backup_topic=cinder-backup + +# DEPRECATED: Deploy v1 of the Cinder API. (boolean value) +#enable_v1_api=true + +# Deploy v2 of the Cinder API. (boolean value) +#enable_v2_api=true + +# Enables or disables rate limit of the API. (boolean value) +#api_rate_limit=true + +# Specify list of extensions to load when using +# osapi_volume_extension option with +# cinder.api.contrib.select_extensions (list value) +#osapi_volume_ext_list= + +# osapi volume extension to load (multi valued) +#osapi_volume_extension=cinder.api.contrib.standard_extensions + +# Full class name for the Manager for volume (string value) +#volume_manager=cinder.volume.manager.VolumeManager + +# Full class name for the Manager for volume backup (string +# value) +#backup_manager=cinder.backup.manager.BackupManager + +# Full class name for the Manager for scheduler (string value) +#scheduler_manager=cinder.scheduler.manager.SchedulerManager + +# Name of this node. This can be an opaque identifier. It is +# not necessarily a host name, FQDN, or IP address. (string +# value) +#host=cinder + +# Availability zone of this node (string value) +#storage_availability_zone=nova + +# Default availability zone for new volumes. If not set, the +# storage_availability_zone option value is used as the +# default for new volumes. (string value) +#default_availability_zone= + +# Default volume type to use (string value) +#default_volume_type= + +# Time period for which to generate volume usages. The options +# are hour, day, month, or year. (string value) +#volume_usage_audit_period=month + +# Path to the rootwrap configuration file to use for running +# commands as root (string value) +rootwrap_config=/etc/cinder/rootwrap.conf + +# Enable monkey patching (boolean value) +#monkey_patch=false + +# List of modules/decorators to monkey patch (list value) +#monkey_patch_modules= + +# Maximum time since last check-in for a service to be +# considered up (integer value) +#service_down_time=60 + +# The full class name of the volume API class to use (string +# value) +#volume_api_class=cinder.volume.api.API + +# The full class name of the volume backup API class (string +# value) +#backup_api_class=cinder.backup.api.API + +# The strategy to use for auth. Supports noauth, keystone, and +# deprecated. (string value) +auth_strategy=keystone + +# A list of backend names to use. 
These backend names should +# be backed by a unique [CONFIG] group with its options (list +# value) +#enabled_backends= + +# Whether snapshots count against GigaByte quota (boolean +# value) +#no_snapshot_gb_quota=false + +# The full class name of the volume transfer API class (string +# value) +#transfer_api_class=cinder.transfer.api.API + +# The full class name of the volume replication API class +# (string value) +#replication_api_class=cinder.replication.api.API + +# The full class name of the consistencygroup API class +# (string value) +#consistencygroup_api_class=cinder.consistencygroup.api.API + + +# +# Options defined in cinder.compute +# + +# The full class name of the compute API class to use (string +# value) +#compute_api_class=cinder.compute.nova.API + + +# +# Options defined in cinder.compute.nova +# + +# Match this value when searching for nova in the service +# catalog. Format is: separated values of the form: +# :: (string value) +#nova_catalog_info=compute:nova:publicURL + +# Same as nova_catalog_info, but for admin endpoint. (string +# value) +#nova_catalog_admin_info=compute:nova:adminURL + +# Override service catalog lookup with template for nova +# endpoint e.g. http://localhost:8774/v2/%(project_id)s +# (string value) +#nova_endpoint_template= + +# Same as nova_endpoint_template, but for admin endpoint. +# (string value) +#nova_endpoint_admin_template= + +# Region name of this node (string value) +#os_region_name= + +# Location of ca certificates file to use for nova client +# requests. (string value) +#nova_ca_certificates_file= + +# Allow to perform insecure SSL requests to nova (boolean +# value) +#nova_api_insecure=false + + +# +# Options defined in cinder.db.api +# + +# The backend to use for db (string value) +#db_backend=sqlalchemy + +# Services to be added to the available pool on create +# (boolean value) +#enable_new_services=true + +# Template string to be used to generate volume names (string +# value) +volume_name_template=volume-%s + +# Template string to be used to generate snapshot names +# (string value) +#snapshot_name_template=snapshot-%s + +# Template string to be used to generate backup names (string +# value) +#backup_name_template=backup-%s + + +# +# Options defined in cinder.db.base +# + +# Driver to use for database access (string value) +#db_driver=cinder.db + + +# +# Options defined in cinder.image.glance +# + +# Default core properties of image (list value) +#glance_core_properties=checksum,container_format,disk_format,image_name,image_id,min_disk,min_ram,name,size + +# A list of url schemes that can be downloaded directly via +# the direct_url. Currently supported schemes: [file]. (list +# value) +#allowed_direct_url_schemes= + + +# +# Options defined in cinder.image.image_utils +# + +# Directory used for temporary storage during image conversion +# (string value) +#image_conversion_dir=$state_path/conversion + + +# +# Options defined in cinder.openstack.common.eventlet_backdoor +# + +# Enable eventlet backdoor. Acceptable values are 0, , +# and :, where 0 results in listening on a random +# tcp port number; results in listening on the +# specified port number (and not enabling backdoor if that +# port is in use); and : results in listening on +# the smallest unused port number within the specified range +# of port numbers. The chosen port is displayed in the +# service's log file. 
+
+
+#
+# Options defined in cinder.openstack.common.lockutils
+#
+
+# Whether to disable inter-process locks (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. Defaults to a temp directory
+# (string value)
+lock_path=/var/lock/cinder
+
+
+#
+# Options defined in cinder.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+verbose=True
+
+# Log output to standard error. (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context. (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context.
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format.
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
+
+# Enables or disables publication of error events. (boolean
+# value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log
+# message. (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log
+# message. (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of a logging configuration file. This file is
+# appended to any existing logging configuration files. For
+# details about logging configuration files, see the Python
+# logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s. (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=
+
+# (Optional) The base directory used for relative --log-file
+# paths. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and will change in J to honor RFC5424. (boolean
+# value)
+#use_syslog=false
+
+# (Optional) Enables or disables syslog rfc5424 format for
+# logging. If enabled, prefixes the MSG part of the syslog
+# message with APP-NAME (RFC5424). The format without the
+# APP-NAME is deprecated in I, and will be removed in J.
+# (boolean value)
+#use_syslog_rfc_format=false
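+
+# Illustrative example (assumes a local syslog daemon is configured to
+# accept it): forward Cinder logs to syslog under the LOCAL0 facility
+# instead of logging to a file.
+#use_syslog=true
+#syslog_log_facility=LOG_LOCAL0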
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in cinder.openstack.common.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+
+#
+# Options defined in cinder.openstack.common.policy
+#
+
+# The JSON file that defines policies. (string value)
+#policy_file=policy.json
+
+# Default rule. Enforced when a requested rule is not found.
+# (string value)
+#policy_default_rule=default
+
+
+#
+# Options defined in cinder.scheduler.driver
+#
+
+# The scheduler host manager class to use (string value)
+#scheduler_host_manager=cinder.scheduler.host_manager.HostManager
+
+# Maximum number of attempts to schedule a volume (integer
+# value)
+#scheduler_max_attempts=3
+
+
+#
+# Options defined in cinder.scheduler.host_manager
+#
+
+# Which filter class names to use for filtering hosts when not
+# specified in the request. (list value)
+#scheduler_default_filters=AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter
+
+# Which weigher class names to use for weighing hosts. (list
+# value)
+#scheduler_default_weighers=CapacityWeigher
+
+
+#
+# Options defined in cinder.scheduler.manager
+#
+
+# Default scheduler driver to use (string value)
+#scheduler_driver=cinder.scheduler.filter_scheduler.FilterScheduler
+
+
+#
+# Options defined in cinder.scheduler.scheduler_options
+#
+
+# Absolute path to scheduler configuration JSON file. (string
+# value)
+#scheduler_json_config_location=
+
+
+#
+# Options defined in cinder.scheduler.simple
+#
+
+# This config option has been deprecated along with the
+# SimpleScheduler. The new scheduler is able to gather capacity
+# information for each host, so setting the maximum number of
+# volume gigabytes per host is no longer needed. It's safe to
+# remove this option from cinder.conf. (integer value)
+#max_gigabytes=10000
+
+
+#
+# Options defined in cinder.scheduler.weights.capacity
+#
+
+# Multiplier used for weighing volume capacity. Negative
+# numbers mean to stack vs spread. (floating point value)
+#capacity_weight_multiplier=1.0
+
+# Multiplier used for weighing allocated capacity. Negative
+# numbers mean to stack vs spread. (floating point value)
+#allocated_capacity_weight_multiplier=-1.0
+
+
+#
+# Options defined in cinder.scheduler.weights.volume_number
+#
+
+# Multiplier used for weighing volume number. Negative numbers
+# mean to spread vs stack. (floating point value)
+#volume_number_multiplier=-1.0
+
+
+#
+# Options defined in cinder.transfer.api
+#
+
+# The number of characters in the salt. (integer value)
+#volume_transfer_salt_length=8
+
+# The number of characters in the autogenerated auth key.
+# (integer value)
+#volume_transfer_key_length=16
+
+
+#
+# Options defined in cinder.volume.api
+#
+
+# Cache volume availability zones in memory for the provided
+# duration in seconds (integer value)
+#az_cache_duration=3600
+
+# Create volume from snapshot at the host where snapshot
+# resides (boolean value)
+#snapshot_same_host=true
+
+# Ensure that the new volumes are the same AZ as snapshot or
+# source volume (boolean value)
+#cloned_volume_same_az=true
+
+
+#
+# Options defined in cinder.volume.driver
+#
+
+# The maximum number of times to rescan iSER target to find
+# volume (integer value)
+#num_iser_scan_tries=3
+
+# The maximum number of iSER target IDs per host (integer
+# value)
+#iser_num_targets=100
+
+# Prefix for iSER volumes (string value)
+#iser_target_prefix=iqn.2010-10.org.iser.openstack:
+
+# The IP address that the iSER daemon is listening on (string
+# value)
+#iser_ip_address=$my_ip
+
+# The port that the iSER daemon is listening on (integer
+# value)
+#iser_port=3260
+
+# The name of the iSER target user-land tool to use (string
+# value)
+#iser_helper=tgtadm
+
+# Number of times to attempt to run flakey shell commands
+# (integer value)
+#num_shell_tries=3
+
+# The percentage of backend capacity that is reserved (integer
+# value)
+#reserved_percentage=0
+
+# The maximum number of iSCSI target IDs per host (integer
+# value)
+#iscsi_num_targets=100
+
+# Prefix for iSCSI volumes (string value)
+#iscsi_target_prefix=iqn.2010-10.org.openstack:
+
+# The IP address that the iSCSI daemon is listening on (string
+# value)
+#iscsi_ip_address=$my_ip
+
+# The port that the iSCSI daemon is listening on (integer
+# value)
+#iscsi_port=3260
+
+# The maximum number of times to rescan targets to find volume
+# (integer value)
+# Deprecated group/name - [DEFAULT]/num_iscsi_scan_tries
+#num_volume_device_scan_tries=3
+
+# The backend name for a given driver implementation (string
+# value)
+#volume_backend_name=
+
+# Do we attach/detach volumes in cinder using multipath for
+# volume to image and image to volume transfers? (boolean
+# value)
+#use_multipath_for_image_xfer=false
+
+# Method used to wipe old volumes (valid options are: none,
+# zero, shred) (string value)
+#volume_clear=zero
+
+# Size in MiB to wipe at start of old volumes. 0 => all
+# (integer value)
+#volume_clear_size=0
+
+# The flag to pass to ionice to alter the i/o priority of the
+# process used to zero a volume after deletion, for example
+# "-c3" for idle only priority. (string value)
+#volume_clear_ionice=
+
+# iSCSI target user-land tool to use. tgtadm is default, use
+# lioadm for LIO iSCSI support, iseradm for the ISER protocol,
+# or fake for testing. (string value)
+iscsi_helper=tgtadm
+
+# Volume configuration file storage directory (string value)
+volumes_dir=$state_path/volumes
+
+# IET configuration file (string value)
+#iet_conf=/etc/iet/ietd.conf
+
+# Comma-separated list of initiator IQNs allowed to connect to
+# the iSCSI target. (From Nova compute nodes.) (string value)
+#lio_initiator_iqns=
+
+# Sets the behavior of the iSCSI target to either perform
+# blockio or fileio; optionally, auto can be set and Cinder
+# will autodetect the type of backing device (string value)
+#iscsi_iotype=fileio
+
+# The default block size used when copying/clearing volumes
+# (string value)
+#volume_dd_blocksize=1M
+
+# The blkio cgroup name to be used to limit bandwidth of
+# volume copy (string value)
+#volume_copy_blkio_cgroup_name=cinder-volume-copy
+
+# The upper limit of bandwidth of volume copy. 0 => unlimited
+# (integer value)
+#volume_copy_bps_limit=0
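+
+# Illustrative example (values are assumptions, not defaults): zero out
+# deleted volumes at idle I/O priority and, assuming the limit is given
+# in bytes per second, cap volume-copy bandwidth at roughly 100 MiB/s
+# so wipes do not starve tenant I/O.
+#volume_clear_ionice=-c3
+#volume_copy_bps_limit=104857600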
+
+# Sets the behavior of the iSCSI target to either perform
+# write-back(on) or write-through(off). This parameter is
+# valid if iscsi_helper is set to tgtadm or iseradm. (string
+# value)
+#iscsi_write_cache=on
+
+# The path to the client certificate key for verification, if
+# the driver supports it. (string value)
+#driver_client_cert_key=
+
+# The path to the client certificate for verification, if the
+# driver supports it. (string value)
+#driver_client_cert=
+
+
+#
+# Options defined in cinder.volume.drivers.block_device
+#
+
+# List of all available devices (list value)
+#available_devices=
+
+
+#
+# Options defined in cinder.volume.drivers.coraid
+#
+
+# IP address of Coraid ESM (string value)
+#coraid_esm_address=
+
+# User name to connect to Coraid ESM (string value)
+#coraid_user=admin
+
+# Name of group on Coraid ESM to which coraid_user belongs
+# (must have admin privilege) (string value)
+#coraid_group=admin
+
+# Password to connect to Coraid ESM (string value)
+#coraid_password=password
+
+# Volume Type key name to store ESM Repository Name (string
+# value)
+#coraid_repository_key=coraid_repository
+
+
+#
+# Options defined in cinder.volume.drivers.datera
+#
+
+# Datera API token. (string value)
+#datera_api_token=
+
+# Datera API port. (string value)
+#datera_api_port=7717
+
+# Datera API version. (string value)
+#datera_api_version=1
+
+# Number of replicas to create of an inode. (string value)
+#datera_num_replicas=3
+
+
+#
+# Options defined in cinder.volume.drivers.emc.emc_vmax_common
+#
+
+# Use this file for cinder emc plugin config data (string
+# value)
+#cinder_emc_config_file=/etc/cinder/cinder_emc_config.xml
+
+
+#
+# Options defined in cinder.volume.drivers.emc.emc_vnx_cli
+#
+
+# VNX authentication scope type. (string value)
+#storage_vnx_authentication_type=global
+
+# Directory path that contains the VNX security file. Make
+# sure the security file is generated first. (string value)
+#storage_vnx_security_file_dir=
+
+# Naviseccli Path. (string value)
+#naviseccli_path=
+
+# Storage pool name. (string value)
+#storage_vnx_pool_name=
+
+# VNX secondary SP IP Address. (string value)
+#san_secondary_ip=
+
+# Default timeout for CLI operations in minutes. For example,
+# LUN migration is a typical long-running operation, which
+# depends on the LUN size and the load of the array. An upper
+# bound for the specific deployment can be set to avoid an
+# unnecessarily long wait. By default, it is 365 days long.
+# (integer value)
+#default_timeout=525600
+
+# Default max number of LUNs in a storage group. By default,
+# the value is 255. (integer value)
+#max_luns_per_storage_group=255
+
+# Whether to destroy a storage group when the last LUN is
+# removed from it. By default, the value is False. (boolean
+# value)
+#destroy_empty_storage_group=false
+
+# Mapping between hostname and its iSCSI initiator IP
+# addresses. (string value)
+#iscsi_initiators=
+
+# Automatically register initiators. By default, the value is
+# False. (boolean value)
+#initiator_auto_registration=false
+
+
+#
+# Options defined in cinder.volume.drivers.eqlx
+#
+
+# Group name to use for creating volumes (string value)
+#eqlx_group_name=group-0
+
+# Timeout for the Group Manager cli command execution (integer
+# value)
+#eqlx_cli_timeout=30
+
+# Maximum retry count for reconnection (integer value)
+#eqlx_cli_max_retries=5
+
+# Use CHAP authentication for targets?
(boolean value)
+#eqlx_use_chap=false
+
+# Existing CHAP account name (string value)
+#eqlx_chap_login=admin
+
+# Password for specified CHAP account name (string value)
+#eqlx_chap_password=password
+
+# Pool in which volumes will be created (string value)
+#eqlx_pool=default
+
+
+#
+# Options defined in cinder.volume.drivers.fujitsu_eternus_dx_common
+#
+
+# The configuration file for the Cinder SMI-S driver (string
+# value)
+#cinder_smis_config_file=/etc/cinder/cinder_fujitsu_eternus_dx.xml
+
+
+#
+# Options defined in cinder.volume.drivers.fusionio.ioControl
+#
+
+# Amount of time to wait for the iSCSI target to come online
+# (integer value)
+#fusionio_iocontrol_targetdelay=5
+
+# Number of retries for GET operations (integer value)
+#fusionio_iocontrol_retry=3
+
+# Verify the array certificate on each transaction (boolean
+# value)
+#fusionio_iocontrol_verify_cert=true
+
+
+#
+# Options defined in cinder.volume.drivers.glusterfs
+#
+
+# File with the list of available gluster shares (string
+# value)
+#glusterfs_shares_config=/etc/cinder/glusterfs_shares
+
+# Create volumes as sparsed files which take no space. If set
+# to False, the volume is created as a regular file. In that
+# case volume creation takes a lot of time. (boolean value)
+#glusterfs_sparsed_volumes=true
+
+# Create volumes as QCOW2 files rather than raw files.
+# (boolean value)
+#glusterfs_qcow2_volumes=false
+
+# Base dir containing mount points for gluster shares. (string
+# value)
+#glusterfs_mount_point_base=$state_path/mnt
+
+
+#
+# Options defined in cinder.volume.drivers.hds.hds
+#
+
+# The configuration file for the Cinder HDS driver for HUS
+# (string value)
+#hds_cinder_config_file=/opt/hds/hus/cinder_hus_conf.xml
+
+
+#
+# Options defined in cinder.volume.drivers.hds.iscsi
+#
+
+# Configuration file for HDS iSCSI cinder plugin (string
+# value)
+#hds_hnas_iscsi_config_file=/opt/hds/hnas/cinder_iscsi_conf.xml
+
+
+#
+# Options defined in cinder.volume.drivers.hds.nfs
+#
+
+# Configuration file for HDS NFS cinder plugin (string value)
+#hds_hnas_nfs_config_file=/opt/hds/hnas/cinder_nfs_conf.xml
+
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+
+# Serial number of storage system (string value)
+#hitachi_serial_number=
+
+# Name of an array unit (string value)
+#hitachi_unit_name=
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=
+
+# Thin pool ID of storage system (integer value)
+#hitachi_thin_pool_id=
+
+# Range of logical device of storage system (string value)
+#hitachi_ldev_range=
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+
+# Copy speed of storage system (integer value)
+#hitachi_copy_speed=3
+
+# Interval to check copy (integer value)
+#hitachi_copy_check_interval=3
+
+# Interval to check copy asynchronously (integer value)
+#hitachi_async_copy_check_interval=10
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=
+
+# Range of group number (string value)
+#hitachi_group_range=
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+#hitachi_group_request=false
+
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_fc
+#
+
+# Request for FC Zone creating HostGroup (boolean value)
+#hitachi_zoning_request=false
+
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm
+#
+
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=
+
+# Password of
storage system for HORCM (string value) +#hitachi_horcm_password= + +# Add to HORCM configuration (boolean value) +#hitachi_horcm_add_conf=true + + +# +# Options defined in cinder.volume.drivers.hitachi.hbsd_iscsi +# + +# Add CHAP user (boolean value) +#hitachi_add_chap_user=false + +# iSCSI authentication method (string value) +#hitachi_auth_method= + +# iSCSI authentication username (string value) +#hitachi_auth_user=HBSD-CHAP-user + +# iSCSI authentication password (string value) +#hitachi_auth_password=HBSD-CHAP-password + + +# +# Options defined in cinder.volume.drivers.huawei +# + +# The configuration file for the Cinder Huawei driver (string +# value) +#cinder_huawei_conf_file=/etc/cinder/cinder_huawei_conf.xml + + +# +# Options defined in cinder.volume.drivers.ibm.gpfs +# + +# Specifies the path of the GPFS directory where Block Storage +# volume and snapshot files are stored. (string value) +#gpfs_mount_point_base= + +# Specifies the path of the Image service repository in GPFS. +# Leave undefined if not storing images in GPFS. (string +# value) +#gpfs_images_dir= + +# Specifies the type of image copy to be used. Set this when +# the Image service repository also uses GPFS so that image +# files can be transferred efficiently from the Image service +# to the Block Storage service. There are two valid values: +# "copy" specifies that a full copy of the image is made; +# "copy_on_write" specifies that copy-on-write optimization +# strategy is used and unmodified blocks of the image file are +# shared efficiently. (string value) +#gpfs_images_share_mode= + +# Specifies an upper limit on the number of indirections +# required to reach a specific block due to snapshots or +# clones. A lengthy chain of copy-on-write snapshots or +# clones can have a negative impact on performance, but +# improves space utilization. 0 indicates unlimited clone +# depth. (integer value) +#gpfs_max_clone_depth=0 + +# Specifies that volumes are created as sparse files which +# initially consume no space. If set to False, the volume is +# created as a fully allocated file, in which case, creation +# may take a significantly longer time. (boolean value) +#gpfs_sparse_volumes=true + +# Specifies the storage pool that volumes are assigned to. By +# default, the system storage pool is used. (string value) +#gpfs_storage_pool=system + + +# +# Options defined in cinder.volume.drivers.ibm.ibmnas +# + +# IP address or Hostname of NAS system. (string value) +#nas_ip= + +# User name to connect to NAS system. (string value) +#nas_login=admin + +# Password to connect to NAS system. (string value) +#nas_password= + +# SSH port to use to connect to NAS system. (integer value) +#nas_ssh_port=22 + +# Filename of private key to use for SSH authentication. +# (string value) +#nas_private_key= + +# IBMNAS platform type to be used as backend storage; valid +# values are - v7ku : for using IBM Storwize V7000 Unified, +# sonas : for using IBM Scale Out NAS, gpfs-nas : for using +# NFS based IBM GPFS deployments. 
(string value) +#ibmnas_platform_type=v7ku + + +# +# Options defined in cinder.volume.drivers.ibm.storwize_svc +# + +# Storage system storage pool for volumes (string value) +#storwize_svc_volpool_name=volpool + +# Storage system space-efficiency parameter for volumes +# (percentage) (integer value) +#storwize_svc_vol_rsize=2 + +# Storage system threshold for volume capacity warnings +# (percentage) (integer value) +#storwize_svc_vol_warning=0 + +# Storage system autoexpand parameter for volumes (True/False) +# (boolean value) +#storwize_svc_vol_autoexpand=true + +# Storage system grain size parameter for volumes +# (32/64/128/256) (integer value) +#storwize_svc_vol_grainsize=256 + +# Storage system compression option for volumes (boolean +# value) +#storwize_svc_vol_compression=false + +# Enable Easy Tier for volumes (boolean value) +#storwize_svc_vol_easytier=true + +# The I/O group in which to allocate volumes (integer value) +#storwize_svc_vol_iogrp=0 + +# Maximum number of seconds to wait for FlashCopy to be +# prepared. Maximum value is 600 seconds (10 minutes) (integer +# value) +#storwize_svc_flashcopy_timeout=120 + +# Connection protocol (iSCSI/FC) (string value) +#storwize_svc_connection_protocol=iSCSI + +# Configure CHAP authentication for iSCSI connections +# (Default: Enabled) (boolean value) +#storwize_svc_iscsi_chap_enabled=true + +# Connect with multipath (FC only; iSCSI multipath is +# controlled by Nova) (boolean value) +#storwize_svc_multipath_enabled=false + +# Allows vdisk to multi host mapping (boolean value) +#storwize_svc_multihostmap_enabled=true + +# Indicate whether svc driver is compatible for NPIV setup. If +# it is compatible, it will allow no wwpns being returned on +# get_conn_fc_wwpns during initialize_connection (boolean +# value) +#storwize_svc_npiv_compatibility_mode=false + +# Allow tenants to specify QOS on create (boolean value) +#storwize_svc_allow_tenant_qos=false + +# If operating in stretched cluster mode, specify the name of +# the pool in which mirrored copies are stored.Example: +# "pool2" (string value) +#storwize_svc_stretched_cluster_partner= + + +# +# Options defined in cinder.volume.drivers.ibm.xiv_ds8k +# + +# Proxy driver that connects to the IBM Storage Array (string +# value) +#xiv_ds8k_proxy=xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy + +# Connection type to the IBM Storage Array +# (fibre_channel|iscsi) (string value) +#xiv_ds8k_connection_type=iscsi + +# CHAP authentication mode, effective only for iscsi +# (disabled|enabled) (string value) +#xiv_chap=disabled + + +# +# Options defined in cinder.volume.drivers.lvm +# + +# Name for the VG that will contain exported volumes (string +# value) +volume_group=cinder-volumes + +# If >0, create LVs with multiple mirrors. Note that this +# requires lvm_mirrors + 2 PVs with available space (integer +# value) +#lvm_mirrors=0 + +# Type of LVM volumes to deploy; (default or thin) (string +# value) +#lvm_type=default + + +# +# Options defined in cinder.volume.drivers.netapp.options +# + +# The vFiler unit on which provisioning of block storage +# volumes will be done. This option is only used by the driver +# when connecting to an instance with a storage family of Data +# ONTAP operating in 7-Mode and the storage protocol selected +# is iSCSI. Only use this option when utilizing the MultiStore +# feature on the NetApp storage system. (string value) +#netapp_vfiler= + +# Administrative user account name used to access the storage +# system or proxy server. 
(string value) +#netapp_login= + +# Password for the administrative user account specified in +# the netapp_login option. (string value) +#netapp_password= + +# This option specifies the virtual storage server (Vserver) +# name on the storage cluster on which provisioning of block +# storage volumes should occur. If using the NFS storage +# protocol, this parameter is mandatory for storage service +# catalog support (utilized by Cinder volume type extra_specs +# support). If this option is specified, the exports belonging +# to the Vserver will only be used for provisioning in the +# future. Block storage volumes on exports not belonging to +# the Vserver specified by this option will continue to +# function normally. (string value) +#netapp_vserver= + +# The hostname (or IP address) for the storage system or proxy +# server. (string value) +#netapp_server_hostname= + +# The TCP port to use for communication with the storage +# system or proxy server. Traditionally, port 80 is used for +# HTTP and port 443 is used for HTTPS; however, this value +# should be changed if an alternate port has been configured +# on the storage system or proxy server. (integer value) +#netapp_server_port=80 + +# This option is used to specify the path to the E-Series +# proxy application on a proxy server. The value is combined +# with the value of the netapp_transport_type, +# netapp_server_hostname, and netapp_server_port options to +# create the URL used by the driver to connect to the proxy +# application. (string value) +#netapp_webservice_path=/devmgr/v2 + +# This option is only utilized when the storage family is +# configured to eseries. This option is used to restrict +# provisioning to the specified controllers. Specify the value +# of this option to be a comma separated list of controller +# hostnames or IP addresses to be used for provisioning. +# (string value) +#netapp_controller_ips= + +# Password for the NetApp E-Series storage array. (string +# value) +#netapp_sa_password= + +# This option is used to restrict provisioning to the +# specified storage pools. Only dynamic disk pools are +# currently supported. Specify the value of this option to be +# a comma separated list of disk pool names to be used for +# provisioning. (string value) +#netapp_storage_pools= + +# This option is used to define how the controllers in the +# E-Series storage array will work with the particular +# operating system on the hosts that are connected to it. +# (string value) +#netapp_eseries_host_type=linux_dm_mp + +# If the percentage of available space for an NFS share has +# dropped below the value specified by this option, the NFS +# image cache will be cleaned. (integer value) +#thres_avl_size_perc_start=20 + +# When the percentage of available space on an NFS share has +# reached the percentage specified by this option, the driver +# will stop clearing files from the NFS image cache that have +# not been accessed in the last M minutes, where M is the +# value of the expiry_thres_minutes configuration option. +# (integer value) +#thres_avl_size_perc_stop=60 + +# This option specifies the threshold for last access time for +# images in the NFS image cache. When a cache cleaning cycle +# begins, images in the cache that have not been accessed in +# the last M minutes, where M is the value of this parameter, +# will be deleted from the cache to create free space on the +# NFS share. (integer value) +#expiry_thres_minutes=720 + +# This option specifies the path of the NetApp copy offload +# tool binary. 
Ensure that the binary has execute permissions +# set which allow the effective user of the cinder-volume +# process to execute the file. (string value) +#netapp_copyoffload_tool_path= + +# The quantity to be multiplied by the requested volume size +# to ensure enough space is available on the virtual storage +# server (Vserver) to fulfill the volume creation request. +# (floating point value) +#netapp_size_multiplier=1.2 + +# This option is only utilized when the storage protocol is +# configured to use iSCSI. This option is used to restrict +# provisioning to the specified controller volumes. Specify +# the value of this option to be a comma separated list of +# NetApp controller volume names to be used for provisioning. +# (string value) +#netapp_volume_list= + +# The storage family type used on the storage system; valid +# values are ontap_7mode for using Data ONTAP operating in +# 7-Mode, ontap_cluster for using clustered Data ONTAP, or +# eseries for using E-Series. (string value) +#netapp_storage_family=ontap_cluster + +# The storage protocol to be used on the data path with the +# storage system; valid values are iscsi or nfs. (string +# value) +#netapp_storage_protocol= + +# The transport protocol used when communicating with the +# storage system or proxy server. Valid values are http or +# https. (string value) +#netapp_transport_type=http + + +# +# Options defined in cinder.volume.drivers.nexenta.options +# + +# IP address of Nexenta SA (string value) +#nexenta_host= + +# HTTP port to connect to Nexenta REST API server (integer +# value) +#nexenta_rest_port=2000 + +# Use http or https for REST connection (default auto) (string +# value) +#nexenta_rest_protocol=auto + +# User name to connect to Nexenta SA (string value) +#nexenta_user=admin + +# Password to connect to Nexenta SA (string value) +#nexenta_password=nexenta + +# Nexenta target portal port (integer value) +#nexenta_iscsi_target_portal_port=3260 + +# SA Pool that holds all volumes (string value) +#nexenta_volume=cinder + +# IQN prefix for iSCSI targets (string value) +#nexenta_target_prefix=iqn.1986-03.com.sun:02:cinder- + +# Prefix for iSCSI target groups on SA (string value) +#nexenta_target_group_prefix=cinder/ + +# File with the list of available nfs shares (string value) +#nexenta_shares_config=/etc/cinder/nfs_shares + +# Base directory that contains NFS share mount points (string +# value) +#nexenta_mount_point_base=$state_path/mnt + +# Enables or disables the creation of volumes as sparsed files +# that take no space. If disabled (False), volume is created +# as a regular file, which takes a long time. (boolean value) +#nexenta_sparsed_volumes=true + +# Default compression value for new ZFS folders. (string +# value) +#nexenta_volume_compression=on + +# If set True cache NexentaStor appliance volroot option +# value. (boolean value) +#nexenta_nms_cache_volroot=true + +# Enable stream compression, level 1..9. 1 - gives best speed; +# 9 - gives best compression. (integer value) +#nexenta_rrmgr_compression=0 + +# TCP Buffer size in KiloBytes. (integer value) +#nexenta_rrmgr_tcp_buf_size=4096 + +# Number of TCP connections. 
(integer value)
+#nexenta_rrmgr_connections=2
+
+# Block size for volumes (default=blank means 8KB) (string
+# value)
+#nexenta_blocksize=
+
+# Enables or disables the creation of sparse volumes (boolean
+# value)
+#nexenta_sparse=false
+
+
+#
+# Options defined in cinder.volume.drivers.nfs
+#
+
+# File with the list of available nfs shares (string value)
+#nfs_shares_config=/etc/cinder/nfs_shares
+
+# Create volumes as sparsed files which take no space. If set
+# to False, the volume is created as a regular file. In that
+# case volume creation takes a lot of time. (boolean value)
+#nfs_sparsed_volumes=true
+
+# Percent of ACTUAL usage of the underlying volume before no
+# new volumes can be allocated to the volume destination.
+# (floating point value)
+#nfs_used_ratio=0.95
+
+# This will compare the allocated to available space on the
+# volume destination. If the ratio exceeds this number, the
+# destination will no longer be valid. (floating point value)
+#nfs_oversub_ratio=1.0
+
+# Base dir containing mount points for nfs shares. (string
+# value)
+#nfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the nfs client. See the nfs man
+# page for details. (string value)
+#nfs_mount_options=
+
+
+#
+# Options defined in cinder.volume.drivers.nimble
+#
+
+# Nimble Controller pool name (string value)
+#nimble_pool_name=default
+
+# Nimble Subnet Label (string value)
+#nimble_subnet_label=*
+
+
+#
+# Options defined in cinder.volume.drivers.prophetstor.options
+#
+
+# DPL pool uuid in which DPL volumes are stored. (string
+# value)
+#dpl_pool=
+
+# DPL port number. (integer value)
+#dpl_port=8357
+
+
+#
+# Options defined in cinder.volume.drivers.pure
+#
+
+# REST API authorization token. (string value)
+#pure_api_token=
+
+
+#
+# Options defined in cinder.volume.drivers.rbd
+#
+
+# The RADOS pool where rbd volumes are stored (string value)
+#rbd_pool=rbd
+
+# The RADOS client name for accessing rbd volumes - only set
+# when using cephx authentication (string value)
+#rbd_user=
+
+# Path to the ceph configuration file (string value)
+#rbd_ceph_conf=
+
+# Flatten volumes created from snapshots to remove dependency
+# from volume to snapshot (boolean value)
+#rbd_flatten_volume_from_snapshot=false
+
+# The libvirt uuid of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=
+
+# Directory where temporary image files are stored when the
+# volume driver does not write them directly to the volume.
+# (string value)
+#volume_tmp_dir=
+
+# Maximum number of nested volume clones that are taken before
+# a flatten occurs. Set to 0 to disable cloning. (integer
+# value)
+#rbd_max_clone_depth=5
+
+# Volumes will be chunked into objects of this size (in
+# megabytes). (integer value)
+#rbd_store_chunk_size=4
+
+# Timeout value (in seconds) used when connecting to ceph
+# cluster. If value < 0, no timeout is set and default
+# librados value is used. (integer value)
+#rados_connect_timeout=-1
+
+
+#
+# Options defined in cinder.volume.drivers.remotefs
+#
+
+# IP address or Hostname of NAS system. (string value)
+#nas_ip=
+
+# User name to connect to NAS system. (string value)
+#nas_login=admin
+
+# Password to connect to NAS system. (string value)
+#nas_password=
+
+# SSH port to use to connect to NAS system. (integer value)
+#nas_ssh_port=22
+
+# Filename of private key to use for SSH authentication.
+# (string value)
+#nas_private_key=
+
+
+#
+# Options defined in cinder.volume.drivers.san.hp.hp_3par_common
+#
+
+# 3PAR WSAPI Server Url like https://<3par ip>:8080/api/v1
+# (string value)
+#hp3par_api_url=
+
+# 3PAR Super user username (string value)
+#hp3par_username=
+
+# 3PAR Super user password (string value)
+#hp3par_password=
+
+# The CPG to use for volume creation (string value)
+#hp3par_cpg=OpenStack
+
+# The CPG to use for Snapshots for volumes. If empty
+# hp3par_cpg will be used (string value)
+#hp3par_cpg_snap=
+
+# The time in hours to retain a snapshot. You can't delete it
+# before this expires. (string value)
+#hp3par_snapshot_retention=
+
+# The time in hours when a snapshot expires and is deleted.
+# This must be larger than the retention time (string value)
+#hp3par_snapshot_expiration=
+
+# Enable HTTP debugging to 3PAR (boolean value)
+#hp3par_debug=false
+
+# List of target iSCSI addresses to use. (list value)
+#hp3par_iscsi_ips=
+
+# Enable CHAP authentication for iSCSI connections. (boolean
+# value)
+#hp3par_iscsi_chap_enabled=false
+
+
+#
+# Options defined in cinder.volume.drivers.san.hp.hp_lefthand_rest_proxy
+#
+
+# HP LeftHand WSAPI Server Url like
+# https://<LeftHand ip>:8081/lhos (string value)
+#hplefthand_api_url=
+
+# HP LeftHand Super user username (string value)
+#hplefthand_username=
+
+# HP LeftHand Super user password (string value)
+#hplefthand_password=
+
+# HP LeftHand cluster name (string value)
+#hplefthand_clustername=
+
+# Configure CHAP authentication for iSCSI connections
+# (Default: Disabled) (boolean value)
+#hplefthand_iscsi_chap_enabled=false
+
+# Enable HTTP debugging to LeftHand (boolean value)
+#hplefthand_debug=false
+
+
+#
+# Options defined in cinder.volume.drivers.san.hp.hp_msa_common
+#
+
+# The VDisk to use for volume creation. (string value)
+#msa_vdisk=OpenStack
+
+
+#
+# Options defined in cinder.volume.drivers.san.san
+#
+
+# Use thin provisioning for SAN volumes? (boolean value)
+#san_thin_provision=true
+
+# IP address of SAN controller (string value)
+#san_ip=
+
+# Username for SAN controller (string value)
+#san_login=admin
+
+# Password for SAN controller (string value)
+#san_password=
+
+# Filename of private key to use for SSH authentication
+# (string value)
+#san_private_key=
+
+# Cluster name to use for creating volumes (string value)
+#san_clustername=
+
+# SSH port to use with SAN (integer value)
+#san_ssh_port=22
+
+# Execute commands locally instead of over SSH; use if the
+# volume service is running on the SAN device (boolean value)
+#san_is_local=false
+
+# SSH connection timeout in seconds (integer value)
+#ssh_conn_timeout=30
+
+# Minimum ssh connections in the pool (integer value)
+#ssh_min_pool_conn=1
+
+# Maximum ssh connections in the pool (integer value)
+#ssh_max_pool_conn=5
+
+
+#
+# Options defined in cinder.volume.drivers.san.solaris
+#
+
+# The ZFS path under which to create zvols for volumes.
+# (string value)
+#san_zfs_volume_base=rpool/
+
+
+#
+# Options defined in cinder.volume.drivers.scality
+#
+
+# Path or URL to Scality SOFS configuration file (string
+# value)
+#scality_sofs_config=
+
+# Base dir where Scality SOFS shall be mounted (string value)
+#scality_sofs_mount_point=$state_path/scality
+
+# Path from Scality SOFS root to volume dir (string value)
+#scality_sofs_volume_dir=cinder/volumes
+
+
+#
+# Options defined in cinder.volume.drivers.smbfs
+#
+
+# File with the list of available smbfs shares. (string value)
+#smbfs_shares_config=/etc/cinder/smbfs_shares
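+
+# Illustrative example (server, share, and credentials below are
+# assumptions): each line of the shares file names one SMB share plus
+# its mount options, along the lines of:
+#   //nas01.example.com/cinder -o username=cinder,password=secret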
+
+# Default format that will be used when creating volumes if no
+# volume format is specified. Can be set to: raw, qcow2, vhd
+# or vhdx. (string value)
+#smbfs_default_volume_format=qcow2
+
+# Create volumes as sparsed files which take no space rather
+# than regular files when using raw format, in which case
+# volume creation takes a lot of time. (boolean value)
+#smbfs_sparsed_volumes=true
+
+# Percent of ACTUAL usage of the underlying volume before no
+# new volumes can be allocated to the volume destination.
+# (floating point value)
+#smbfs_used_ratio=0.95
+
+# This will compare the allocated to available space on the
+# volume destination. If the ratio exceeds this number, the
+# destination will no longer be valid. (floating point value)
+#smbfs_oversub_ratio=1.0
+
+# Base dir containing mount points for smbfs shares. (string
+# value)
+#smbfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the smbfs client. See mount.cifs man
+# page for details. (string value)
+#smbfs_mount_options=noperm,file_mode=0775,dir_mode=0775
+
+
+#
+# Options defined in cinder.volume.drivers.solidfire
+#
+
+# Set 512 byte emulation on volume creation; (boolean value)
+#sf_emulate_512=true
+
+# Allow tenants to specify QOS on create (boolean value)
+#sf_allow_tenant_qos=false
+
+# Create SolidFire accounts with this prefix. Any string can
+# be used here, but the string "hostname" is special and will
+# create a prefix using the cinder node hostname (previous
+# default behavior). The default is NO prefix. (string value)
+#sf_account_prefix=
+
+# SolidFire API port. Useful if the device api is behind a
+# proxy on a different port. (integer value)
+#sf_api_port=443
+
+
+#
+# Options defined in cinder.volume.drivers.vmware.vmdk
+#
+
+# IP address for connecting to VMware ESX/VC server. (string
+# value)
+#vmware_host_ip=
+
+# Username for authenticating with VMware ESX/VC server.
+# (string value)
+#vmware_host_username=
+
+# Password for authenticating with VMware ESX/VC server.
+# (string value)
+#vmware_host_password=
+
+# Optional VIM service WSDL Location e.g.
+# http://<server>/vimService.wsdl. Optional override to the
+# default location for bug workarounds. (string value)
+#vmware_wsdl_location=
+
+# Number of times VMware ESX/VC server API must be retried
+# upon connection related issues. (integer value)
+#vmware_api_retry_count=10
+
+# The interval (in seconds) for polling remote tasks invoked
+# on VMware ESX/VC server. (floating point value)
+#vmware_task_poll_interval=0.5
+
+# Name for the folder in the VC datacenter that will contain
+# cinder volumes. (string value)
+#vmware_volume_folder=cinder-volumes
+
+# Timeout in seconds for VMDK volume transfer between Cinder
+# and Glance. (integer value)
+#vmware_image_transfer_timeout_secs=7200
+
+# Max number of objects to be retrieved per batch. Query
+# results will be obtained in batches from the server and not
+# in one shot. Server may still limit the count to something
+# less than the configured value. (integer value)
+#vmware_max_objects_retrieval=100
+
+# Optional string specifying the VMware VC server version. The
+# driver attempts to retrieve the version from VMware VC
+# server. Set this configuration only if you want to override
+# the VC server version. (string value)
+#vmware_host_version=
+
+# Directory where virtual disks are stored during volume
+# backup and restore. (string value)
+#vmware_tmp_dir=/tmp
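+
+# Illustrative example (address, account, and password below are
+# assumptions): point the VMDK driver at a vCenter server.
+#vmware_host_ip=192.0.2.50
+#vmware_host_username=cinder-svc
+#vmware_host_password=secret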
+
+
+#
+# Options defined in cinder.volume.drivers.windows.windows
+#
+
+# Path to store VHD backed volumes (string value)
+#windows_iscsi_lun_path=C:\iSCSIVirtualDisks
+
+
+#
+# Options defined in cinder.volume.drivers.zadara
+#
+
+# Management IP of Zadara VPSA (string value)
+#zadara_vpsa_ip=
+
+# Zadara VPSA port number (string value)
+#zadara_vpsa_port=
+
+# Use SSL connection (boolean value)
+#zadara_vpsa_use_ssl=false
+
+# User name for the VPSA (string value)
+#zadara_user=
+
+# Password for the VPSA (string value)
+#zadara_password=
+
+# Name of VPSA storage pool for volumes (string value)
+#zadara_vpsa_poolname=
+
+# Default thin provisioning policy for volumes (boolean value)
+#zadara_vol_thin=true
+
+# Default encryption policy for volumes (boolean value)
+#zadara_vol_encrypt=false
+
+# Default template for VPSA volume names (string value)
+#zadara_vol_name_template=OS_%s
+
+# Automatically detach from servers on volume delete (boolean
+# value)
+#zadara_vpsa_auto_detach_on_delete=true
+
+# Don't halt on deletion of non-existing volumes (boolean
+# value)
+#zadara_vpsa_allow_nonexistent_delete=true
+
+
+#
+# Options defined in cinder.volume.drivers.zfssa.zfssaiscsi
+#
+
+# Storage pool name. (string value)
+#zfssa_pool=
+
+# Project name. (string value)
+#zfssa_project=
+
+# Block size: 512, 1k, 2k, 4k, 8k, 16k, 32k, 64k, 128k.
+# (string value)
+#zfssa_lun_volblocksize=8k
+
+# Flag to enable sparse (thin-provisioned): True, False.
+# (boolean value)
+#zfssa_lun_sparse=false
+
+# Data compression: off, lzjb, gzip-2, gzip, gzip-9. (string
+# value)
+#zfssa_lun_compression=
+
+# Synchronous write bias: latency, throughput. (string value)
+#zfssa_lun_logbias=
+
+# iSCSI initiator group. (string value)
+#zfssa_initiator_group=
+
+# iSCSI initiator IQNs. (comma separated) (string value)
+#zfssa_initiator=
+
+# iSCSI initiator CHAP user. (string value)
+#zfssa_initiator_user=
+
+# iSCSI initiator CHAP password. (string value)
+#zfssa_initiator_password=
+
+# iSCSI target group name. (string value)
+#zfssa_target_group=tgt-grp
+
+# iSCSI target CHAP user. (string value)
+#zfssa_target_user=
+
+# iSCSI target CHAP password. (string value)
+#zfssa_target_password=
+
+# iSCSI target portal (Data-IP:Port, w.x.y.z:3260). (string
+# value)
+#zfssa_target_portal=
+
+# Network interfaces of iSCSI targets. (comma separated)
+# (string value)
+#zfssa_target_interfaces=
+
+# REST connection timeout. (seconds) (integer value)
+#zfssa_rest_timeout=
+
+
+#
+# Options defined in cinder.volume.manager
+#
+
+# Driver to use for volume creation (string value)
+#volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
+
+# Timeout for creating the volume to migrate to when
+# performing volume migration (seconds) (integer value)
+#migration_create_volume_timeout_secs=300
+
+# Offload pending volume delete during volume service startup
+# (boolean value)
+#volume_service_inithost_offload=false
+
+# FC Zoning mode configured (string value)
+#zoning_mode=none
+
+# User defined capabilities, a JSON formatted string
+# specifying key/value pairs. (string value)
+#extra_capabilities={}
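+
+# Illustrative example (the backend name is an assumption): the driver
+# options above may also be grouped per backend; each name listed in
+# enabled_backends refers to a config section carrying that backend's
+# options.
+#enabled_backends=lvm-1
+#
+#[lvm-1]
+#volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
+#volume_group=cinder-volumes
+#volume_backend_name=LVM_iSCSI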
+
+
+[BRCD_FABRIC_EXAMPLE]
+
+#
+# Options defined in cinder.zonemanager.drivers.brocade.brcd_fabric_opts
+#
+
+# Management IP of fabric (string value)
+#fc_fabric_address=
+
+# Fabric user ID (string value)
+#fc_fabric_user=
+
+# Password for user (string value)
+#fc_fabric_password=
+
+# Connecting port (integer value)
+#fc_fabric_port=22
+
+# Overridden zoning policy (string value)
+#zoning_policy=initiator-target
+
+# Overridden zoning activation state (boolean value)
+#zone_activate=true
+
+# Overridden zone name prefix (string value)
+#zone_name_prefix=
+
+# Principal switch WWN of the fabric (string value)
+#principal_switch_wwn=
+
+
+[CISCO_FABRIC_EXAMPLE]
+
+#
+# Options defined in cinder.zonemanager.drivers.cisco.cisco_fabric_opts
+#
+
+# Management IP of fabric (string value)
+#cisco_fc_fabric_address=
+
+# Fabric user ID (string value)
+#cisco_fc_fabric_user=
+
+# Password for user (string value)
+#cisco_fc_fabric_password=
+
+# Connecting port (integer value)
+#cisco_fc_fabric_port=22
+
+# Overridden zoning policy (string value)
+#cisco_zoning_policy=initiator-target
+
+# Overridden zoning activation state (boolean value)
+#cisco_zone_activate=true
+
+# Overridden zone name prefix (string value)
+#cisco_zone_name_prefix=
+
+# VSAN of the Fabric (string value)
+#cisco_zoning_vsan=
+
+
+[database]
+
+#
+# Options defined in oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+#sqlite_db=oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous=true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the
+# database. (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+connection=postgresql://##CINDER_DB_USER##:##CINDER_DB_PASSWORD##@onenode/cinder
+
+# The SQLAlchemy connection string to use to connect to the
+# slave database. (string value)
+#slave_connection=
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=
+
+# Maximum number of database connection retries during
+# startup. Set to -1 to specify an infinite retry count.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a SQL connection.
+# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with SQLAlchemy. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information: 0=None, +# 100=Everything. (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug=0 + +# Add Python stack traces to SQL as comment strings. (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace=false + +# If set, use this value for pool_timeout with SQLAlchemy. +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + +# Enable the experimental use of database reconnect on +# connection lost. (boolean value) +#use_db_reconnect=false + +# Seconds between database connection retries. (integer value) +#db_retry_interval=1 + +# If True, increases the interval between database connection +# retries up to db_max_retry_interval. (boolean value) +#db_inc_retry_interval=true + +# If db_inc_retry_interval is set, the maximum seconds between +# database connection retries. (integer value) +#db_max_retry_interval=10 + +# Maximum database connection retries before error is raised. +# Set to -1 to specify an infinite retry count. (integer +# value) +#db_max_retries=20 + + +# +# Options defined in oslo.db.concurrency +# + +# Enable the experimental use of thread pooling for all DB API +# calls (boolean value) +# Deprecated group/name - [DEFAULT]/dbapi_use_tpool +#use_tpool=false + + +[fc-zone-manager] + +# +# Options defined in cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver +# + +# Southbound connector for zoning operation (string value) +#brcd_sb_connector=cinder.zonemanager.drivers.brocade.brcd_fc_zone_client_cli.BrcdFCZoneClientCLI + + +# +# Options defined in cinder.zonemanager.drivers.cisco.cisco_fc_zone_driver +# + +# Southbound connector for zoning operation (string value) +#cisco_sb_connector=cinder.zonemanager.drivers.cisco.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI + + +# +# Options defined in cinder.zonemanager.fc_zone_manager +# + +# FC Zone Driver responsible for zone management (string +# value) +#zone_driver=cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver + +# Zoning policy configured by user (string value) +#zoning_policy=initiator-target + +# Comma separated list of fibre channel fabric names. This +# list of names is used to retrieve other SAN credentials for +# connecting to each SAN fabric (string value) +#fc_fabric_names= + +# FC San Lookup Service (string value) +#fc_san_lookup_service=cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service.BrcdFCSanLookupService + + +[keymgr] + +# +# Options defined in cinder.keymgr +# + +# The full class name of the key manager API class (string +# value) +#api_class=cinder.keymgr.conf_key_mgr.ConfKeyManager + + +# +# Options defined in cinder.keymgr.conf_key_mgr +# + +# Fixed key returned by key manager, specified in hex (string +# value) +#fixed_key= + + +# +# Options defined in cinder.keymgr.key_mgr +# + +# Authentication url for encryption service. (string value) +#encryption_auth_url=http://localhost:5000/v2.0 + +# Url for encryption service. 
(string value) +#encryption_api_url=http://localhost:9311/v1 + + +[keystone_authtoken] + +# +# Options defined in keystonemiddleware.auth_token +# + +# Prefix to prepend at the beginning of the path. Deprecated, +# use identity_uri. (string value) +#auth_admin_prefix= + +# Host providing the admin Identity API endpoint. Deprecated, +# use identity_uri. (string value) +auth_host=##OPENSTACK_AUTH_HOST## + +# Port of the admin Identity API endpoint. Deprecated, use +# identity_uri. (integer value) +auth_port=##OPENSTACK_AUTH_PORT## + +# Protocol of the admin Identity API endpoint (http or https). +# Deprecated, use identity_uri. (string value) +auth_protocol=http + +# Complete public Identity API endpoint (string value) +auth_uri=$auth_protocol://$auth_host:$auth_port + +# Complete admin Identity API endpoint. This should specify +# the unversioned root endpoint e.g. https://localhost:35357/ +# (string value) +#identity_uri= + +# API version of the admin Identity API endpoint (string +# value) +auth_version=v2.0 + +# Do not handle authorization requests within the middleware, +# but delegate the authorization decision to downstream WSGI +# components (boolean value) +#delay_auth_decision=false + +# Request timeout value for communicating with Identity API +# server. (boolean value) +#http_connect_timeout= + +# How many times are we trying to reconnect when communicating +# with Identity API Server. (integer value) +#http_request_max_retries=3 + +# This option is deprecated and may be removed in a future +# release. Single shared secret with the Keystone +# configuration used for bootstrapping a Keystone +# installation, or otherwise bypassing the normal +# authentication process. This option should not be used, use +# `admin_user` and `admin_password` instead. (string value) +#admin_token= + +# Keystone account username (string value) +admin_user=##CINDER_USER## + +# Keystone account password (string value) +admin_password=##CINDER_PASSWORD## + +# Keystone service account tenant name to validate user tokens +# (string value) +admin_tenant_name=service + +# Env key for the swift cache (string value) +#cache= + +# Required if Keystone server requires client certificate +# (string value) +#certfile= + +# Required if Keystone server requires client certificate +# (string value) +#keyfile= + +# A PEM encoded Certificate Authority to use when verifying +# HTTPs connections. Defaults to system CAs. (string value) +#cafile= + +# Verify HTTPS connections. (boolean value) +#insecure=false + +# Directory used to cache files related to PKI tokens (string +# value) +#signing_dir= + +# Optionally specify a list of memcached server(s) to use for +# caching. If left undefined, tokens will instead be cached +# in-process. (list value) +# Deprecated group/name - [DEFAULT]/memcache_servers +#memcached_servers= + +# In order to prevent excessive effort spent validating +# tokens, the middleware caches previously-seen tokens for a +# configurable duration (in seconds). Set to -1 to disable +# caching completely. (integer value) +#token_cache_time=300 + +# Determines the frequency at which the list of revoked tokens +# is retrieved from the Identity service (in seconds). A high +# number of revocation events combined with a low cache +# duration may significantly reduce performance. (integer +# value) +#revocation_cache_time=10 + +# (optional) if defined, indicate whether token data should be +# authenticated or authenticated and encrypted. Acceptable +# values are MAC or ENCRYPT. 
If MAC, token data is +# authenticated (with HMAC) in the cache. If ENCRYPT, token +# data is encrypted and authenticated in the cache. If the +# value is not one of these options or empty, auth_token will +# raise an exception on initialization. (string value) +#memcache_security_strategy= + +# (optional, mandatory if memcache_security_strategy is +# defined) this string is used for key derivation. (string +# value) +#memcache_secret_key= + +# (optional) number of seconds memcached server is considered +# dead before it is tried again. (integer value) +#memcache_pool_dead_retry=300 + +# (optional) max total number of open connections to every +# memcached server. (integer value) +#memcache_pool_maxsize=10 + +# (optional) socket timeout in seconds for communicating with +# a memcache server. (integer value) +#memcache_pool_socket_timeout=3 + +# (optional) number of seconds a connection to memcached is +# held unused in the pool before it is closed. (integer value) +#memcache_pool_unused_timeout=60 + +# (optional) number of seconds that an operation will wait to +# get a memcache client connection from the pool. (integer +# value) +#memcache_pool_conn_get_timeout=10 + +# (optional) use the advanced (eventlet safe) memcache client +# pool. The advanced pool will only work under python 2.x. +# (boolean value) +#memcache_use_advanced_pool=false + +# (optional) indicate whether to set the X-Service-Catalog +# header. If False, middleware will not ask for service +# catalog on token validation and will not set the X-Service- +# Catalog header. (boolean value) +#include_service_catalog=true + +# Used to control the use and type of token binding. Can be +# set to: "disabled" to not check token binding. "permissive" +# (default) to validate binding information if the bind type +# is of a form known to the server and ignore it if not. +# "strict" like "permissive" but if the bind type is unknown +# the token will be rejected. "required" any form of token +# binding is needed to be allowed. Finally the name of a +# binding method that must be present in tokens. (string +# value) +#enforce_token_bind=permissive + +# If true, the revocation list will be checked for cached +# tokens. This requires that PKI tokens are configured on the +# Keystone server. (boolean value) +#check_revocations_for_cached=false + +# Hash algorithms to use for hashing PKI tokens. This may be a +# single algorithm or multiple. The algorithms are those +# supported by Python standard hashlib.new(). The hashes will +# be tried in the order given, so put the preferred one first +# for performance. The result of the first hash will be stored +# in the cache. This will typically be set to multiple values +# only while migrating from a less secure algorithm to a more +# secure one. Once all the old tokens are expired this option +# should be set to a single value for better performance. +# (list value) +#hash_algorithms=md5 + + +[matchmaker_redis] + +# +# Options defined in oslo.messaging +# + +# Host to locate redis. (string value) +#host=localhost + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + + +[matchmaker_ring] + +# +# Options defined in oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + +[profiler] + +# +# Options defined in cinder.service +# + +# If False fully disable profiling feature. 
(boolean value) +#profiler_enabled=false + +# If False doesn't trace SQL requests. (boolean value) +#trace_sqlalchemy=false + + +[ssl] + +# +# Options defined in cinder.openstack.common.sslutils +# + +# CA certificate file to use to verify connecting clients +# (string value) +#ca_file= + +# Certificate file to use when starting the server securely +# (string value) +#cert_file= + +# Private key file to use when starting the server securely +# (string value) +#key_file= diff --git a/openstack/etc/cinder/policy.json b/openstack/etc/cinder/policy.json new file mode 100644 index 00000000..96f0a73b --- /dev/null +++ b/openstack/etc/cinder/policy.json @@ -0,0 +1,80 @@ +{ + "context_is_admin": "role:admin", + "admin_or_owner": "is_admin:True or project_id:%(project_id)s", + "default": "rule:admin_or_owner", + + "admin_api": "is_admin:True", + + "volume:create": "", + "volume:get_all": "", + "volume:get_volume_metadata": "", + "volume:get_volume_admin_metadata": "rule:admin_api", + "volume:delete_volume_admin_metadata": "rule:admin_api", + "volume:update_volume_admin_metadata": "rule:admin_api", + "volume:get_snapshot": "", + "volume:get_all_snapshots": "", + "volume:extend": "", + "volume:update_readonly_flag": "", + "volume:retype": "", + + "volume_extension:types_manage": "rule:admin_api", + "volume_extension:types_extra_specs": "rule:admin_api", + "volume_extension:volume_type_encryption": "rule:admin_api", + "volume_extension:volume_encryption_metadata": "rule:admin_or_owner", + "volume_extension:extended_snapshot_attributes": "", + "volume_extension:volume_image_metadata": "", + + "volume_extension:quotas:show": "", + "volume_extension:quotas:update": "rule:admin_api", + "volume_extension:quota_classes": "", + + "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", + "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", + "volume_extension:backup_admin_actions:reset_status": "rule:admin_api", + "volume_extension:volume_admin_actions:force_delete": "rule:admin_api", + "volume_extension:volume_admin_actions:force_detach": "rule:admin_api", + "volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api", + "volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api", + "volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api", + + "volume_extension:volume_host_attribute": "rule:admin_api", + "volume_extension:volume_tenant_attribute": "rule:admin_or_owner", + "volume_extension:volume_mig_status_attribute": "rule:admin_api", + "volume_extension:hosts": "rule:admin_api", + "volume_extension:services": "rule:admin_api", + + "volume_extension:volume_manage": "rule:admin_api", + "volume_extension:volume_unmanage": "rule:admin_api", + + "volume:services": "rule:admin_api", + + "volume:create_transfer": "", + "volume:accept_transfer": "", + "volume:delete_transfer": "", + "volume:get_all_transfers": "", + + "volume_extension:replication:promote": "rule:admin_api", + "volume_extension:replication:reenable": "rule:admin_api", + + "backup:create" : "", + "backup:delete": "", + "backup:get": "", + "backup:get_all": "", + "backup:restore": "", + "backup:backup-import": "rule:admin_api", + "backup:backup-export": "rule:admin_api", + + "snapshot_extension:snapshot_actions:update_snapshot_status": "", + + "consistencygroup:create" : "group:nobody", + "consistencygroup:delete": "group:nobody", + "consistencygroup:get": "group:nobody", + "consistencygroup:get_all": "group:nobody", + + "consistencygroup:create_cgsnapshot" : 
"", + "consistencygroup:delete_cgsnapshot": "", + "consistencygroup:get_cgsnapshot": "", + "consistencygroup:get_all_cgsnapshots": "", + + "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api" +} diff --git a/openstack/etc/cinder/rootwrap.conf b/openstack/etc/cinder/rootwrap.conf new file mode 100644 index 00000000..001b90af --- /dev/null +++ b/openstack/etc/cinder/rootwrap.conf @@ -0,0 +1,27 @@ +# Configuration for cinder-rootwrap +# This file should be owned by (and only-writeable by) the root user + +[DEFAULT] +# List of directories to load filter definitions from (separated by ','). +# These directories MUST all be only writeable by root ! +filters_path=/etc/cinder/rootwrap.d,/usr/share/cinder/rootwrap + +# List of directories to search executables in, in case filters do not +# explicitely specify a full path (separated by ',') +# If not specified, defaults to system PATH environment variable. +# These directories MUST all be only writeable by root ! +exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin + +# Enable logging to syslog +# Default value is False +use_syslog=False + +# Which syslog facility to use. +# Valid values include auth, authpriv, syslog, local0, local1... +# Default value is 'syslog' +syslog_log_facility=syslog + +# Which messages to log. +# INFO means log all usage +# ERROR means only log unsuccessful attempts +syslog_log_level=ERROR diff --git a/openstack/etc/cinder/rootwrap.d/volume.filters b/openstack/etc/cinder/rootwrap.d/volume.filters new file mode 100644 index 00000000..2d23743f --- /dev/null +++ b/openstack/etc/cinder/rootwrap.d/volume.filters @@ -0,0 +1,157 @@ +# cinder-rootwrap command filters for volume nodes +# This file should be owned by (and only-writeable by) the root user + +[Filters] +# cinder/volume/iscsi.py: iscsi_helper '--op' ... +ietadm: CommandFilter, ietadm, root +tgtadm: CommandFilter, tgtadm, root +tgt-admin: CommandFilter, tgt-admin, root +cinder-rtstool: CommandFilter, cinder-rtstool, root + +# LVM related show commands +pvs: EnvFilter, env, root, LC_ALL=C, pvs +vgs: EnvFilter, env, root, LC_ALL=C, vgs +lvs: EnvFilter, env, root, LC_ALL=C, lvs +lvdisplay: EnvFilter, env, root, LC_ALL=C, lvdisplay + +# cinder/volume/driver.py: 'lvcreate', '-L', sizestr, '-n', volume_name,.. +# cinder/volume/driver.py: 'lvcreate', '-L', ... +lvcreate: CommandFilter, lvcreate, root + +# cinder/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,... +dd: CommandFilter, dd, root + +# cinder/volume/driver.py: 'lvremove', '-f', %s/%s % ... +lvremove: CommandFilter, lvremove, root + +# cinder/volume/driver.py: 'lvrename', '%(vg)s', '%(orig)s' '(new)s'... +lvrename: CommandFilter, lvrename, root + +# cinder/volume/driver.py: 'lvextend', '-L' '%(new_size)s', '%(lv_name)s' ... +lvextend: CommandFilter, lvextend, root + +# cinder/brick/local_dev/lvm.py: 'lvchange -a y -K ' +lvchange: CommandFilter, lvchange, root + +# cinder/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',... +# cinder/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ... 
+iscsiadm: CommandFilter, iscsiadm, root + +# cinder/volume/drivers/lvm.py: 'shred', '-n3' +# cinder/volume/drivers/lvm.py: 'shred', '-n0', '-z', '-s%dMiB' +shred: CommandFilter, shred, root + +# cinder/volume/utils.py: utils.temporary_chown(path, 0) +chown: CommandFilter, chown, root + +# cinder/volume/utils.py: copy_volume(..., ionice='...') +ionice_1: ChainingRegExpFilter, ionice, root, ionice, -c[0-3], -n[0-7] +ionice_2: ChainingRegExpFilter, ionice, root, ionice, -c[0-3] + +# cinder/volume/utils.py: setup_blkio_cgroup() +cgcreate: CommandFilter, cgcreate, root +cgset: CommandFilter, cgset, root +cgexec: ChainingRegExpFilter, cgexec, root, cgexec, -g, blkio:\S+ + +# cinder/volume/driver.py +dmsetup: CommandFilter, dmsetup, root +ln: CommandFilter, ln, root + +# cinder/image/image_utils.py +qemu-img: EnvFilter, env, root, LC_ALL=C, qemu-img +qemu-img_convert: CommandFilter, qemu-img, root + +udevadm: CommandFilter, udevadm, root + +# cinder/volume/driver.py: utils.read_file_as_root() +cat: CommandFilter, cat, root + +# cinder/volume/nfs.py +stat: CommandFilter, stat, root +mount: CommandFilter, mount, root +df: CommandFilter, df, root +du: CommandFilter, du, root +truncate: CommandFilter, truncate, root +chmod: CommandFilter, chmod, root +rm: CommandFilter, rm, root + +# cinder/volume/drivers/netapp/nfs.py: +netapp_nfs_find: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -name, img-cache.*, -amin, \+\d+ + +# cinder/volume/drivers/glusterfs.py +chgrp: CommandFilter, chgrp, root +umount: CommandFilter, umount, root + +# cinder/volumes/drivers/hds/hds.py: +hus-cmd: CommandFilter, hus-cmd, root +hus-cmd_local: CommandFilter, /usr/local/bin/hus-cmd, root + +# cinder/volumes/drivers/hds/hnas_backend.py +ssc: CommandFilter, ssc, root + +# cinder/brick/initiator/connector.py: +ls: CommandFilter, ls, root +tee: CommandFilter, tee, root +multipath: CommandFilter, multipath, root +systool: CommandFilter, systool, root + +# cinder/volume/drivers/block_device.py +blockdev: CommandFilter, blockdev, root + +# cinder/volume/drivers/ibm/gpfs.py +mv: CommandFilter, mv, root +mmgetstate: CommandFilter, /usr/lpp/mmfs/bin/mmgetstate, root +mmclone: CommandFilter, /usr/lpp/mmfs/bin/mmclone, root +mmlsattr: CommandFilter, /usr/lpp/mmfs/bin/mmlsattr, root +mmchattr: CommandFilter, /usr/lpp/mmfs/bin/mmchattr, root +mmlsconfig: CommandFilter, /usr/lpp/mmfs/bin/mmlsconfig, root +mmlsfs: CommandFilter, /usr/lpp/mmfs/bin/mmlsfs, root +mmlspool: CommandFilter, /usr/lpp/mmfs/bin/mmlspool, root +mkfs: CommandFilter, mkfs, root + +# cinder/volume/drivers/ibm/gpfs.py +# cinder/volume/drivers/ibm/ibmnas.py +find_maxdepth_inum: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -inum, \d+ + +# cinder/brick/initiator/connector.py: +aoe-revalidate: CommandFilter, aoe-revalidate, root +aoe-discover: CommandFilter, aoe-discover, root +aoe-flush: CommandFilter, aoe-flush, root + +# cinder/brick/initiator/linuxscsi.py: +sg_scan: CommandFilter, sg_scan, root + +#cinder/backup/services/tsm.py +dsmc:CommandFilter,/usr/bin/dsmc,root + +# cinder/volume/drivers/hitachi/hbsd_horcm.py +raidqry: CommandFilter, raidqry, root +raidcom: CommandFilter, raidcom, root +pairsplit: CommandFilter, pairsplit, root +paircreate: CommandFilter, paircreate, root +pairdisplay: CommandFilter, pairdisplay, root +pairevtwait: CommandFilter, pairevtwait, root +horcmstart.sh: CommandFilter, horcmstart.sh, root +horcmshutdown.sh: CommandFilter, horcmshutdown.sh, root +horcmgr: EnvFilter, env, root, HORCMINST=, 
/etc/horcmgr + +# cinder/volume/drivers/hitachi/hbsd_snm2.py +auman: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auman +auluref: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluref +auhgdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgdef +aufibre1: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aufibre1 +auhgwwn: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgwwn +auhgmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgmap +autargetmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetmap +aureplicationvvol: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationvvol +auluadd: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluadd +auludel: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auludel +auluchgsize: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluchgsize +auchapuser: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auchapuser +autargetdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetdef +autargetopt: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetopt +autargetini: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetini +auiscsi: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auiscsi +audppool: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/audppool +aureplicationlocal: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationlocal +aureplicationmon: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationmon diff --git a/openstack/etc/glance/glance-api-paste.ini b/openstack/etc/glance/glance-api-paste.ini new file mode 100644 index 00000000..e4baa269 --- /dev/null +++ b/openstack/etc/glance/glance-api-paste.ini @@ -0,0 +1,72 @@ +# Use this pipeline for no auth or image caching - DEFAULT +[pipeline:glance-api] +pipeline = versionnegotiation unauthenticated-context rootapp + +# Use this pipeline for image caching and no auth +[pipeline:glance-api-caching] +pipeline = versionnegotiation unauthenticated-context cache rootapp + +# Use this pipeline for caching w/ management interface but no auth +[pipeline:glance-api-cachemanagement] +pipeline = versionnegotiation unauthenticated-context cache cachemanage rootapp + +# Use this pipeline for keystone auth +[pipeline:glance-api-keystone] +pipeline = versionnegotiation authtoken context rootapp + +# Use this pipeline for keystone auth with image caching +[pipeline:glance-api-keystone+caching] +pipeline = versionnegotiation authtoken context 
cache rootapp
+
+# Use this pipeline for keystone auth with caching and cache management
+[pipeline:glance-api-keystone+cachemanagement]
+pipeline = versionnegotiation authtoken context cache cachemanage rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user.
+[pipeline:glance-api-trusted-auth]
+pipeline = versionnegotiation context rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user, and uses cache management
+[pipeline:glance-api-trusted-auth+cachemanagement]
+pipeline = versionnegotiation context cache cachemanage rootapp
+
+[composite:rootapp]
+paste.composite_factory = glance.api:root_app_factory
+/: apiversions
+/v1: apiv1app
+/v2: apiv2app
+
+[app:apiversions]
+paste.app_factory = glance.api.versions:create_resource
+
+[app:apiv1app]
+paste.app_factory = glance.api.v1.router:API.factory
+
+[app:apiv2app]
+paste.app_factory = glance.api.v2.router:API.factory
+
+[filter:versionnegotiation]
+paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
+
+[filter:cache]
+paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
+
+[filter:cachemanage]
+paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory
+
+[filter:context]
+paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+delay_auth_decision = true
+
+[filter:gzip]
+paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory
diff --git a/openstack/etc/glance/glance-api.conf b/openstack/etc/glance/glance-api.conf
new file mode 100644
index 00000000..c30ce62f
--- /dev/null
+++ b/openstack/etc/glance/glance-api.conf
@@ -0,0 +1,691 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Which backend scheme should Glance use by default if not specified
+# in a request to add a new image to Glance? Known schemes are determined
+# by the known_stores option below.
+# Default: 'file'
+default_store = file
+
+# List of which store classes and store class locations are
+# currently known to glance at startup.
+# Existing but disabled stores:
+#     glance.store.rbd.Store,
+#     glance.store.s3.Store,
+#     glance.store.swift.Store,
+#     glance.store.sheepdog.Store,
+#     glance.store.cinder.Store,
+#     glance.store.gridfs.Store,
+#     glance.store.vmware_datastore.Store,
+#known_stores = glance.store.filesystem.Store,
+#               glance.store.http.Store
+
+
+# Maximum image size (in bytes) that may be uploaded through the
+# Glance API server. Defaults to 1 TB.
+# WARNING: this value should only be increased after careful consideration
+# and must be set to a value under 8 EB (9223372036854775808).
+#image_size_cap = 1099511627776
+
+# Address to bind the API server
+bind_host = 0.0.0.0
+
+# Port to bind the API server to
+bind_port = 9292
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+log_file = /var/log/glance/api.log + +# Backlog requests when creating socket +backlog = 4096 + +# TCP_KEEPIDLE value in seconds when creating socket. +# Not supported on OS X. +#tcp_keepidle = 600 + +# API to use for accessing data. Default value points to sqlalchemy +# package, it is also possible to use: glance.db.registry.api +# data_api = glance.db.sqlalchemy.api + +# Number of Glance API worker processes to start. +# On machines with more than one CPU increasing this value +# may improve performance (especially if using SSL with +# compression turned on). It is typically recommended to set +# this value to the number of CPUs present on your machine. +workers = 1 + +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large tokens +# (typically those generated by the Keystone v3 API with big service +# catalogs) +# max_header_line = 16384 + +# Role used to identify an authenticated user as administrator +#admin_role = admin + +# Allow unauthenticated users to access the API with read-only +# privileges. This only applies when using ContextMiddleware. +#allow_anonymous_access = False + +# Allow access to version 1 of glance api +#enable_v1_api = True + +# Allow access to version 2 of glance api +#enable_v2_api = True + +# Return the URL that references where the data is stored on +# the backend storage system. For example, if using the +# file system store a URL of 'file:///path/to/image' will +# be returned to the user in the 'direct_url' meta-data field. +# The default value is false. +#show_image_direct_url = False + +# Send headers containing user and tenant information when making requests to +# the v1 glance registry. This allows the registry to function as if a user is +# authenticated without the need to authenticate a user itself using the +# auth_token middleware. +# The default value is false. +#send_identity_headers = False + +# Supported values for the 'container_format' image attribute +#container_formats=ami,ari,aki,bare,ovf,ova + +# Supported values for the 'disk_format' image attribute +#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso + +# Directory to use for lock files. Default to a temp directory +# (string value). This setting needs to be the same for both +# glance-scrubber and glance-api. +#lock_path= + +# Property Protections config file +# This file contains the rules for property protections and the roles/policies +# associated with it. +# If this config value is not specified, by default, property protections +# won't be enforced. +# If a value is specified and the file is not found, then the glance-api +# service will not start. +#property_protection_file = + +# Specify whether 'roles' or 'policies' are used in the +# property_protection_file. +# The default value for property_protection_rule_format is 'roles'. +#property_protection_rule_format = roles + +# Specifies how long (in hours) a task is supposed to live in the tasks DB +# after succeeding or failing before getting soft-deleted. +# The default value for task_time_to_live is 48 hours. +# task_time_to_live = 48 + +# This value sets what strategy will be used to determine the image location +# order. Currently two strategies are packaged with Glance 'location_order' +# and 'store_type'. +#location_strategy = location_order + +# ================= Syslog Options ============================ + +# Send logs to syslog (/dev/log) instead of to file specified +# by `log_file` +#use_syslog = False + +# Facility to use. If unset defaults to LOG_USER. 
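+# For example, to route glance-api logs through syslog to the local0
+# facility (a hypothetical choice; any free facility would do), one
+# would also enable use_syslog alongside the facility setting below:
+#use_syslog = True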
+#syslog_log_facility = LOG_LOCAL0 + +# ================= SSL Options =============================== + +# Certificate file to use when starting API server securely +#cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +#key_file = /path/to/keyfile + +# CA certificate file to use to verify connecting clients +#ca_file = /path/to/cafile + +# ================= Security Options ========================== + +# AES key for encrypting store 'location' metadata, including +# -- if used -- Swift or S3 credentials +# Should be set to a random string of length 16, 24 or 32 bytes +#metadata_encryption_key = <16, 24 or 32 char registry metadata key> + +# ============ Registry Options =============================== + +# Address to find the registry server +registry_host = 0.0.0.0 + +# Port the registry server is listening on +registry_port = 9191 + +# What protocol to use when connecting to the registry server? +# Set to https for secure HTTP communication +registry_client_protocol = http + +# The path to the key file to use in SSL connections to the +# registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file +#registry_client_key_file = /path/to/key/file + +# The path to the cert file to use in SSL connections to the +# registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file +#registry_client_cert_file = /path/to/cert/file + +# The path to the certifying authority cert file to use in SSL connections +# to the registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file +#registry_client_ca_file = /path/to/ca/file + +# When using SSL in connections to the registry server, do not require +# validation via a certifying authority. This is the registry's equivalent of +# specifying --insecure on the command line using glanceclient for the API +# Default: False +#registry_client_insecure = False + +# The period of time, in seconds, that the API server will wait for a registry +# request to complete. A value of '0' implies no timeout. +# Default: 600 +#registry_client_timeout = 600 + +# Whether to automatically create the database tables. +# Default: False +#db_auto_create = False + +# Enable DEBUG log messages from sqlalchemy which prints every database +# query and response. +# Default: False +#sqlalchemy_debug = True + +# Pass the user's token through for API requests to the registry. +# Default: True +#use_user_token = True + +# If 'use_user_token' is not in effect then admin credentials +# can be specified. Requests to the registry on behalf of +# the API will use these credentials. +# Admin user name +#admin_user = None +# Admin password +#admin_password = None +# Admin tenant name +#admin_tenant_name = None +# Keystone endpoint +#auth_url = None +# Keystone region +#auth_region = None +# Auth strategy +#auth_strategy = keystone + +# ============ Notification System Options ===================== + +# Driver or drivers to handle sending notifications. Set to +# 'messaging' to send notifications to a message queue. +# notification_driver = noop + +# Default publisher_id for outgoing notifications. 
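+# When several API hosts publish to the same exchange, a host-specific
+# value (e.g. image.node1, a hypothetical name) keeps their notifications
+# distinguishable; the commented value below is the upstream example.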
+# default_publisher_id = image.localhost + +# Messaging driver used for 'messaging' notifications driver +rpc_backend=rabbit + +# Configuration options if sending notifications via rabbitmq (these are +# the defaults) +rabbit_host = ##RABBITMQ_HOST## +rabbit_port = ##RABBITMQ_PORT## +rabbit_use_ssl = false +rabbit_userid = ##RABBITMQ_USER## +rabbit_password = ##RABBITMQ_PASSWORD## +rabbit_virtual_host = / +rabbit_notification_exchange = glance +rabbit_notification_topic = notifications +rabbit_durable_queues = False + +# Configuration options if sending notifications via Qpid (these are +# the defaults) +#qpid_notification_exchange = glance +#qpid_notification_topic = notifications +#qpid_hostname = localhost +#qpid_port = 5672 +#qpid_username = +#qpid_password = +#qpid_sasl_mechanisms = +#qpid_reconnect_timeout = 0 +#qpid_reconnect_limit = 0 +#qpid_reconnect_interval_min = 0 +#qpid_reconnect_interval_max = 0 +#qpid_reconnect_interval = 0 +#qpid_heartbeat = 5 +# Set to 'ssl' to enable SSL +#qpid_protocol = tcp +#qpid_tcp_nodelay = True + +# ============ Filesystem Store Options ======================== + +# Directory that the Filesystem backend store +# writes image data to +filesystem_store_datadir = /var/lib/glance/images/ + +# A list of directories where image data can be stored. +# This option may be specified multiple times for specifying multiple store +# directories. Either one of filesystem_store_datadirs or +# filesystem_store_datadir option is required. A priority number may be given +# after each directory entry, separated by a ":". +# When adding an image, the highest priority directory will be selected, unless +# there is not enough space available in cases where the image size is already +# known. If no priority is given, it is assumed to be zero and the directory +# will be considered for selection last. If multiple directories have the same +# priority, then the one with the most free space available is selected. +# If same store is specified multiple times then BadStoreConfiguration +# exception will be raised. +#filesystem_store_datadirs = /var/lib/glance/images/:1 + +# A path to a JSON file that contains metadata describing the storage +# system. When show_multiple_locations is True the information in this +# file will be returned with any location that is contained in this +# store. +#filesystem_store_metadata_file = None + +# ============ Swift Store Options ============================= + +# Version of the authentication service to use +# Valid versions are '2' for keystone and '1' for swauth and rackspace +swift_store_auth_version = 2 + +# Address where the Swift authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'https://' +# For swauth, use something like 'localhost:8080/v1.0/' +swift_store_auth_address = localhost:5000/v2.0/ + +# User to authenticate against the Swift authentication service +# If you use Swift authentication service, set it to 'account':'user' +# where 'account' is a Swift storage account and 'user' +# is a user in that account +swift_store_user = jdoe:jdoe + +# Auth key for the user authenticating against the +# Swift authentication service +swift_store_key = a86850deb2742ec3cb41518e26aa2d89 + +# Container within the account that the account should use +# for storing images in Swift +swift_store_container = glance + +# Do we create the container if it does not exist? 
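+# (If left False, as below, an image upload fails when the container is
+# missing, so the operator is assumed to create it beforehand; set this
+# to True to have Glance create the container on first upload instead.)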
+swift_store_create_container_on_put = False + +# What size, in MB, should Glance start chunking image files +# and do a large object manifest in Swift? By default, this is +# the maximum object size in Swift, which is 5GB +swift_store_large_object_size = 5120 + +# swift_store_config_file = glance-swift.conf +# This file contains references for each of the configured +# Swift accounts/backing stores. If used, this option can prevent +# credentials being stored in the database. Using Swift references +# is disabled if this config is left blank. + +# The reference to the default Swift parameters to use for adding new images. +# default_swift_reference = 'ref1' + +# When doing a large object manifest, what size, in MB, should +# Glance write chunks to Swift? This amount of data is written +# to a temporary disk buffer during the process of chunking +# the image file, and the default is 200MB +swift_store_large_object_chunk_size = 200 + +# Whether to use ServiceNET to communicate with the Swift storage servers. +# (If you aren't RACKSPACE, leave this False!) +# +# To use ServiceNET for authentication, prefix hostname of +# `swift_store_auth_address` with 'snet-'. +# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/ +swift_enable_snet = False + +# If set to True enables multi-tenant storage mode which causes Glance images +# to be stored in tenant specific Swift accounts. +#swift_store_multi_tenant = False + +# A list of swift ACL strings that will be applied as both read and +# write ACLs to the containers created by Glance in multi-tenant +# mode. This grants the specified tenants/users read and write access +# to all newly created image objects. The standard swift ACL string +# formats are allowed, including: +# : +# : +# *: +# Multiple ACLs can be combined using a comma separated list, for +# example: swift_store_admin_tenants = service:glance,*:admin +#swift_store_admin_tenants = + +# The region of the swift endpoint to be used for single tenant. This setting +# is only necessary if the tenant has multiple swift endpoints. +#swift_store_region = + +# If set to False, disables SSL layer compression of https swift requests. +# Setting to 'False' may improve performance for images which are already +# in a compressed format, eg qcow2. If set to True, enables SSL layer +# compression (provided it is supported by the target swift proxy). +#swift_store_ssl_compression = True + +# The number of times a Swift download will be retried before the +# request fails +#swift_store_retry_get_count = 0 + +# Bypass SSL verification for Swift +#swift_store_auth_insecure = False + +# ============ S3 Store Options ============================= + +# Address where the S3 authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'http://' +s3_store_host = localhost:8080/v1.0/ + +# User to authenticate against the S3 authentication service +s3_store_access_key = <20-char AWS access key> + +# Auth key for the user authenticating against the +# S3 authentication service +s3_store_secret_key = <40-char AWS secret key> + +# Container within the account that the account should use +# for storing images in S3. Note that S3 has a flat namespace, +# so you need a unique bucket name for your glance images. An +# easy way to do this is append your AWS access key to "glance". +# S3 buckets in AWS *must* be lowercased, so remember to lowercase +# your AWS access key if you use it in your bucket name below! 
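+# For example, following that advice with the (hypothetical) access key
+# AKIAIOSFODNN7EXAMPLE would give:
+#s3_store_bucket = glanceakiaiosfodnn7example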
+s3_store_bucket = glance + +# Do we create the bucket if it does not exist? +s3_store_create_bucket_on_put = False + +# When sending images to S3, the data will first be written to a +# temporary buffer on disk. By default the platform's temporary directory +# will be used. If required, an alternative directory can be specified here. +#s3_store_object_buffer_dir = /path/to/dir + +# When forming a bucket url, boto will either set the bucket name as the +# subdomain or as the first token of the path. Amazon's S3 service will +# accept it as the subdomain, but Swift's S3 middleware requires it be +# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'. +#s3_store_bucket_url_format = subdomain + +# Size, in MB, should S3 start chunking image files +# and do a multipart upload in S3. The default is 100MB. +#s3_store_large_object_size = 100 + +# Multipart upload part size, in MB, should S3 use when uploading +# parts. The size must be greater than or equal to +# 5MB. The default is 10MB. +#s3_store_large_object_chunk_size = 10 + +# The number of thread pools to perform a multipart upload +# in S3. The default is 10. +#s3_store_thread_pools = 10 + +# ============ RBD Store Options ============================= + +# Ceph configuration file path +# If using cephx authentication, this file should +# include a reference to the right keyring +# in a client. section +#rbd_store_ceph_conf = /etc/ceph/ceph.conf + +# RADOS user to authenticate as (only applicable if using cephx) +# If , a default will be chosen based on the client. section +# in rbd_store_ceph_conf +#rbd_store_user = + +# RADOS pool in which images are stored +#rbd_store_pool = images + +# RADOS images will be chunked into objects of this size (in megabytes). +# For best performance, this should be a power of two +#rbd_store_chunk_size = 8 + +# ============ Sheepdog Store Options ============================= + +sheepdog_store_address = localhost + +sheepdog_store_port = 7000 + +# Images will be chunked into objects of this size (in megabytes). +# For best performance, this should be a power of two +sheepdog_store_chunk_size = 64 + +# ============ Cinder Store Options =============================== + +# Info to match when looking for cinder in the service catalog +# Format is : separated values of the form: +# :: (string value) +#cinder_catalog_info = volume:cinder:publicURL + +# Override service catalog lookup with template for cinder endpoint +# e.g. http://localhost:8776/v1/%(project_id)s (string value) +#cinder_endpoint_template = + +# Region name of this node (string value) +#os_region_name = + +# Location of ca certicates file to use for cinder client requests +# (string value) +#cinder_ca_certificates_file = + +# Number of cinderclient retries on failed http calls (integer value) +#cinder_http_retries = 3 + +# Allow to perform insecure SSL requests to cinder (boolean value) +#cinder_api_insecure = False + +# ============ VMware Datastore Store Options ===================== + +# ESX/ESXi or vCenter Server target system. +# The server value can be an IP address or a DNS name +# e.g. localhost, localhost:443, www.vmware-infra.com +#vmware_server_host = + +# Server username (string value) +#vmware_server_username = + +# Server password (string value) +#vmware_server_password = + +# Inventory path to a datacenter (string value) +# Value optional when vmware_server_ip is an ESX/ESXi host: if specified +# should be `ha-datacenter`. 
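+# For example, when vmware_server_host points at a standalone ESX/ESXi
+# host, per the note above:
+#   vmware_datacenter_path = ha-datacenter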
+#vmware_datacenter_path = + +# Datastore associated with the datacenter (string value) +#vmware_datastore_name = + +# The number of times we retry on failures +# e.g., socket error, etc (integer value) +#vmware_api_retry_count = 10 + +# The interval used for polling remote tasks +# invoked on VMware ESX/VC server in seconds (integer value) +#vmware_task_poll_interval = 5 + +# Absolute path of the folder containing the images in the datastore +# (string value) +#vmware_store_image_dir = /openstack_glance + +# Allow to perform insecure SSL requests to the target system (boolean value) +#vmware_api_insecure = False + +# ============ Delayed Delete Options ============================= + +# Turn on/off delayed delete +delayed_delete = False + +# Delayed delete time in seconds +scrub_time = 43200 + +# Directory that the scrubber will use to remind itself of what to delete +# Make sure this is also set in glance-scrubber.conf +scrubber_datadir = /var/lib/glance/scrubber + +# =============== Quota Options ================================== + +# The maximum number of image members allowed per image +#image_member_quota = 128 + +# The maximum number of image properties allowed per image +#image_property_quota = 128 + +# The maximum number of tags allowed per image +#image_tag_quota = 128 + +# The maximum number of locations allowed per image +#image_location_quota = 10 + +# Set a system wide quota for every user. This value is the total number +# of bytes that a user can use across all storage systems. A value of +# 0 means unlimited. +#user_storage_quota = 0 + +# =============== Image Cache Options ============================= + +# Base directory that the Image Cache uses +image_cache_dir = /var/lib/glance/image-cache/ + +# =============== Manager Options ================================= + +# DEPRECATED. TO BE REMOVED IN THE JUNO RELEASE. +# Whether or not to enforce that all DB tables have charset utf8. +# If your database tables do not have charset utf8 you will +# need to convert before this option is removed. This option is +# only relevant if your database engine is MySQL. +#db_enforce_mysql_charset = True + +# =============== Database Options ================================= + +[database] +# The file name to use with SQLite (string value) +#sqlite_db = glance.sqlite + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous = True + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend = sqlalchemy + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = +connection=postgresql://##GLANCE_DB_USER##:##GLANCE_DB_PASSWORD##@onenode/glance + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. 
Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode = TRADITIONAL + +# Timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout = 3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = + +# Maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = + +# Verbosity of SQL debugging information. 0=None, +# 100=Everything (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Enable the experimental use of database reconnect on +# connection lost (boolean value) +#use_db_reconnect = False + +# seconds between db connection retries (integer value) +#db_retry_interval = 1 + +# Whether to increase interval between db connection retries, +# up to db_max_retry_interval (boolean value) +#db_inc_retry_interval = True + +# max seconds between db connection retries, if +# db_inc_retry_interval is enabled (integer value) +#db_max_retry_interval = 10 + +# maximum db connection retries before error is raised. +# (setting -1 implies an infinite retry count) (integer value) +#db_max_retries = 20 + +[keystone_authtoken] +auth_uri = ##KEYSTONE_PUBLIC_URL## +identity_uri = ##IDENTITY_URI## +admin_tenant_name = service +admin_user = ##GLANCE_SERVICE_USER## +admin_password = ##GLANCE_SERVICE_PASSWORD## + +[paste_deploy] +# Name of the paste configuration file that defines the available pipelines +#config_file = glance-api-paste.ini + +# Partial name of a pipeline in your paste configuration file with the +# service name removed. For example, if your paste section name is +# [pipeline:glance-api-keystone], you would configure the flavor below +# as 'keystone'. +flavor = keystone + +[store_type_location_strategy] +# The scheme list to use to get store preference order. The scheme must be +# registered by one of the stores defined by the 'known_stores' config option. +# This option will be applied when you using 'store_type' option as image +# location strategy defined by the 'location_strategy' config option. 
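+# For example, to try filesystem-backed image locations before HTTP ones
+# (assuming both stores are enabled in known_stores above):
+#   store_type_preference = filesystem,http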
+#store_type_preference = diff --git a/openstack/etc/glance/glance-cache.conf b/openstack/etc/glance/glance-cache.conf new file mode 100644 index 00000000..a4453353 --- /dev/null +++ b/openstack/etc/glance/glance-cache.conf @@ -0,0 +1,200 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +#verbose = False + +# Show debugging output in logs (sets DEBUG log level output) +#debug = False + +# Log to this file. Make sure you do not set the same log file for both the API +# and registry servers! +# +# If `log_file` is omitted and `use_syslog` is false, then log messages are +# sent to stdout as a fallback. +log_file = /var/log/glance/image-cache.log + +# Send logs to syslog (/dev/log) instead of to file specified by `log_file` +#use_syslog = False + +# Directory that the Image Cache writes data to +image_cache_dir = /var/lib/glance/image-cache/ + +# Number of seconds after which we should consider an incomplete image to be +# stalled and eligible for reaping +image_cache_stall_time = 86400 + +# Max cache size in bytes +image_cache_max_size = 10737418240 + +# Address to find the registry server +registry_host = 0.0.0.0 + +# Port the registry server is listening on +registry_port = 9191 + +# Auth settings if using Keystone +# auth_url = http://localhost:5000/v2.0/ +# admin_tenant_name = %SERVICE_TENANT_NAME% +# admin_user = %SERVICE_USER% +# admin_password = %SERVICE_PASSWORD% + +# List of which store classes and store class locations are +# currently known to glance at startup. +# known_stores = glance.store.filesystem.Store, +# glance.store.http.Store, +# glance.store.rbd.Store, +# glance.store.s3.Store, +# glance.store.swift.Store, +# glance.store.sheepdog.Store, +# glance.store.cinder.Store, +# glance.store.vmware_datastore.Store, + +# ============ Filesystem Store Options ======================== + +# Directory that the Filesystem backend store +# writes image data to +filesystem_store_datadir = /var/lib/glance/images/ + +# ============ Swift Store Options ============================= + +# Version of the authentication service to use +# Valid versions are '2' for keystone and '1' for swauth and rackspace +swift_store_auth_version = 2 + +# Address where the Swift authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'https://' +# For swauth, use something like 'localhost:8080/v1.0/' +swift_store_auth_address = localhost:5000/v2.0/ + +# User to authenticate against the Swift authentication service +# If you use Swift authentication service, set it to 'account':'user' +# where 'account' is a Swift storage account and 'user' +# is a user in that account +swift_store_user = jdoe:jdoe + +# Auth key for the user authenticating against the +# Swift authentication service +swift_store_key = a86850deb2742ec3cb41518e26aa2d89 + +# Container within the account that the account should use +# for storing images in Swift +swift_store_container = glance + +# Do we create the container if it does not exist? +swift_store_create_container_on_put = False + +# What size, in MB, should Glance start chunking image files +# and do a large object manifest in Swift? By default, this is +# the maximum object size in Swift, which is 5GB +swift_store_large_object_size = 5120 + +# When doing a large object manifest, what size, in MB, should +# Glance write chunks to Swift? 
This amount of data is written +# to a temporary disk buffer during the process of chunking +# the image file, and the default is 200MB +swift_store_large_object_chunk_size = 200 + +# Whether to use ServiceNET to communicate with the Swift storage servers. +# (If you aren't RACKSPACE, leave this False!) +# +# To use ServiceNET for authentication, prefix hostname of +# `swift_store_auth_address` with 'snet-'. +# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/ +swift_enable_snet = False + +# ============ S3 Store Options ============================= + +# Address where the S3 authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'http://' +s3_store_host = localhost:8080/v1.0/ + +# User to authenticate against the S3 authentication service +s3_store_access_key = <20-char AWS access key> + +# Auth key for the user authenticating against the +# S3 authentication service +s3_store_secret_key = <40-char AWS secret key> + +# Container within the account that the account should use +# for storing images in S3. Note that S3 has a flat namespace, +# so you need a unique bucket name for your glance images. An +# easy way to do this is append your AWS access key to "glance". +# S3 buckets in AWS *must* be lowercased, so remember to lowercase +# your AWS access key if you use it in your bucket name below! +s3_store_bucket = glance + +# Do we create the bucket if it does not exist? +s3_store_create_bucket_on_put = False + +# When sending images to S3, the data will first be written to a +# temporary buffer on disk. By default the platform's temporary directory +# will be used. If required, an alternative directory can be specified here. +# s3_store_object_buffer_dir = /path/to/dir + +# ============ Cinder Store Options =========================== + +# Info to match when looking for cinder in the service catalog +# Format is : separated values of the form: +# :: (string value) +#cinder_catalog_info = volume:cinder:publicURL + +# Override service catalog lookup with template for cinder endpoint +# e.g. http://localhost:8776/v1/%(project_id)s (string value) +#cinder_endpoint_template = + +# Region name of this node (string value) +#os_region_name = + +# Location of ca certicates file to use for cinder client requests +# (string value) +#cinder_ca_certificates_file = + +# Number of cinderclient retries on failed http calls (integer value) +#cinder_http_retries = 3 + +# Allow to perform insecure SSL requests to cinder (boolean value) +#cinder_api_insecure = False + +# ============ VMware Datastore Store Options ===================== + +# ESX/ESXi or vCenter Server target system. +# The server value can be an IP address or a DNS name +# e.g. localhost, localhost:443, www.vmware-infra.com +#vmware_server_host = + +# Server username (string value) +#vmware_server_username = + +# Server password (string value) +#vmware_server_password = + +# Inventory path to a datacenter (string value) +# Value optional when vmware_server_ip is an ESX/ESXi host: if specified +# should be `ha-datacenter`. 
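+# For a vCenter server this is instead the datacenter's inventory path,
+# e.g. a datacenter named dc-openstack (a hypothetical name) would be
+# given as "dc-openstack".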
+#vmware_datacenter_path =
+
+# Datastore associated with the datacenter (string value)
+#vmware_datastore_name =
+
+# The number of times we retry on failures
+# e.g., socket error, etc (integer value)
+#vmware_api_retry_count = 10
+
+# The interval used for polling remote tasks
+# invoked on VMware ESX/VC server in seconds (integer value)
+#vmware_task_poll_interval = 5
+
+# Absolute path of the folder containing the images in the datastore
+# (string value)
+#vmware_store_image_dir = /openstack_glance
+
+# Allow to perform insecure SSL requests to the target system (boolean value)
+#vmware_api_insecure = False
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+# metadata_encryption_key = <16, 24 or 32 char registry metadata key>
diff --git a/openstack/etc/glance/glance-registry-paste.ini b/openstack/etc/glance/glance-registry-paste.ini
new file mode 100644
index 00000000..ab8c2856
--- /dev/null
+++ b/openstack/etc/glance/glance-registry-paste.ini
@@ -0,0 +1,25 @@
+# Use this pipeline for no auth - DEFAULT
+[pipeline:glance-registry]
+pipeline = unauthenticated-context registryapp
+
+# Use this pipeline for keystone auth
+[pipeline:glance-registry-keystone]
+pipeline = authtoken context registryapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user.
+[pipeline:glance-registry-trusted-auth]
+pipeline = context registryapp
+
+[app:registryapp]
+paste.app_factory = glance.registry.api:API.factory
+
+[filter:context]
+paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
diff --git a/openstack/etc/glance/glance-registry.conf b/openstack/etc/glance/glance-registry.conf
new file mode 100644
index 00000000..fbfe7c8d
--- /dev/null
+++ b/openstack/etc/glance/glance-registry.conf
@@ -0,0 +1,197 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Address to bind the registry server
+bind_host = 0.0.0.0
+
+# Port to bind the registry server to
+bind_port = 9191
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+log_file = /var/log/glance/registry.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package.
+#data_api = glance.db.sqlalchemy.api
+
+# Number of Glance Registry worker processes to start.
+# On machines with more than one CPU increasing this value
+# may improve performance (especially if using SSL with
+# compression turned on). It is typically recommended to set
+# this value to the number of CPUs present on your machine.
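+# Following that guidance, a four-core controller (hypothetical sizing)
+# would use workers = 4; the single worker below is a conservative value
+# for this initial configuration.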
+workers = 1 + +# Enable Registry API versions individually or simultaneously +#enable_v1_registry = True +#enable_v2_registry = True + +# Limit the api to return `param_limit_max` items in a call to a container. If +# a larger `limit` query param is provided, it will be reduced to this value. +api_limit_max = 1000 + +# If a `limit` query param is not provided in an api request, it will +# default to `limit_param_default` +limit_param_default = 25 + +# Role used to identify an authenticated user as administrator +#admin_role = admin + +# Whether to automatically create the database tables. +# Default: False +#db_auto_create = False + +# Enable DEBUG log messages from sqlalchemy which prints every database +# query and response. +# Default: False +#sqlalchemy_debug = True + +# ================= Syslog Options ============================ + +# Send logs to syslog (/dev/log) instead of to file specified +# by `log_file` +#use_syslog = False + +# Facility to use. If unset defaults to LOG_USER. +#syslog_log_facility = LOG_LOCAL1 + +# ================= SSL Options =============================== + +# Certificate file to use when starting registry server securely +#cert_file = /path/to/certfile + +# Private key file to use when starting registry server securely +#key_file = /path/to/keyfile + +# CA certificate file to use to verify connecting clients +#ca_file = /path/to/cafile + +# ================= Database Options ========================== + +[database] +# The file name to use with SQLite (string value) +#sqlite_db = glance.sqlite + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous = True + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend = sqlalchemy + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = +connection=postgresql://##GLANCE_DB_USER##:##GLANCE_DB_PASSWORD##@onenode/glance + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode = TRADITIONAL + +# Timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout = 3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = + +# Maximum db connection retries during startup. 
(setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = + +# Verbosity of SQL debugging information. 0=None, +# 100=Everything (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Enable the experimental use of database reconnect on +# connection lost (boolean value) +#use_db_reconnect = False + +# seconds between db connection retries (integer value) +#db_retry_interval = 1 + +# Whether to increase interval between db connection retries, +# up to db_max_retry_interval (boolean value) +#db_inc_retry_interval = True + +# max seconds between db connection retries, if +# db_inc_retry_interval is enabled (integer value) +#db_max_retry_interval = 10 + +# maximum db connection retries before error is raised. +# (setting -1 implies an infinite retry count) (integer value) +#db_max_retries = 20 + +[keystone_authtoken] +auth_uri = ##KEYSTONE_PUBLIC_URL## +identity_uri = ##IDENTITY_URI## +admin_tenant_name = service +admin_user = ##GLANCE_SERVICE_USER## +admin_password = ##GLANCE_SERVICE_PASSWORD## + +[paste_deploy] +# Name of the paste configuration file that defines the available pipelines +#config_file = glance-registry-paste.ini + +# Partial name of a pipeline in your paste configuration file with the +# service name removed. For example, if your paste section name is +# [pipeline:glance-registry-keystone], you would configure the flavor below +# as 'keystone'. +flavor = keystone diff --git a/openstack/etc/glance/glance-scrubber.conf b/openstack/etc/glance/glance-scrubber.conf new file mode 100644 index 00000000..5c5e8d4c --- /dev/null +++ b/openstack/etc/glance/glance-scrubber.conf @@ -0,0 +1,56 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +#verbose = False + +# Show debugging output in logs (sets DEBUG log level output) +#debug = False + +# Log to this file. Make sure you do not set the same log file for both the API +# and registry servers! +# +# If `log_file` is omitted and `use_syslog` is false, then log messages are +# sent to stdout as a fallback. 
+log_file = /var/log/glance/scrubber.log + +# Send logs to syslog (/dev/log) instead of to file specified by `log_file` +#use_syslog = False + +# Should we run our own loop or rely on cron/scheduler to run us +daemon = False + +# Loop time between checking for new items to schedule for delete +wakeup_time = 300 + +# Directory that the scrubber will use to remind itself of what to delete +# Make sure this is also set in glance-api.conf +scrubber_datadir = /var/lib/glance/scrubber + +# Only one server in your deployment should be designated the cleanup host +cleanup_scrubber = False + +# pending_delete items older than this time are candidates for cleanup +cleanup_scrubber_time = 86400 + +# Address to find the registry server for cleanups +registry_host = 0.0.0.0 + +# Port the registry server is listening on +registry_port = 9191 + +# Auth settings if using Keystone +# auth_url = http://127.0.0.1:5000/v2.0/ +# admin_tenant_name = %SERVICE_TENANT_NAME% +# admin_user = %SERVICE_USER% +# admin_password = %SERVICE_PASSWORD% + +# Directory to use for lock files. Default to a temp directory +# (string value). This setting needs to be the same for both +# glance-scrubber and glance-api. +#lock_path= + +# ================= Security Options ========================== + +# AES key for encrypting store 'location' metadata, including +# -- if used -- Swift or S3 credentials +# Should be set to a random string of length 16, 24 or 32 bytes +#metadata_encryption_key = <16, 24 or 32 char registry metadata key> diff --git a/openstack/etc/glance/logging.conf b/openstack/etc/glance/logging.conf new file mode 100644 index 00000000..8de2ffd1 --- /dev/null +++ b/openstack/etc/glance/logging.conf @@ -0,0 +1,54 @@ +[loggers] +keys=root,api,registry,combined + +[formatters] +keys=normal,normal_with_name,debug + +[handlers] +keys=production,file,devel + +[logger_root] +level=NOTSET +handlers=devel + +[logger_api] +level=DEBUG +handlers=devel +qualname=glance-api + +[logger_registry] +level=DEBUG +handlers=devel +qualname=glance-registry + +[logger_combined] +level=DEBUG +handlers=devel +qualname=glance-combined + +[handler_production] +class=handlers.SysLogHandler +level=ERROR +formatter=normal_with_name +args=(('127.0.0.1', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) + +[handler_file] +class=FileHandler +level=DEBUG +formatter=normal_with_name +args=('glance.log', 'w') + +[handler_devel] +class=StreamHandler +level=NOTSET +formatter=debug +args=(sys.stdout,) + +[formatter_normal] +format=%(asctime)s %(levelname)s %(message)s + +[formatter_normal_with_name] +format=(%(name)s): %(asctime)s %(levelname)s %(message)s + +[formatter_debug] +format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s diff --git a/openstack/etc/glance/policy.json b/openstack/etc/glance/policy.json new file mode 100644 index 00000000..8b7e6871 --- /dev/null +++ b/openstack/etc/glance/policy.json @@ -0,0 +1,32 @@ +{ + "context_is_admin": "role:admin", + "default": "", + + "add_image": "", + "delete_image": "", + "get_image": "", + "get_images": "", + "modify_image": "", + "publicize_image": "role:admin", + "copy_from": "", + + "download_image": "", + "upload_image": "", + + "delete_image_location": "", + "get_image_location": "", + "set_image_location": "", + + "add_member": "", + "delete_member": "", + "get_member": "", + "get_members": "", + "modify_member": "", + + "manage_image_cache": "role:admin", + + "get_task": "", + "get_tasks": "", + "add_task": "", + "modify_task": "" +} diff --git 
a/openstack/etc/glance/schema-image.json b/openstack/etc/glance/schema-image.json
new file mode 100644
index 00000000..5aafd6b3
--- /dev/null
+++ b/openstack/etc/glance/schema-image.json
@@ -0,0 +1,28 @@
+{
+    "kernel_id": {
+        "type": "string",
+        "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+        "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image."
+    },
+    "ramdisk_id": {
+        "type": "string",
+        "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+        "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image."
+    },
+    "instance_uuid": {
+        "type": "string",
+        "description": "ID of instance used to create this image."
+    },
+    "architecture": {
+        "description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
+        "type": "string"
+    },
+    "os_distro": {
+        "description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
+        "type": "string"
+    },
+    "os_version": {
+        "description": "Operating system version as specified by the distributor",
+        "type": "string"
+    }
+}
diff --git a/openstack/etc/horizon/apache-horizon.conf b/openstack/etc/horizon/apache-horizon.conf
new file mode 100644
index 00000000..e51fbd17
--- /dev/null
+++ b/openstack/etc/horizon/apache-horizon.conf
@@ -0,0 +1,32 @@
+<VirtualHost *:80>
+    WSGIScriptAlias /horizon /var/lib/horizon/django.wsgi
+    WSGIDaemonProcess horizon user=horizon group=horizon processes=3 threads=10 home=/var/lib/horizon display-name=horizon
+    WSGIApplicationGroup %{GLOBAL}
+
+    SetEnv APACHE_RUN_USER apache
+    SetEnv APACHE_RUN_GROUP apache
+    WSGIProcessGroup horizon
+
+    DocumentRoot /var/lib/horizon/.blackhole
+    Alias /static /var/lib/horizon/openstack_dashboard/static
+
+    <Directory /var/lib/horizon/>
+        Options Indexes FollowSymLinks MultiViews
+        AllowOverride None
+        # Apache 2.4 uses mod_authz_host for access control now (instead of
+        # "Allow")
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+    </Directory>
+
+    ErrorLog /var/log/httpd/horizon_error.log
+    LogLevel warn
+    CustomLog /var/log/httpd/horizon_access.log combined
+</VirtualHost>
+
+WSGISocketPrefix /var/run/httpd
diff --git a/openstack/etc/horizon/openstack_dashboard/local_settings.py b/openstack/etc/horizon/openstack_dashboard/local_settings.py
new file mode 100644
index 00000000..dc757eb1
--- /dev/null
+++ b/openstack/etc/horizon/openstack_dashboard/local_settings.py
@@ -0,0 +1,549 @@
+import os
+
+from django.utils.translation import ugettext_lazy as _
+
+from openstack_dashboard import exceptions
+
+DEBUG = True
+TEMPLATE_DEBUG = DEBUG
+
+STATIC_ROOT = "/var/lib/horizon/openstack_dashboard/static"
+
+# Required for Django 1.5.
+# If horizon is running in production (DEBUG is False), set this
+# with the list of host/domain names that the application can serve.
+# For more information see:
+# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
+#ALLOWED_HOSTS = ['horizon.example.com', ]
+ALLOWED_HOSTS = ['*']
+
+# Set SSL proxy settings:
+# For Django 1.4+ pass this header from the proxy after terminating the SSL,
+# and don't forget to strip it from the client's request.
+# For more information see:
+# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
+# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
+
+# If Horizon is being served through SSL, then uncomment the following two
+# settings to better secure the cookies from security exploits
+#CSRF_COOKIE_SECURE = True
+#SESSION_COOKIE_SECURE = True
+
+# Overrides for OpenStack API versions. Use this setting to force the
+# OpenStack dashboard to use a specific API version for a given service API.
+# NOTE: The version should be formatted as it appears in the URL for the
+# service API. For example, the identity service APIs have inconsistent
+# use of the decimal point, so valid options would be "2.0" or "3".
+# OPENSTACK_API_VERSIONS = {
+#     "data_processing": 1.1,
+#     "identity": 3,
+#     "volume": 2
+# }
+
+# Set this to True if running a multi-domain model. When this is enabled,
+# users will be required to enter the Domain name in addition to the
+# username to log in.
+# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
+
+# Overrides the default domain used when running on a single-domain model
+# with Keystone V3. All entities will be created in the default domain.
+# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
+
+# Set Console type:
+# valid options would be "AUTO" (default), "VNC", "SPICE", "RDP" or None
+# Set to None explicitly if you want to deactivate the console.
+# CONSOLE_TYPE = "AUTO"
+
+# Default OpenStack Dashboard configuration.
+HORIZON_CONFIG = {
+    'user_home': 'openstack_dashboard.views.get_user_home',
+    'ajax_queue_limit': 10,
+    'auto_fade_alerts': {
+        'delay': 3000,
+        'fade_duration': 1500,
+        'types': ['alert-success', 'alert-info']
+    },
+    'help_url': "http://docs.openstack.org",
+    'exceptions': {'recoverable': exceptions.RECOVERABLE,
+                   'not_found': exceptions.NOT_FOUND,
+                   'unauthorized': exceptions.UNAUTHORIZED},
+    'modal_backdrop': 'static',
+    'angular_modules': [],
+    'js_files': [],
+}
+
+# Specify a regular expression to validate user passwords.
+# HORIZON_CONFIG["password_validator"] = {
+#     "regex": '.*',
+#     "help_text": _("Your password does not meet the requirements.")
+# }
+
+# Disable simplified floating IP address management for deployments with
+# multiple floating IP pools or complex network requirements.
+# HORIZON_CONFIG["simple_ip_management"] = False
+
+# Turn off browser autocompletion for forms including the login form and
+# the database creation workflow if so desired.
+# HORIZON_CONFIG["password_autocomplete"] = "off"
+
+# Setting this to True will disable the reveal button for password fields,
+# including on the login form.
+# HORIZON_CONFIG["disable_password_reveal"] = False
+
+#LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
+
+LOCAL_PATH = "/var/lib/horizon"
+
+# Set custom secret key:
+# You can either set it to a specific value or you can let horizon generate a
+# default secret key that is unique on this machine, i.e. regardless of the
+# number of Python WSGI workers (if used behind Apache+mod_wsgi). However,
+# there may be situations where you would want to set this explicitly, e.g.
+# when multiple dashboard instances are distributed on different machines
+# (usually behind a load-balancer). Either make sure that all requests in a
+# session get routed to the same dashboard instance, or set the same
+# SECRET_KEY for all of them.
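+# A minimal sketch of that explicit alternative (the value below is a
+# hypothetical placeholder, not a usable key -- generate your own long
+# random string and use the same one on every dashboard instance):
+#SECRET_KEY = 'replace-with-one-long-random-string-shared-by-all-nodes'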
+from horizon.utils import secret_key
+SECRET_KEY = secret_key.generate_or_read_from_file(
+    os.path.join(LOCAL_PATH, '.secret_key_store'))
+
+# We recommend you use memcached for development; otherwise after every reload
+# of the django development server, you will have to log in again. To use
+# memcached, set CACHES to something like:
+CACHES = {
+    'default': {
+        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
+        'LOCATION': '127.0.0.1:11211',
+    }
+}
+
+#CACHES = {
+#    'default': {
+#        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
+#    }
+#}
+
+# Send email to the console by default
+EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
+# Or send them to /dev/null
+#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
+
+# Configure these for your outgoing email host
+# EMAIL_HOST = 'smtp.my-company.com'
+# EMAIL_PORT = 25
+# EMAIL_HOST_USER = 'djangomail'
+# EMAIL_HOST_PASSWORD = 'top-secret!'
+
+# For multiple regions uncomment this configuration, and add (endpoint, title).
+# AVAILABLE_REGIONS = [
+#     ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
+#     ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
+# ]
+
+OPENSTACK_HOST = "127.0.0.1"
+OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
+OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
+
+# Disable SSL certificate checks (useful for self-signed certificates):
+# OPENSTACK_SSL_NO_VERIFY = True
+
+# The CA certificate to use to verify SSL connections
+# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
+
+# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
+# capabilities of the auth backend for Keystone.
+# If Keystone has been configured to use LDAP as the auth backend then set
+# can_edit_user to False and name to 'ldap'.
+#
+# TODO(tres): Remove these once Keystone has an API to identify auth backend.
+OPENSTACK_KEYSTONE_BACKEND = {
+    'name': 'native',
+    'can_edit_user': True,
+    'can_edit_group': True,
+    'can_edit_project': True,
+    'can_edit_domain': True,
+    'can_edit_role': True
+}
+
+#Setting this to True will add a new "Retrieve Password" action on an
+#instance, allowing Admin session password retrieval/decryption.
+#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
+
+# The Xen Hypervisor has the ability to set the mount point for volumes
+# attached to instances (other Hypervisors currently do not). Setting
+# can_set_mount_point to True will add the option to set the mount point
+# from the UI.
+OPENSTACK_HYPERVISOR_FEATURES = {
+    'can_set_mount_point': False,
+    'can_set_password': False,
+}
+
+# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional
+# services provided by cinder that are not exposed by its extension API.
+OPENSTACK_CINDER_FEATURES = {
+    'enable_backup': False,
+}
+
+# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
+# services provided by neutron. Options currently available are load
+# balancer service, security groups, quotas, VPN service.
+OPENSTACK_NEUTRON_NETWORK = {
+    'enable_router': True,
+    'enable_quotas': True,
+    'enable_ipv6': True,
+    'enable_distributed_router': False,
+    'enable_ha_router': False,
+    'enable_lb': True,
+    'enable_firewall': True,
+    'enable_vpn': True,
+    # The profile_support option is used to detect if an external router can be
+    # configured via the dashboard. When using specific plugins the
+    # profile_support can be turned on if needed.
+    'profile_support': None,
+    #'profile_support': 'cisco',
+    # Set which provider network types are supported. Only the network types
+    # in this list will be available to choose from when creating a network.
+    # Network types include local, flat, vlan, gre, and vxlan.
+    'supported_provider_types': ['*'],
+}
+
+# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
+# in the OpenStack Dashboard related to the Image service, such as the list
+# of supported image formats.
+# OPENSTACK_IMAGE_BACKEND = {
+#     'image_formats': [
+#         ('', _('Select format')),
+#         ('aki', _('AKI - Amazon Kernel Image')),
+#         ('ami', _('AMI - Amazon Machine Image')),
+#         ('ari', _('ARI - Amazon Ramdisk Image')),
+#         ('iso', _('ISO - Optical Disk Image')),
+#         ('qcow2', _('QCOW2 - QEMU Emulator')),
+#         ('raw', _('Raw')),
+#         ('vdi', _('VDI')),
+#         ('vhd', _('VHD')),
+#         ('vmdk', _('VMDK'))
+#     ]
+# }
+
+# The IMAGE_CUSTOM_PROPERTY_TITLES setting is used to customize the titles for
+# image custom property attributes that appear on image detail pages.
+IMAGE_CUSTOM_PROPERTY_TITLES = {
+    "architecture": _("Architecture"),
+    "kernel_id": _("Kernel ID"),
+    "ramdisk_id": _("Ramdisk ID"),
+    "image_state": _("Euca2ools state"),
+    "project_id": _("Project ID"),
+    "image_type": _("Image Type")
+}
+
+# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image
+# custom properties should not be displayed in the Image Custom Properties
+# table.
+IMAGE_RESERVED_CUSTOM_PROPERTIES = []
+
+# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
+# in the Keystone service catalog. Use this setting when Horizon is running
+# external to the OpenStack environment. The default is 'publicURL'.
+#OPENSTACK_ENDPOINT_TYPE = "publicURL"
+
+# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
+# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
+# in the Keystone service catalog. Use this setting when Horizon is running
+# external to the OpenStack environment. The default is None. This
+# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
+#SECONDARY_ENDPOINT_TYPE = "publicURL"
+
+# The number of objects (Swift containers/objects or images) to display
+# on a single page before providing a paging element (a "more" link)
+# to paginate results.
+API_RESULT_LIMIT = 1000
+API_RESULT_PAGE_SIZE = 20
+
+# Specify a maximum number of items to display in a dropdown.
+DROPDOWN_MAX_ITEMS = 30
+
+# The timezone of the server. This should correspond with the timezone
+# of your entire OpenStack installation, and hopefully be in UTC.
+TIME_ZONE = "UTC"
+
+# When launching an instance, the menu of available flavors is
+# sorted by RAM usage, ascending. If you would like a different sort order,
+# you can provide another flavor attribute as sorting key. Alternatively, you
+# can provide a custom callback method to use for sorting. You can also provide
+# a flag for reverse sort. For more info, see
+# http://docs.python.org/2/library/functions.html#sorted
+# CREATE_INSTANCE_FLAVOR_SORT = {
+#     'key': 'name',
+#     # or
+#     'key': my_awesome_callback_method,
+#     'reverse': False,
+# }
+
+# The Horizon Policy Enforcement engine uses these values to load per-service
+# policy rule files. The content of these files should match the files the
+# OpenStack services are using to determine role based access control in the
+# target installation.
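+#
+# As a sketch (the file names here are hypothetical), enforcing a local copy
+# of only the compute rules would mean uncommenting POLICY_FILES_PATH below
+# together with POLICY_FILES = {'compute': 'nova_policy.json'}; services
+# not listed keep their default rule files.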
+ +# Path to directory containing policy.json files +#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf") +# Map of local copy of service policy files +#POLICY_FILES = { +# 'identity': 'keystone_policy.json', +# 'compute': 'nova_policy.json', +# 'volume': 'cinder_policy.json', +# 'image': 'glance_policy.json', +# 'orchestration': 'heat_policy.json', +# 'network': 'neutron_policy.json', +#} + +# Trove user and database extension support. By default support for +# creating users and databases on database instances is turned on. +# To disable these extensions set the permission here to something +# unusable such as ["!"]. +# TROVE_ADD_USER_PERMS = [] +# TROVE_ADD_DATABASE_PERMS = [] + +LOGGING = { + 'version': 1, + # When set to True this will disable all logging except + # for loggers specified in this configuration dictionary. Note that + # if nothing is specified here and disable_existing_loggers is True, + # django.db.backends will still log unless it is disabled explicitly. + 'disable_existing_loggers': False, + 'handlers': { + 'null': { + 'level': 'DEBUG', + 'class': 'django.utils.log.NullHandler', + }, + 'console': { + # Set the level to "DEBUG" for verbose output logging. + 'level': 'INFO', + 'class': 'logging.StreamHandler', + }, + }, + 'loggers': { + # Logging from django.db.backends is VERY verbose, send to null + # by default. + 'django.db.backends': { + 'handlers': ['null'], + 'propagate': False, + }, + 'requests': { + 'handlers': ['null'], + 'propagate': False, + }, + 'horizon': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'openstack_dashboard': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'novaclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'cinderclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'keystoneclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'glanceclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'neutronclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'heatclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'ceilometerclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'troveclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'swiftclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'openstack_auth': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'nose.plugins.manager': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'django': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'iso8601': { + 'handlers': ['null'], + 'propagate': False, + }, + 'scss': { + 'handlers': ['null'], + 'propagate': False, + }, + } +} + +# 'direction' should not be specified for all_tcp/udp/icmp. +# It is specified in the form. 
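+# Each entry below follows the same shape: a key mapping to 'name',
+# 'ip_protocol', 'from_port' and 'to_port'. As a sketch, a hypothetical
+# custom shortcut for PostgreSQL would be added alongside them as:
+# 'postgresql': {
+#     'name': 'PostgreSQL',
+#     'ip_protocol': 'tcp',
+#     'from_port': '5432',
+#     'to_port': '5432',
+# },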
+SECURITY_GROUP_RULES = { + 'all_tcp': { + 'name': _('All TCP'), + 'ip_protocol': 'tcp', + 'from_port': '1', + 'to_port': '65535', + }, + 'all_udp': { + 'name': _('All UDP'), + 'ip_protocol': 'udp', + 'from_port': '1', + 'to_port': '65535', + }, + 'all_icmp': { + 'name': _('All ICMP'), + 'ip_protocol': 'icmp', + 'from_port': '-1', + 'to_port': '-1', + }, + 'ssh': { + 'name': 'SSH', + 'ip_protocol': 'tcp', + 'from_port': '22', + 'to_port': '22', + }, + 'smtp': { + 'name': 'SMTP', + 'ip_protocol': 'tcp', + 'from_port': '25', + 'to_port': '25', + }, + 'dns': { + 'name': 'DNS', + 'ip_protocol': 'tcp', + 'from_port': '53', + 'to_port': '53', + }, + 'http': { + 'name': 'HTTP', + 'ip_protocol': 'tcp', + 'from_port': '80', + 'to_port': '80', + }, + 'pop3': { + 'name': 'POP3', + 'ip_protocol': 'tcp', + 'from_port': '110', + 'to_port': '110', + }, + 'imap': { + 'name': 'IMAP', + 'ip_protocol': 'tcp', + 'from_port': '143', + 'to_port': '143', + }, + 'ldap': { + 'name': 'LDAP', + 'ip_protocol': 'tcp', + 'from_port': '389', + 'to_port': '389', + }, + 'https': { + 'name': 'HTTPS', + 'ip_protocol': 'tcp', + 'from_port': '443', + 'to_port': '443', + }, + 'smtps': { + 'name': 'SMTPS', + 'ip_protocol': 'tcp', + 'from_port': '465', + 'to_port': '465', + }, + 'imaps': { + 'name': 'IMAPS', + 'ip_protocol': 'tcp', + 'from_port': '993', + 'to_port': '993', + }, + 'pop3s': { + 'name': 'POP3S', + 'ip_protocol': 'tcp', + 'from_port': '995', + 'to_port': '995', + }, + 'ms_sql': { + 'name': 'MS SQL', + 'ip_protocol': 'tcp', + 'from_port': '1433', + 'to_port': '1433', + }, + 'mysql': { + 'name': 'MYSQL', + 'ip_protocol': 'tcp', + 'from_port': '3306', + 'to_port': '3306', + }, + 'rdp': { + 'name': 'RDP', + 'ip_protocol': 'tcp', + 'from_port': '3389', + 'to_port': '3389', + }, +} + +# Deprecation Notice: +# +# The setting FLAVOR_EXTRA_KEYS has been deprecated. +# Please load extra spec metadata into the Glance Metadata Definition Catalog. +# +# The sample quota definitions can be found in: +# /etc/metadefs/compute-quota.json +# +# The metadata definition catalog supports CLI and API: +# $glance --os-image-api-version 2 help md-namespace-import +# $glance-manage db_load_metadefs +# +# See Metadata Definitions on: http://docs.openstack.org/developer/glance/ + +# Indicate to the Sahara data processing service whether or not +# automatic floating IP allocation is in effect. If it is not +# in effect, the user will be prompted to choose a floating IP +# pool for use in their cluster. False by default. You would want +# to set this to True if you were running Nova Networking with +# auto_assign_floating_ip = True. +# SAHARA_AUTO_IP_ALLOCATION_ENABLED = False + +# The hash algorithm to use for authentication tokens. This must +# match the hash algorithm that the identity server and the +# auth_token middleware are using. Allowed values are the +# algorithms supported by Python's hashlib library. +# OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5' + diff --git a/openstack/etc/hosts b/openstack/etc/hosts new file mode 100644 index 00000000..b35139ac --- /dev/null +++ b/openstack/etc/hosts @@ -0,0 +1,2 @@ +127.0.0.1 localhost onenode +::1 localhost diff --git a/openstack/etc/keystone/keystone-paste.ini b/openstack/etc/keystone/keystone-paste.ini new file mode 100644 index 00000000..cd132971 --- /dev/null +++ b/openstack/etc/keystone/keystone-paste.ini @@ -0,0 +1,112 @@ +# Keystone PasteDeploy configuration file. 
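+#
+# How this file fits together, as a short sketch: each [filter:*] and
+# [app:*] section names a paste.deploy factory; [pipeline:*] sections chain
+# filters in order, ending in an app; the [composite:main] and
+# [composite:admin] sections at the bottom then map URL prefixes to those
+# pipelines. With the definitions below, a request to /v3 on the public
+# port is therefore handled by the api_v3 pipeline, i.e.
+# sizelimit -> url_normalize -> ... -> service_v3.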
+ +[filter:debug] +paste.filter_factory = keystone.common.wsgi:Debug.factory + +[filter:build_auth_context] +paste.filter_factory = keystone.middleware:AuthContextMiddleware.factory + +[filter:token_auth] +paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory + +[filter:admin_token_auth] +paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory + +[filter:xml_body] +paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory + +[filter:xml_body_v2] +paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV2.factory + +[filter:xml_body_v3] +paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV3.factory + +[filter:json_body] +paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory + +[filter:user_crud_extension] +paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory + +[filter:crud_extension] +paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory + +[filter:ec2_extension] +paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory + +[filter:ec2_extension_v3] +paste.filter_factory = keystone.contrib.ec2:Ec2ExtensionV3.factory + +[filter:federation_extension] +paste.filter_factory = keystone.contrib.federation.routers:FederationExtension.factory + +[filter:oauth1_extension] +paste.filter_factory = keystone.contrib.oauth1.routers:OAuth1Extension.factory + +[filter:s3_extension] +paste.filter_factory = keystone.contrib.s3:S3Extension.factory + +[filter:endpoint_filter_extension] +paste.filter_factory = keystone.contrib.endpoint_filter.routers:EndpointFilterExtension.factory + +[filter:simple_cert_extension] +paste.filter_factory = keystone.contrib.simple_cert:SimpleCertExtension.factory + +[filter:revoke_extension] +paste.filter_factory = keystone.contrib.revoke.routers:RevokeExtension.factory + +[filter:url_normalize] +paste.filter_factory = keystone.middleware:NormalizingFilter.factory + +[filter:sizelimit] +paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory + +[filter:stats_monitoring] +paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory + +[filter:stats_reporting] +paste.filter_factory = keystone.contrib.stats:StatsExtension.factory + +[filter:access_log] +paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory + +[app:public_service] +paste.app_factory = keystone.service:public_app_factory + +[app:service_v3] +paste.app_factory = keystone.service:v3_app_factory + +[app:admin_service] +paste.app_factory = keystone.service:admin_app_factory + +[pipeline:public_api] +pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension user_crud_extension public_service + +[pipeline:admin_api] +pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension s3_extension crud_extension admin_service + +[pipeline:api_v3] +pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v3 json_body ec2_extension_v3 s3_extension simple_cert_extension service_v3 + +[app:public_version_service] +paste.app_factory = keystone.service:public_version_app_factory + +[app:admin_version_service] +paste.app_factory = keystone.service:admin_version_app_factory + +[pipeline:public_version_api] +pipeline = sizelimit url_normalize xml_body public_version_service + +[pipeline:admin_version_api] +pipeline = sizelimit url_normalize xml_body admin_version_service + +[composite:main] +use = egg:Paste#urlmap +/v2.0 = 
public_api +/v3 = api_v3 +/ = public_version_api + +[composite:admin] +use = egg:Paste#urlmap +/v2.0 = admin_api +/v3 = api_v3 +/ = admin_version_api diff --git a/openstack/etc/keystone/keystone.conf b/openstack/etc/keystone/keystone.conf new file mode 100644 index 00000000..a46cc5fc --- /dev/null +++ b/openstack/etc/keystone/keystone.conf @@ -0,0 +1,1384 @@ +[DEFAULT] + +# +# Options defined in keystone +# + +# A "shared secret" that can be used to bootstrap Keystone. +# This "token" does not represent a user, and carries no +# explicit authorization. To disable in production (highly +# recommended), remove AdminTokenAuthMiddleware from your +# paste application pipelines (for example, in keystone- +# paste.ini). (string value) +admin_token=##KEYSTONE_TEMPORARY_ADMIN_TOKEN## + +# The IP address of the network interface for the public +# service to listen on. (string value) +# Deprecated group/name - [DEFAULT]/bind_host +#public_bind_host=0.0.0.0 + +# The IP address of the network interface for the admin +# service to listen on. (string value) +# Deprecated group/name - [DEFAULT]/bind_host +#admin_bind_host=0.0.0.0 + +# The port which the OpenStack Compute service listens on. +# (integer value) +compute_port=8774 + +# The port number which the admin service listens on. (integer +# value) +admin_port=35357 + +# The port number which the public service listens on. +# (integer value) +public_port=5000 + +# The base public endpoint URL for Keystone that is advertised +# to clients (NOTE: this does NOT affect how Keystone listens +# for connections). Defaults to the base host URL of the +# request. E.g. a request to http://server:5000/v2.0/users +# will default to http://server:5000. You should only need to +# set this value if the base URL contains a path (e.g. +# /prefix/v2.0) or the endpoint should be found on a different +# server. (string value) +#public_endpoint= + +# The base admin endpoint URL for Keystone that is advertised +# to clients (NOTE: this does NOT affect how Keystone listens +# for connections). Defaults to the base host URL of the +# request. E.g. a request to http://server:35357/v2.0/users +# will default to http://server:35357. You should only need to +# set this value if the base URL contains a path (e.g. +# /prefix/v2.0) or the endpoint should be found on a different +# server. (string value) +#admin_endpoint= + +# The number of worker processes to serve the public WSGI +# application (integer value) +#public_workers=1 + +# The number of worker processes to serve the admin WSGI +# application (integer value) +#admin_workers=1 + +# Enforced by optional sizelimit middleware +# (keystone.middleware:RequestBodySizeLimiter). (integer +# value) +#max_request_body_size=114688 + +# Limit the sizes of user & project ID/names. (integer value) +#max_param_size=64 + +# Similar to max_param_size, but provides an exception for +# token values. (integer value) +#max_token_size=8192 + +# During a SQL upgrade member_role_id will be used to create a +# new role that will replace records in the assignment table +# with explicit role grants. After migration, the +# member_role_id will be used in the API add_user_to_project. +# (string value) +#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab + +# During a SQL upgrade member_role_name will be used to create +# a new role that will replace records in the assignment table +# with explicit role grants. After migration, member_role_name +# will be ignored. 
(string value) +#member_role_name=_member_ + +# The value passed as the keyword "rounds" to passlib's +# encrypt method. (integer value) +#crypt_strength=40000 + +# Set this to true if you want to enable TCP_KEEPALIVE on +# server sockets, i.e. sockets used by the Keystone wsgi +# server for client connections. (boolean value) +#tcp_keepalive=false + +# Sets the value of TCP_KEEPIDLE in seconds for each server +# socket. Only applies if tcp_keepalive is true. Not supported +# on OS X. (integer value) +#tcp_keepidle=600 + +# The maximum number of entities that will be returned in a +# collection, with no limit set by default. This global limit +# may be then overridden for a specific driver, by specifying +# a list_limit in the appropriate section (e.g. [assignment]). +# (integer value) +#list_limit= + +# Set this to false if you want to enable the ability for +# user, group and project entities to be moved between domains +# by updating their domain_id. Allowing such movement is not +# recommended if the scope of a domain admin is being +# restricted by use of an appropriate policy file (see +# policy.v3cloudsample as an example). (boolean value) +#domain_id_immutable=true + +# If set to true, strict password length checking is performed +# for password manipulation. If a password exceeds the maximum +# length, the operation will fail with an HTTP 403 Forbidden +# error. If set to false, passwords are automatically +# truncated to the maximum length. (boolean value) +#strict_password_check=false + + +# +# Options defined in oslo.messaging +# + +# Use durable queues in amqp. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size=30 + +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. (list value) +#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions + +# Qpid broker hostname. (string value) +#qpid_hostname=keystone + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). 
(string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +rabbit_host=##RABBITMQ_HOST## + +# The RabbitMQ broker port where a single node is used. +# (integer value) +rabbit_port=##RABBITMQ_PORT## + +# RabbitMQ HA cluster host:port pairs. (list value) +rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +rabbit_userid=##RABBITMQ_USER## + +# The RabbitMQ password. (string value) +rabbit_password=##RABBITMQ_PASSWORD## + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=keystone + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Host to locate redis. (string value) +#host=localhost + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + +# Size of RPC greenthread pool. (integer value) +#rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +#notification_driver= + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. 
(string value)
+#transport_url=
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in keystone.notifications
+#
+
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=
+
+
+#
+# Options defined in keystone.middleware.ec2_token
+#
+
+# URL to get token from ec2 request. (string value)
+#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens
+
+# Required if EC2 server requires client certificate. (string
+# value)
+#keystone_ec2_keyfile=
+
+# Client certificate key filename. Required if EC2 server
+# requires client certificate. (string value)
+#keystone_ec2_certfile=
+
+# A PEM encoded certificate authority to use when verifying
+# HTTPS connections. Defaults to the system CAs. (string
+# value)
+#keystone_ec2_cafile=
+
+# Disable SSL certificate verification. (boolean value)
+#keystone_ec2_insecure=false
+
+
+#
+# Options defined in keystone.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=
+
+
+#
+# Options defined in keystone.openstack.common.lockutils
+#
+
+# Enables or disables inter-process locks. (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. (string value)
+#lock_path=
+
+
+#
+# Options defined in keystone.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error. (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context. (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context.
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format.
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
+
+# Enables or disables publication of error events. (boolean
+# value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log
+# message. (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log
+# message. (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of a logging configuration file. This file is
+# appended to any existing logging configuration files. For
+# details about logging configuration files, see the Python
+# logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s . (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=
+
+# (Optional) The base directory used for relative --log-file
+# paths. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and will change in J to honor RFC5424. (boolean
+# value)
+#use_syslog=false
+
+# (Optional) Enables or disables syslog rfc5424 format for
+# logging. If enabled, prefixes the MSG part of the syslog
+# message with APP-NAME (RFC5424). The format without the APP-
+# NAME is deprecated in I, and will be removed in J. (boolean
+# value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in keystone.openstack.common.policy
+#
+
+# The JSON file that defines policies. (string value)
+#policy_file=policy.json
+
+# Default rule. Enforced when a requested rule is not found.
+# (string value)
+#policy_default_rule=default
+
+
+[assignment]
+
+#
+# Options defined in keystone
+#
+
+# Assignment backend driver. (string value)
+#driver=
+
+# Toggle for assignment caching. This has no effect unless
+# global caching is enabled. (boolean value)
+#caching=true
+
+# TTL (in seconds) to cache assignment data. This has no
+# effect unless global caching is enabled. (integer value)
+#cache_time=
+
+# Maximum number of entities that will be returned in an
+# assignment collection. (integer value)
+#list_limit=
+
+
+[auth]
+
+#
+# Options defined in keystone
+#
+
+# Default auth methods. (list value)
+#methods=external,password,token
+
+# The password auth plugin module. (string value)
+#password=keystone.auth.plugins.password.Password
+
+# The token auth plugin module. (string value)
+#token=keystone.auth.plugins.token.Token
+
+# The external (REMOTE_USER) auth plugin module. (string
+# value)
+#external=keystone.auth.plugins.external.DefaultDomain
+
+
+[cache]
+
+#
+# Options defined in keystone
+#
+
+# Prefix for building the configuration dictionary for the
+# cache region. This should not need to be changed unless
+# there is another dogpile.cache region with the same
+# configuration name. (string value)
+#config_prefix=cache.keystone
+
+# Default TTL, in seconds, for any cached item in the
+# dogpile.cache region. This applies to any cached method that
+# doesn't have an explicit cache expiration time defined for
+# it. (integer value)
+#expiration_time=600
+
+# Dogpile.cache backend module. It is recommended that
+# Memcache (dogpile.cache.memcached) or Redis
+# (dogpile.cache.redis) be used in production deployments.
+# Small workloads (single process) like devstack can use the
+# dogpile.cache.memory backend. (string value)
+#backend=keystone.common.cache.noop
+
+# Use a key-mangling function (sha1) to ensure fixed length
+# cache-keys. This is toggle-able for debugging purposes; it
+# is highly recommended to always leave this set to true.
+# (boolean value)
+#use_key_mangler=true
+
+# Arguments supplied to the backend module. Specify this
+# option once per argument to be passed to the dogpile.cache
+# backend. Example format: "<argname>:<value>". (multi valued)
+#backend_argument=
+
+# Proxy classes to import that will affect the way the
+# dogpile.cache backend functions. See the dogpile.cache
+# documentation on changing-backend-behavior. (list value)
+#proxies=
+
+# Global toggle for all caching using the should_cache_fn
+# mechanism. (boolean value)
+#enabled=false
+
+# Extra debugging from the cache backend (cache keys,
+# get/set/delete/etc calls). This is only really useful if you
+# need to see the specific cache-backend get/set/delete calls
+# with the keys/values. Typically this should be left set to
+# false. (boolean value)
+#debug_cache_backend=false
+
+
+[catalog]
+
+#
+# Options defined in keystone
+#
+
+# Catalog template file name for use with the template catalog
+# backend. (string value)
+#template_file=default_catalog.templates
+
+# Catalog backend driver. (string value)
+#driver=keystone.catalog.backends.sql.Catalog
+
+# Maximum number of entities that will be returned in a
+# catalog collection. (integer value)
+#list_limit=
+
+
+[credential]
+
+#
+# Options defined in keystone
+#
+
+# Credential backend driver. (string value)
+#driver=keystone.credential.backends.sql.Credential
+
+
+[database]
+
+#
+# Options defined in oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+#sqlite_db=oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous=true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the
+# database. (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection=
+connection=postgresql://##KEYSTONE_DB_USER##:##KEYSTONE_DB_PASSWORD##@onenode/keystone
+
+# The SQLAlchemy connection string to use to connect to the
+# slave database. (string value)
+#slave_connection=
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=
+
+# Maximum db connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a SQL connection.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=
+
+# Verbosity of SQL debugging information: 0=None,
+# 100=Everything. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add Python stack traces to SQL as comment strings. (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=
+
+# Enable the experimental use of database reconnect on
+# connection lost. (boolean value)
+#use_db_reconnect=false
+
+# Seconds between database connection retries. (integer value)
+#db_retry_interval=1
+
+# If True, increases the interval between database connection
+# retries up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval=true
+
+# If db_inc_retry_interval is set, the maximum seconds between
+# database connection retries. (integer value)
+#db_max_retry_interval=10
+
+# Maximum database connection retries before error is raised.
+# Set to -1 to specify an infinite retry count. (integer
+# value)
+#db_max_retries=20
+
+
+[ec2]
+
+#
+# Options defined in keystone
+#
+
+# EC2Credential backend driver. (string value)
+#driver=keystone.contrib.ec2.backends.kvs.Ec2
+
+
+[endpoint_filter]
+
+#
+# Options defined in keystone
+#
+
+# Endpoint Filter backend driver (string value)
+#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter
+
+# Toggle to return all active endpoints if no filter exists.
+# (boolean value)
+#return_all_endpoints_if_no_filter=true
+
+
+[federation]
+
+#
+# Options defined in keystone
+#
+
+# Federation backend driver. (string value)
+#driver=keystone.contrib.federation.backends.sql.Federation
+
+# Value to be used when filtering assertion parameters from
+# the environment. (string value)
+#assertion_prefix=
+
+
+[identity]
+
+#
+# Options defined in keystone
+#
+
+# This references the domain to use for all Identity API v2
+# requests (which are not aware of domains). A domain with
+# this ID will be created for you by keystone-manage db_sync
+# in migration 008. The domain referenced by this ID cannot be
+# deleted on the v3 API, to prevent accidentally breaking the
+# v2 API. There is nothing special about this domain, other
+# than the fact that it must exist in order to maintain
+# support for your v2 clients. (string value)
+#default_domain_id=default
+
+# A subset (or all) of domains can have their own identity
+# driver, each with their own partial configuration file in a
+# domain configuration directory. Only values specific to the
+# domain need to be placed in the domain specific
+# configuration file. This feature is disabled by default; set
+# to true to enable. (boolean value)
+#domain_specific_drivers_enabled=false
+
+# Path for Keystone to locate the domain specific identity
+# configuration files if domain_specific_drivers_enabled is
+# set to true. (string value)
+#domain_config_dir=/etc/keystone/domains
+
+# Identity backend driver. (string value)
+#driver=keystone.identity.backends.sql.Identity
+
+# Maximum supported length for user passwords; decrease to
+# improve performance. (integer value)
+#max_password_length=4096
+
+# Maximum number of entities that will be returned in an
+# identity collection. (integer value)
+#list_limit=
+
+
+[kvs]
+
+#
+# Options defined in keystone
+#
+
+# Extra dogpile.cache backend modules to register with the
+# dogpile.cache library. (list value)
+#backends=
+
+# Prefix for building the configuration dictionary for the KVS
+# region. This should not need to be changed unless there is
+# another dogpile.cache region with the same configuration
+# name. (string value)
+#config_prefix=keystone.kvs
+
+# Toggle to disable using a key-mangling function to ensure
+# fixed length keys. This is toggle-able for debugging
+# purposes; it is highly recommended to always leave this set
+# to true. (boolean value)
+#enable_key_mangler=true
+
+# Default lock timeout for distributed locking. (integer
+# value)
+#default_lock_timeout=5
+
+
+[ldap]
+
+#
+# Options defined in keystone
+#
+
+# URL for connecting to the LDAP server. (string value)
+#url=ldap://keystone
+
+# User BindDN to query the LDAP server. (string value)
+#user=
+
+# Password for the BindDN to query the LDAP server. (string
+# value)
+#password=
+
+# LDAP server suffix (string value)
+#suffix=cn=example,cn=com
+
+# If true, will add a dummy member to groups. This is required
+# if the objectclass for groups requires the "member"
+# attribute. (boolean value)
+#use_dumb_member=false
+
+# DN of the "dummy member" to use when "use_dumb_member" is
+# enabled. (string value)
+#dumb_member=cn=dumb,dc=nonexistent
+
+# Delete subtrees using the subtree delete control. Only
+# enable this option if your LDAP server supports subtree
+# deletion. (boolean value)
+#allow_subtree_delete=false
+
+# The LDAP scope for queries; this can be either "one"
+# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree).
+# (string value)
+#query_scope=one
+
+# Maximum results per page; a value of zero ("0") disables
+# paging. (integer value)
+#page_size=0
+
+# The LDAP dereferencing option for queries. This can be
+# either "never", "searching", "always", "finding" or
+# "default". The "default" option falls back to using default
+# dereferencing configured by your ldap.conf. (string value)
+#alias_dereferencing=default
+
+# Sets the LDAP debugging level for LDAP calls. A value of 0
+# means that debugging is not enabled. This value is a
+# bitmask; consult your LDAP documentation for possible
+# values. (integer value)
+#debug_level=
+
+# Override the system's default referral chasing behavior for
+# queries. (boolean value)
+#chase_referrals=
+
+# Search base for users. (string value)
+#user_tree_dn=
+
+# LDAP search filter for users. (string value)
+#user_filter=
+
+# LDAP objectclass for users. (string value)
+#user_objectclass=inetOrgPerson
+
+# LDAP attribute mapped to user id. (string value)
+#user_id_attribute=cn
+
+# LDAP attribute mapped to user name. (string value)
+#user_name_attribute=sn
+
+# LDAP attribute mapped to user email. (string value)
+#user_mail_attribute=email
+
+# LDAP attribute mapped to password. (string value)
+#user_pass_attribute=userPassword
+
+# LDAP attribute mapped to user enabled flag. (string value)
+#user_enabled_attribute=enabled
+
+# Bitmask integer to indicate the bit that the enabled value
+# is stored in if the LDAP server represents "enabled" as a
+# bit on an integer rather than a boolean. A value of "0"
+# indicates the mask is not used. If this is not set to "0"
+# the typical value is "2". This is typically used when
+# "user_enabled_attribute = userAccountControl". (integer
+# value)
+#user_enabled_mask=0
+
+# Default value to enable users. This should match an
+# appropriate int value if the LDAP server uses non-boolean
+# (bitmask) values to indicate if a user is enabled or
+# disabled. If this is not set to "True" the typical value is
+# "512". This is typically used when "user_enabled_attribute =
+# userAccountControl". (string value)
+#user_enabled_default=True
+
+# List of attributes stripped off the user on update. (list
+# value)
+#user_attribute_ignore=default_project_id,tenants
+
+# LDAP attribute mapped to default_project_id for users.
+# (string value)
+#user_default_project_id_attribute=
+
+# Allow user creation in LDAP backend. (boolean value)
+#user_allow_create=true
+
+# Allow user updates in LDAP backend. (boolean value)
+#user_allow_update=true
+
+# Allow user deletion in LDAP backend. (boolean value)
+#user_allow_delete=true
+
+# If true, Keystone uses an alternative method to determine if
+# a user is enabled or not by checking if they are a member of
+# the "user_enabled_emulation_dn" group. (boolean value)
+#user_enabled_emulation=false
+
+# DN of the group entry to hold enabled users when using
+# enabled emulation. (string value)
+#user_enabled_emulation_dn=
+
+# List of additional LDAP attributes used for mapping
+# additional attribute mappings for users. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#user_additional_attribute_mapping=
+
+# Search base for projects (string value)
+# Deprecated group/name - [ldap]/tenant_tree_dn
+#project_tree_dn=
+
+# LDAP search filter for projects. (string value)
+# Deprecated group/name - [ldap]/tenant_filter
+#project_filter=
+
+# LDAP objectclass for projects. (string value)
+# Deprecated group/name - [ldap]/tenant_objectclass
+#project_objectclass=groupOfNames
+
+# LDAP attribute mapped to project id. (string value)
+# Deprecated group/name - [ldap]/tenant_id_attribute
+#project_id_attribute=cn
+
+# LDAP attribute mapped to project membership for user.
+# (string value)
+# Deprecated group/name - [ldap]/tenant_member_attribute
+#project_member_attribute=member
+
+# LDAP attribute mapped to project name. (string value)
+# Deprecated group/name - [ldap]/tenant_name_attribute
+#project_name_attribute=ou
+
+# LDAP attribute mapped to project description. (string value)
+# Deprecated group/name - [ldap]/tenant_desc_attribute
+#project_desc_attribute=description
+
+# LDAP attribute mapped to project enabled. (string value)
+# Deprecated group/name - [ldap]/tenant_enabled_attribute
+#project_enabled_attribute=enabled
+
+# LDAP attribute mapped to project domain_id. (string value)
+# Deprecated group/name - [ldap]/tenant_domain_id_attribute
+#project_domain_id_attribute=businessCategory
+
+# List of attributes stripped off the project on update. (list
(list +# value) +# Deprecated group/name - [ldap]/tenant_attribute_ignore +#project_attribute_ignore= + +# Allow project creation in LDAP backend. (boolean value) +# Deprecated group/name - [ldap]/tenant_allow_create +#project_allow_create=true + +# Allow project update in LDAP backend. (boolean value) +# Deprecated group/name - [ldap]/tenant_allow_update +#project_allow_update=true + +# Allow project deletion in LDAP backend. (boolean value) +# Deprecated group/name - [ldap]/tenant_allow_delete +#project_allow_delete=true + +# If true, Keystone uses an alternative method to determine if +# a project is enabled or not by checking if they are a member +# of the "project_enabled_emulation_dn" group. (boolean value) +# Deprecated group/name - [ldap]/tenant_enabled_emulation +#project_enabled_emulation=false + +# DN of the group entry to hold enabled projects when using +# enabled emulation. (string value) +# Deprecated group/name - [ldap]/tenant_enabled_emulation_dn +#project_enabled_emulation_dn= + +# Additional attribute mappings for projects. Attribute +# mapping format is :, where ldap_attr +# is the attribute in the LDAP entry and user_attr is the +# Identity API attribute. (list value) +# Deprecated group/name - [ldap]/tenant_additional_attribute_mapping +#project_additional_attribute_mapping= + +# Search base for roles. (string value) +#role_tree_dn= + +# LDAP search filter for roles. (string value) +#role_filter= + +# LDAP objectclass for roles. (string value) +#role_objectclass=organizationalRole + +# LDAP attribute mapped to role id. (string value) +#role_id_attribute=cn + +# LDAP attribute mapped to role name. (string value) +#role_name_attribute=ou + +# LDAP attribute mapped to role membership. (string value) +#role_member_attribute=roleOccupant + +# List of attributes stripped off the role on update. (list +# value) +#role_attribute_ignore= + +# Allow role creation in LDAP backend. (boolean value) +#role_allow_create=true + +# Allow role update in LDAP backend. (boolean value) +#role_allow_update=true + +# Allow role deletion in LDAP backend. (boolean value) +#role_allow_delete=true + +# Additional attribute mappings for roles. Attribute mapping +# format is :, where ldap_attr is the +# attribute in the LDAP entry and user_attr is the Identity +# API attribute. (list value) +#role_additional_attribute_mapping= + +# Search base for groups. (string value) +#group_tree_dn= + +# LDAP search filter for groups. (string value) +#group_filter= + +# LDAP objectclass for groups. (string value) +#group_objectclass=groupOfNames + +# LDAP attribute mapped to group id. (string value) +#group_id_attribute=cn + +# LDAP attribute mapped to group name. (string value) +#group_name_attribute=ou + +# LDAP attribute mapped to show group membership. (string +# value) +#group_member_attribute=member + +# LDAP attribute mapped to group description. (string value) +#group_desc_attribute=description + +# List of attributes stripped off the group on update. (list +# value) +#group_attribute_ignore= + +# Allow group creation in LDAP backend. (boolean value) +#group_allow_create=true + +# Allow group update in LDAP backend. (boolean value) +#group_allow_update=true + +# Allow group deletion in LDAP backend. (boolean value) +#group_allow_delete=true + +# Additional attribute mappings for groups. Attribute mapping +# format is :, where ldap_attr is the +# attribute in the LDAP entry and user_attr is the Identity +# API attribute. 
(list value) +#group_additional_attribute_mapping= + +# CA certificate file path for communicating with LDAP +# servers. (string value) +#tls_cacertfile= + +# CA certificate directory path for communicating with LDAP +# servers. (string value) +#tls_cacertdir= + +# Enable TLS for communicating with LDAP servers. (boolean +# value) +#use_tls=false + +# Valid options for tls_req_cert are demand, never, and allow. +# (string value) +#tls_req_cert=demand + + +[matchmaker_ring] + +# +# Options defined in oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + +[memcache] + +# +# Options defined in keystone +# + +# Memcache servers in the format of "host:port". (list value) +#servers=localhost:11211 + +# Number of compare-and-set attempts to make when using +# compare-and-set in the token memcache back end. (integer +# value) +#max_compare_and_set_retry=16 + + +[oauth1] + +# +# Options defined in keystone +# + +# Credential backend driver. (string value) +#driver=keystone.contrib.oauth1.backends.sql.OAuth1 + +# Duration (in seconds) for the OAuth Request Token. (integer +# value) +#request_token_duration=28800 + +# Duration (in seconds) for the OAuth Access Token. (integer +# value) +#access_token_duration=86400 + + +[os_inherit] + +# +# Options defined in keystone +# + +# role-assignment inheritance to projects from owning domain +# can be optionally enabled. (boolean value) +#enabled=false + + +[paste_deploy] + +# +# Options defined in keystone +# + +# Name of the paste configuration file that defines the +# available pipelines. (string value) +#config_file=keystone-paste.ini + + +[policy] + +# +# Options defined in keystone +# + +# Policy backend driver. (string value) +#driver=keystone.policy.backends.sql.Policy + +# Maximum number of entities that will be returned in a policy +# collection. (integer value) +#list_limit= + + +[revoke] + +# +# Options defined in keystone +# + +# An implementation of the backend for persisting revocation +# events. (string value) +#driver=keystone.contrib.revoke.backends.kvs.Revoke + +# This value (calculated in seconds) is added to token +# expiration before a revocation event may be removed from the +# backend. (integer value) +#expiration_buffer=1800 + +# Toggle for revocation event cacheing. This has no effect +# unless global caching is enabled. (boolean value) +#caching=true + + +[signing] + +# +# Options defined in keystone +# + +# Deprecated in favor of provider in the [token] section. +# (string value) +#token_format= + +# Path of the certfile for token signing. For non-production +# environments, you may be interested in using `keystone- +# manage pki_setup` to generate self-signed certificates. +# (string value) +#certfile=/etc/keystone/ssl/certs/signing_cert.pem + +# Path of the keyfile for token signing. (string value) +#keyfile=/etc/keystone/ssl/private/signing_key.pem + +# Path of the CA for token signing. (string value) +#ca_certs=/etc/keystone/ssl/certs/ca.pem + +# Path of the CA key for token signing. (string value) +#ca_key=/etc/keystone/ssl/private/cakey.pem + +# Key size (in bits) for token signing cert (auto generated +# certificate). (integer value) +#key_size=2048 + +# Days the token signing cert is valid for (auto generated +# certificate). (integer value) +#valid_days=3650 + +# Certificate subject (auto generated certificate) for token +# signing. 
(string value) +#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com + + +[ssl] + +# +# Options defined in keystone +# + +# Toggle for SSL support on the Keystone eventlet servers. +# (boolean value) +#enable=false + +# Path of the certfile for SSL. For non-production +# environments, you may be interested in using `keystone- +# manage ssl_setup` to generate self-signed certificates. +# (string value) +#certfile=/etc/keystone/ssl/certs/keystone.pem + +# Path of the keyfile for SSL. (string value) +#keyfile=/etc/keystone/ssl/private/keystonekey.pem + +# Path of the ca cert file for SSL. (string value) +#ca_certs=/etc/keystone/ssl/certs/ca.pem + +# Path of the CA key file for SSL. (string value) +#ca_key=/etc/keystone/ssl/private/cakey.pem + +# Require client certificate. (boolean value) +#cert_required=false + +# SSL key length (in bits) (auto generated certificate). +# (integer value) +#key_size=1024 + +# Days the certificate is valid for once signed (auto +# generated certificate). (integer value) +#valid_days=3650 + +# SSL certificate subject (auto generated certificate). +# (string value) +#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=keystone + + +[stats] + +# +# Options defined in keystone +# + +# Stats backend driver. (string value) +#driver=keystone.contrib.stats.backends.kvs.Stats + + +[token] + +# +# Options defined in keystone +# + +# External auth mechanisms that should add bind information to +# token, e.g., kerberos,x509. (list value) +#bind= + +# Enforcement policy on tokens presented to Keystone with bind +# information. One of disabled, permissive, strict, required +# or a specifically required bind mode, e.g., kerberos or x509 +# to require binding to that authentication. (string value) +#enforce_token_bind=permissive + +# Amount of time a token should remain valid (in seconds). +# (integer value) +#expiration=3600 + +# Controls the token construction, validation, and revocation +# operations. Core providers are +# "keystone.token.providers.[pkiz|pki|uuid].Provider". The +# default provider is pkiz. (string value) +#provider= + +# Token persistence backend driver. (string value) +#driver=keystone.token.backends.sql.Token + +# Toggle for token system cacheing. This has no effect unless +# global caching is enabled. (boolean value) +#caching=true + +# Time to cache the revocation list and the revocation events +# if revoke extension is enabled (in seconds). This has no +# effect unless global and token caching are enabled. (integer +# value) +#revocation_cache_time=3600 + +# Time to cache tokens (in seconds). This has no effect unless +# global and token caching are enabled. (integer value) +#cache_time= + +# Revoke token by token identifier. Setting revoke_by_id to +# true enables various forms of enumerating tokens, e.g. `list +# tokens for user`. These enumerations are processed to +# determine the list of tokens to revoke. Only disable if you +# are switching to using the Revoke extension with a backend +# other than KVS, which stores events in memory. (boolean +# value) +#revoke_by_id=true + +# The hash algorithm to use for PKI tokens. This can be set to +# any algorithm that hashlib supports. WARNING: Before +# changing this value, the auth_token middleware must be +# configured with the hash_algorithms, otherwise token +# revocation will not be processed correctly. (string value) +#hash_algorithm=md5 + + +[trust] + +# +# Options defined in keystone +# + +# Delegation and impersonation features can be optionally +# disabled. 
(boolean value) +#enabled=true + +# Trust backend driver. (string value) +#driver=keystone.trust.backends.sql.Trust + + diff --git a/openstack/etc/keystone/logging.conf b/openstack/etc/keystone/logging.conf new file mode 100644 index 00000000..21d43c8d --- /dev/null +++ b/openstack/etc/keystone/logging.conf @@ -0,0 +1,39 @@ +[loggers] +keys=root + +[formatters] +keys=normal,normal_with_name,debug + +[handlers] +keys=production,file,devel + +[logger_root] +level=WARNING +handlers=file + +[handler_production] +class=handlers.SysLogHandler +level=ERROR +formatter=normal_with_name +args=(('127.0.0.1', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) + +[handler_file] +class=FileHandler +level=DEBUG +formatter=normal_with_name +args=('/var/log/keystone/keystone.log', 'a') + +[handler_devel] +class=StreamHandler +level=NOTSET +formatter=debug +args=(sys.stdout,) + +[formatter_normal] +format=%(asctime)s %(levelname)s %(message)s + +[formatter_normal_with_name] +format=(%(name)s): %(asctime)s %(levelname)s %(message)s + +[formatter_debug] +format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s diff --git a/openstack/etc/keystone/policy.json b/openstack/etc/keystone/policy.json new file mode 100644 index 00000000..9c7e646e --- /dev/null +++ b/openstack/etc/keystone/policy.json @@ -0,0 +1,144 @@ +{ + "admin_required": "role:admin or is_admin:1", + "service_role": "role:service", + "service_or_admin": "rule:admin_required or rule:service_role", + "owner" : "user_id:%(user_id)s", + "admin_or_owner": "rule:admin_required or rule:owner", + + "default": "rule:admin_required", + + "identity:get_region": "", + "identity:list_regions": "", + "identity:create_region": "rule:admin_required", + "identity:update_region": "rule:admin_required", + "identity:delete_region": "rule:admin_required", + + "identity:get_service": "rule:admin_required", + "identity:list_services": "rule:admin_required", + "identity:create_service": "rule:admin_required", + "identity:update_service": "rule:admin_required", + "identity:delete_service": "rule:admin_required", + + "identity:get_endpoint": "rule:admin_required", + "identity:list_endpoints": "rule:admin_required", + "identity:create_endpoint": "rule:admin_required", + "identity:update_endpoint": "rule:admin_required", + "identity:delete_endpoint": "rule:admin_required", + + "identity:get_domain": "rule:admin_required", + "identity:list_domains": "rule:admin_required", + "identity:create_domain": "rule:admin_required", + "identity:update_domain": "rule:admin_required", + "identity:delete_domain": "rule:admin_required", + + "identity:get_project": "rule:admin_required", + "identity:list_projects": "rule:admin_required", + "identity:list_user_projects": "rule:admin_or_owner", + "identity:create_project": "rule:admin_required", + "identity:update_project": "rule:admin_required", + "identity:delete_project": "rule:admin_required", + + "identity:get_user": "rule:admin_required", + "identity:list_users": "rule:admin_required", + "identity:create_user": "rule:admin_required", + "identity:update_user": "rule:admin_required", + "identity:delete_user": "rule:admin_required", + "identity:change_password": "rule:admin_or_owner", + + "identity:get_group": "rule:admin_required", + "identity:list_groups": "rule:admin_required", + "identity:list_groups_for_user": "rule:admin_or_owner", + "identity:create_group": "rule:admin_required", + "identity:update_group": "rule:admin_required", + "identity:delete_group": "rule:admin_required", + 
"identity:list_users_in_group": "rule:admin_required", + "identity:remove_user_from_group": "rule:admin_required", + "identity:check_user_in_group": "rule:admin_required", + "identity:add_user_to_group": "rule:admin_required", + + "identity:get_credential": "rule:admin_required", + "identity:list_credentials": "rule:admin_required", + "identity:create_credential": "rule:admin_required", + "identity:update_credential": "rule:admin_required", + "identity:delete_credential": "rule:admin_required", + + "identity:ec2_get_credential": "rule:admin_or_owner", + "identity:ec2_list_credentials": "rule:admin_or_owner", + "identity:ec2_create_credential": "rule:admin_or_owner", + "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", + + "identity:get_role": "rule:admin_required", + "identity:list_roles": "rule:admin_required", + "identity:create_role": "rule:admin_required", + "identity:update_role": "rule:admin_required", + "identity:delete_role": "rule:admin_required", + + "identity:check_grant": "rule:admin_required", + "identity:list_grants": "rule:admin_required", + "identity:create_grant": "rule:admin_required", + "identity:revoke_grant": "rule:admin_required", + + "identity:list_role_assignments": "rule:admin_required", + + "identity:get_policy": "rule:admin_required", + "identity:list_policies": "rule:admin_required", + "identity:create_policy": "rule:admin_required", + "identity:update_policy": "rule:admin_required", + "identity:delete_policy": "rule:admin_required", + + "identity:check_token": "rule:admin_required", + "identity:validate_token": "rule:service_or_admin", + "identity:validate_token_head": "rule:service_or_admin", + "identity:revocation_list": "rule:service_or_admin", + "identity:revoke_token": "rule:admin_or_owner", + + "identity:create_trust": "user_id:%(trust.trustor_user_id)s", + "identity:get_trust": "rule:admin_or_owner", + "identity:list_trusts": "", + "identity:list_roles_for_trust": "", + "identity:check_role_for_trust": "", + "identity:get_role_for_trust": "", + "identity:delete_trust": "", + + "identity:create_consumer": "rule:admin_required", + "identity:get_consumer": "rule:admin_required", + "identity:list_consumers": "rule:admin_required", + "identity:delete_consumer": "rule:admin_required", + "identity:update_consumer": "rule:admin_required", + + "identity:authorize_request_token": "rule:admin_required", + "identity:list_access_token_roles": "rule:admin_required", + "identity:get_access_token_role": "rule:admin_required", + "identity:list_access_tokens": "rule:admin_required", + "identity:get_access_token": "rule:admin_required", + "identity:delete_access_token": "rule:admin_required", + + "identity:list_projects_for_endpoint": "rule:admin_required", + "identity:add_endpoint_to_project": "rule:admin_required", + "identity:check_endpoint_in_project": "rule:admin_required", + "identity:list_endpoints_for_project": "rule:admin_required", + "identity:remove_endpoint_from_project": "rule:admin_required", + + "identity:create_identity_provider": "rule:admin_required", + "identity:list_identity_providers": "rule:admin_required", + "identity:get_identity_providers": "rule:admin_required", + "identity:update_identity_provider": "rule:admin_required", + "identity:delete_identity_provider": "rule:admin_required", + + "identity:create_protocol": "rule:admin_required", + "identity:update_protocol": "rule:admin_required", + "identity:get_protocol": "rule:admin_required", + "identity:list_protocols": 
"rule:admin_required", + "identity:delete_protocol": "rule:admin_required", + + "identity:create_mapping": "rule:admin_required", + "identity:get_mapping": "rule:admin_required", + "identity:list_mappings": "rule:admin_required", + "identity:delete_mapping": "rule:admin_required", + "identity:update_mapping": "rule:admin_required", + + "identity:list_projects_for_groups": "", + "identity:list_domains_for_groups": "", + + "identity:list_revoke_events": "" +} diff --git a/openstack/etc/logrotate.d/openstack-glance-api b/openstack/etc/logrotate.d/openstack-glance-api new file mode 100644 index 00000000..fa9b0881 --- /dev/null +++ b/openstack/etc/logrotate.d/openstack-glance-api @@ -0,0 +1,7 @@ +/var/log/glance/glance-api.log { + daily + missingok + compress + delaycompress + notifempty +} diff --git a/openstack/etc/logrotate.d/openstack-glance-registry b/openstack/etc/logrotate.d/openstack-glance-registry new file mode 100644 index 00000000..830fa793 --- /dev/null +++ b/openstack/etc/logrotate.d/openstack-glance-registry @@ -0,0 +1,7 @@ +/var/log/glance/glance-registry.log { + daily + missingok + compress + delaycompress + notifempty +} diff --git a/openstack/etc/logrotate.d/openstack-keystone b/openstack/etc/logrotate.d/openstack-keystone new file mode 100644 index 00000000..2709c72a --- /dev/null +++ b/openstack/etc/logrotate.d/openstack-keystone @@ -0,0 +1,8 @@ +/var/log/keystone/*.log { + daily + missingok + rotate 5 + compress + minsize 100k + copytruncate +} \ No newline at end of file diff --git a/openstack/etc/neutron/api-paste.ini b/openstack/etc/neutron/api-paste.ini new file mode 100644 index 00000000..24193fcd --- /dev/null +++ b/openstack/etc/neutron/api-paste.ini @@ -0,0 +1,30 @@ +[composite:neutron] +use = egg:Paste#urlmap +/: neutronversions +/v2.0: neutronapi_v2_0 + +[composite:neutronapi_v2_0] +use = call:neutron.auth:pipeline_factory +noauth = request_id catch_errors extensions neutronapiapp_v2_0 +keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0 + +[filter:request_id] +paste.filter_factory = neutron.openstack.common.middleware.request_id:RequestIdMiddleware.factory + +[filter:catch_errors] +paste.filter_factory = neutron.openstack.common.middleware.catch_errors:CatchErrorsMiddleware.factory + +[filter:keystonecontext] +paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory + +[filter:extensions] +paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory + +[app:neutronversions] +paste.app_factory = neutron.api.versions:Versions.factory + +[app:neutronapiapp_v2_0] +paste.app_factory = neutron.api.v2.router:APIRouter.factory diff --git a/openstack/etc/neutron/dhcp_agent.ini b/openstack/etc/neutron/dhcp_agent.ini new file mode 100644 index 00000000..de177b6f --- /dev/null +++ b/openstack/etc/neutron/dhcp_agent.ini @@ -0,0 +1,89 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +debug = True +verbose = True + +# The DHCP agent will resync its state with Neutron to recover from any +# transient notification or rpc errors. The interval is number of +# seconds between attempts. +# resync_interval = 5 + +# The DHCP agent requires an interface driver be set. Choose the one that best +# matches your plugin. 
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IP (Must have a kernel built with CONFIG_NET_NS=y and
+# an iproute2 package that supports namespaces).
+use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+enable_isolated_metadata = True
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
+# is connected to a Neutron router from which the VMs send metadata
+# requests. In this case DHCP Option 121 will not be injected in VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+# enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+# dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
+
+# Limit number of leases to prevent a denial-of-service.
+# dnsmasq_lease_max = 16777216
+
+# Location of the DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with an ALARMCLOCK error.
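+# For example (value illustrative, not a recommendation), a heavily loaded
+# host where ovs-vsctl responds slowly might need a larger timeout:
+# ovs_vsctl_timeout = 30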
+# ovs_vsctl_timeout = 10
diff --git a/openstack/etc/neutron/fwaas_driver.ini b/openstack/etc/neutron/fwaas_driver.ini
new file mode 100644
index 00000000..41f761ab
--- /dev/null
+++ b/openstack/etc/neutron/fwaas_driver.ini
@@ -0,0 +1,3 @@
+[fwaas]
+#driver = neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
+#enabled = True
diff --git a/openstack/etc/neutron/l3_agent.ini b/openstack/etc/neutron/l3_agent.ini
new file mode 100644
index 00000000..e29c88c4
--- /dev/null
+++ b/openstack/etc/neutron/l3_agent.ini
@@ -0,0 +1,103 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = True
+verbose = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that support the L3 agent
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IP (Must have a kernel built with CONFIG_NET_NS=y and
+# an iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set to False then the agent can only configure one router.
+# This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow the L3 agent to support multiple external
+# networks, both the external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured. This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway.
+# handle_internal_only_routers = True
+
+# Name of bridge used for external network traffic. This should be set to an
+# empty value for the Linux bridge. When this parameter is set, each L3 agent
+# can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+# metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
+# to disable this feature.
+# send_arp_for_ha = 3
+
+# Seconds between re-syncs of routers' data, if needed
+# periodic_interval = 40
+
+# Seconds to wait after starting the agent before
+# syncing routers' data
+# periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with an ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
+
+# The working mode for the agent. Allowed values are:
+# - legacy: this preserves the existing behavior where the L3 agent is
+#   deployed on a centralized networking node to provide L3 services
+#   like DNAT and SNAT. Use this mode if you do not want to adopt DVR.
+# - dvr: this mode enables DVR functionality, and must be used for an L3
+#   agent that runs on a compute host.
+# - dvr_snat: this enables centralized SNAT support in conjunction with
+#   DVR. This mode must be used for an L3 agent running on a centralized
+#   node (or in single-host deployments, e.g. devstack).
+# agent_mode = legacy
+
+# Location to store keepalived and all HA configurations
+# ha_confs_path = $state_path/ha_confs
+
+# VRRP authentication type AH/PASS
+# ha_vrrp_auth_type = PASS
+
+# VRRP authentication password
+# ha_vrrp_auth_password =
+
+# The advertisement interval in seconds
+# ha_vrrp_advert_int = 2
diff --git a/openstack/etc/neutron/lbaas_agent.ini b/openstack/etc/neutron/lbaas_agent.ini
new file mode 100644
index 00000000..8d231b5c
--- /dev/null
+++ b/openstack/etc/neutron/lbaas_agent.ini
@@ -0,0 +1,42 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output).
+# debug = False
+
+# The LBaaS agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is the number of
+# seconds between attempts.
+# periodic_interval = 10
+
+# LBaaS requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent requires drivers to manage the loadbalancer. HAProxy is the open-source version.
+# Multiple device drivers reflecting different service providers could be specified:
+# device_driver = path.to.provider1.driver.Driver
+# device_driver = path.to.provider2.driver.Driver
+# Default is:
+# device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver
+
+[haproxy]
+# Location to store config and state files
+# loadbalancer_state_path = $state_path/lbaas
+
+# The user group
+# user_group = nogroup
+
+# When deleting and re-adding the same VIP, send this many gratuitous ARPs to flush
+# the ARP cache in the router. Set it below or equal to 0 to disable this feature.
+# send_gratuitous_arp = 3
diff --git a/openstack/etc/neutron/metadata_agent.ini b/openstack/etc/neutron/metadata_agent.ini
new file mode 100644
index 00000000..6a3d0102
--- /dev/null
+++ b/openstack/etc/neutron/metadata_agent.ini
@@ -0,0 +1,60 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = True
+verbose = True
+
+# The Neutron user information for accessing the Neutron API.
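+# Note: the ##UPPER_CASE## tokens below are deployment-time placeholders;
+# they are assumed to be filled in by the accompanying .configure scripts
+# rather than edited by hand.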
+auth_url = ##KEYSTONE_INTERNAL_URL## +auth_region = RegionOne +# Turn off verification of the certificate for ssl +# auth_insecure = False +# Certificate Authority public key (CA cert) file for ssl +# auth_ca_cert = +admin_tenant_name = service +admin_user = ##NEUTRON_SERVICE_USER## +admin_password = ##NEUTRON_SERVICE_PASSWORD## + +# Network service endpoint type to pull from the keystone catalog +# endpoint_type = adminURL + +# IP address used by Nova metadata server +nova_metadata_ip = ##NOVA_HOST## + +# TCP Port used by Nova metadata server +# nova_metadata_port = 8775 + +# Which protocol to use for requests to Nova metadata server, http or https +# nova_metadata_protocol = http + +# Whether insecure SSL connection should be accepted for Nova metadata server +# requests +# nova_metadata_insecure = False + +# Client certificate for nova api, needed when nova api requires client +# certificates +# nova_client_cert = + +# Private key for nova client certificate +# nova_client_priv_key = + +# When proxying metadata requests, Neutron signs the Instance-ID header with a +# shared secret to prevent spoofing. You may select any string for a secret, +# but it must match here and in the configuration used by the Nova Metadata +# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret +metadata_proxy_shared_secret = ##METADATA_PROXY_SHARED_SECRET## + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# Number of separate worker processes for metadata server. Defaults to +# half the number of CPU cores +# metadata_workers = + +# Number of backlog requests to configure the metadata server socket with +# metadata_backlog = 4096 + +# URL to connect to the cache backend. +# default_ttl=0 parameter will cause cache entries to never expire. +# Otherwise default_ttl specifies time in seconds a cache entry is valid for. +# No cache is used in case no value is passed. +# cache_url = memory://?default_ttl=5 diff --git a/openstack/etc/neutron/metering_agent.ini b/openstack/etc/neutron/metering_agent.ini new file mode 100644 index 00000000..88826ce7 --- /dev/null +++ b/openstack/etc/neutron/metering_agent.ini @@ -0,0 +1,18 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = True + +# Default driver: +# driver = neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver +# Example of non-default driver +# driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver + +# Interval between two metering measures +# measure_interval = 30 + +# Interval between two metering reports +# report_interval = 300 + +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# use_namespaces = True diff --git a/openstack/etc/neutron/neutron.conf b/openstack/etc/neutron/neutron.conf new file mode 100644 index 00000000..1e832ccd --- /dev/null +++ b/openstack/etc/neutron/neutron.conf @@ -0,0 +1,642 @@ +[DEFAULT] +# Print more verbose output (set logging level to INFO instead of default WARNING level). +verbose = True + +# =========Start Global Config Option for Distributed L3 Router=============== +# Setting the "router_distributed" flag to "True" will default to the creation +# of distributed tenant routers. The admin can override this flag by specifying +# the type of the router on the create request (admin-only attribute). Default +# value is "False" to support legacy mode (centralized) routers. 
+#
+# router_distributed = False
+#
+# ===========End Global Config Option for Distributed L3 Router===============
+
+neutron_metadata_proxy_shared_secret=openstack
+service_neutron_metadata_proxy=True
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = True
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog                           -> syslog
+# log_file and log_dir                 -> log_dir/log_file
+# (not log_file) and log_dir           -> log_dir/{binary_name}.log
+# use_stderr                           -> stderr
+# (not use_stderr) and (not log_file)  -> stdout
+# publish_errors                       -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+# log_dir =
+
+# publish_errors = False
+
+# Address to bind the API server to
+# bind_host = 0.0.0.0
+
+# Port to bind the API server to
+# bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+service_plugins = router
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone' (default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# DVR Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be randomly
+# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
+# avoid mixing them up with MACs allocated for tenant ports.
+# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
+# The default is 3 octet
+# dvr_base_mac = fa:16:3f:00:00:00
+
+# Maximum number of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds). Use -1 to
+# tell dnsmasq to use infinite lease times.
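+# For example (illustrative), to hand out infinite leases instead of the
+# one-day default shown below:
+# dhcp_lease_duration = -1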
+# dhcp_lease_duration = 86400 + +# Allow sending resource operation notification to DHCP agent +# dhcp_agent_notification = True + +# Enable or disable bulk create/update/delete operations +# allow_bulk = True +# Enable or disable pagination +# allow_pagination = False +# Enable or disable sorting +# allow_sorting = False +# Enable or disable overlapping IPs for subnets +# Attention: the following parameter MUST be set to False if Neutron is +# being used in conjunction with nova security groups +allow_overlapping_ips = True +# Ensure that configured gateway is on subnet. For IPv6, validate only if +# gateway is not a link local address. Deprecated, to be removed during the +# K release, at which point the check will be mandatory. +# force_gateway_on_subnet = True + +# Default maximum number of items returned in a single response, +# value == infinite and value < 0 means no max limit, and value must +# be greater than 0. If the number of items requested is greater than +# pagination_max_limit, server will just return pagination_max_limit +# of number of items. +# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# Maximum number of routes per router +# max_routes = 30 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +# agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted +# networks to first DHCP agent which sends get_active_networks message to +# neutron server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to first L3 agent which sends sync_routers message to neutron server +# router_auto_schedule = True + +# Allow automatic rescheduling of routers from dead L3 agents with +# admin_state_up set to True to alive agents. +# allow_automatic_l3agent_failover = False + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. +# dhcp_agents_per_network = 1 + +# =========== end of items for agent scheduler extension ===== + +# =========== items for l3 extension ============== +# Enable high availability for virtual routers. +# l3_ha = False +# +# Maximum number of l3 agents which a HA router will be scheduled on. If it +# is set to 0 the router will be scheduled on every agent. +# max_l3_agents_per_router = 3 +# +# Minimum number of l3 agents which a HA router will be scheduled on. The +# default value is 2. 
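+# For example (values illustrative), a deployment with three network nodes
+# could enable HA routers with:
+# l3_ha = True
+# max_l3_agents_per_router = 3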
+# min_l3_agents_per_router = 2
+#
+# CIDR of the administrative network if HA mode is enabled
+# l3_ha_net_cidr = 169.254.192.0/18
+# =========== end of items for l3 extension =======
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+# api_workers = 0
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+# rpc_workers = 0
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://onenode:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = ##NOVA_REGION##
+
+# Username for connection to nova in admin context
+nova_admin_username = ##NOVA_SERVICE_USER##
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = ##SERVICE_TENANT_ID##
+
+# Password for connection to nova in admin context.
+nova_admin_password = ##NOVA_SERVICE_PASSWORD##
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = ##KEYSTONE_ADMIN_URL##
+
+# CA file for novaclient to verify server certificates
+# nova_ca_certificates_file =
+
+# Boolean to control ignoring SSL errors on the nova url
+# nova_api_insecure = False
+
+# Number of seconds between sending events to nova if there are any events to send
+# send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname.
(string value) +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +rabbit_host=##RABBITMQ_HOST## + +# The RabbitMQ broker port where a single node is used. +# (integer value) +rabbit_port=##RABBITMQ_PORT## + +# RabbitMQ HA cluster host:port pairs. (list value) +#rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +#rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +rabbit_userid=##RABBITMQ_USER## + +# The RabbitMQ password. (string value) +rabbit_password=##RABBITMQ_PASSWORD## + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. 
(integer value)
+#rpc_zmq_topic_backlog=
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=oslo
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+notification_driver=neutron.openstack.common.notifier.rpc_notifier
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+[matchmaker_redis]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=localhost
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+[quotas]
+# Default driver to use for quota checks
+# quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+# quota_items = network,subnet,port
+
+# Default number of resources allowed per tenant. A negative value means
+# unlimited.
+# default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+# quota_network = 10
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+# quota_subnet = 10
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+# quota_port = 50
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+# quota_security_group = 10
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+# quota_security_group_rule = 100
+
+# Number of VIPs allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back-end, a health monitor is a
+# resource consumer and that is the reason why quota is possible.
+# quota_health_monitor = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+# Number of firewalls allowed per tenant. A negative value means unlimited.
+# quota_firewall = 1
+
+# Number of firewall policies allowed per tenant. A negative value means
+# unlimited.
+# quota_firewall_policy = 1
+
+# Number of firewall rules allowed per tenant. A negative value means
+# unlimited.
+# quota_firewall_rule = 100
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+# root_helper = sudo
+root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+# report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = ##KEYSTONE_PUBLIC_URL##
+identity_uri = ##IDENTITY_URI##
+admin_tenant_name = service
+admin_user = ##NEUTRON_SERVICE_USER##
+admin_password = ##NEUTRON_SERVICE_PASSWORD##
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@localhost:3306/neutron
+# Replace localhost above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite://
+# NOTE: In deployment the [database] section and its connection attribute may
+# be set in the corresponding core plugin '.ini' file. However, it is suggested
+# to put the [database] section and its connection attribute in this
+# configuration file.
+#connection=sqlite:////var/lib/neutron/neutron.sqlite
+connection=postgresql://##NEUTRON_DB_USER##:##NEUTRON_DB_PASSWORD##@onenode/neutron
+
+# Database engine for which script will be generated when using offline
+# migration
+# engine =
+
+# The SQLAlchemy connection string used to connect to the slave database
+# slave_connection =
+
+# Database reconnection retry times - in the event connectivity is lost;
+# setting to -1 implies an infinite retry count
+# max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+# retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+# min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# max_pool_size = 10
+
+# Timeout in seconds before idle sql connections are reaped
+# idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+# max_overflow = 20
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+# connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings
+# connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service_type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option; example for the default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# Example of a non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise, comment out the HA Proxy line.
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# Uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
+# Uncomment the line below to use the A10 Networks LBaaS driver. Requires 'pip install a10-neutron-lbaas'.
+#service_provider = LOADBALANCER:A10Networks:neutron.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default
+# Uncomment the following line to test the LBaaS v2 API _WITHOUT_ a real backend
+# service_provider = LOADBALANCER:LoggingNoop:neutron.services.loadbalancer.drivers.logging_noop.driver.LoggingNoopLoadBalancerDriver:default
diff --git a/openstack/etc/neutron/plugins/bigswitch/restproxy.ini b/openstack/etc/neutron/plugins/bigswitch/restproxy.ini
new file mode 100644
index 00000000..36e99bd7
--- /dev/null
+++ b/openstack/etc/neutron/plugins/bigswitch/restproxy.ini
@@ -0,0 +1,114 @@
+# Config file for neutron-proxy-plugin.
+
+[restproxy]
+# All configuration for this plugin is in section '[restproxy]'
+#
+# The following parameters are supported:
+#   servers               : <host:port>[,<host:port>]* (Error if not set)
+#   server_auth           : <username:password> (default: no auth)
+#   server_ssl            : True | False (default: True)
+#   ssl_cert_directory    : <path> (default: /etc/neutron/plugins/bigswitch/ssl)
+#   no_ssl_validation     : True | False (default: False)
+#   ssl_sticky            : True | False (default: True)
+#   sync_data             : True | False (default: False)
+#   auto_sync_on_failure  : True | False (default: True)
+#   consistency_interval  : <integer seconds> (default: 60 seconds)
+#   server_timeout        : <integer seconds> (default: 10 seconds)
+#   neutron_id            : <string> (default: neutron-<hostname>)
+#   add_meta_server_route : True | False (default: True)
+#   thread_pool_size      : <int> (default: 4)
+
+# A comma separated list of BigSwitch or Floodlight servers and port numbers. The plugin proxies the requests to the BigSwitch/Floodlight server, which performs the networking configuration. Note that only one server is needed per deployment, but you may wish to deploy multiple servers to support failover.
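+# For example (addresses illustrative), two controllers for failover:
+# servers=10.10.10.1:8080,10.10.10.2:8080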
+servers=127.0.0.1:8080 + +# The username and password for authenticating against the BigSwitch or Floodlight controller. +# server_auth=username:password + +# Use SSL when connecting to the BigSwitch or Floodlight controller. +# server_ssl=True + +# Directory which contains the ca_certs and host_certs to be used to validate +# controller certificates. +# ssl_cert_directory=/etc/neutron/plugins/bigswitch/ssl/ + +# If a certificate does not exist for a controller, trust and store the first +# certificate received for that controller and use it to validate future +# connections to that controller. +# ssl_sticky=True + +# Do not validate the controller certificates for SSL +# Warning: This will not provide protection against man-in-the-middle attacks +# no_ssl_validation=False + +# Sync data on connect +# sync_data=False + +# If neutron fails to create a resource because the backend controller +# doesn't know of a dependency, automatically trigger a full data +# synchronization to the controller. +# auto_sync_on_failure=True + +# Time between verifications that the backend controller +# database is consistent with Neutron. (0 to disable) +# consistency_interval = 60 + +# Maximum number of seconds to wait for proxy request to connect and complete. +# server_timeout=10 + +# User defined identifier for this Neutron deployment +# neutron_id = + +# Flag to decide if a route to the metadata server should be injected into the VM +# add_meta_server_route = True + +# Number of threads to use to handle large volumes of port creation requests +# thread_pool_size = 4 + +[nova] +# Specify the VIF_TYPE that will be controlled on the Nova compute instances +# options: ivs or ovs +# default: ovs +# vif_type = ovs + +# Overrides for vif types based on nova compute node host IDs +# Comma separated list of host IDs to fix to a specific VIF type +# The VIF type is taken from the end of the configuration item +# node_override_vif_<vif_type> +# For example, the following would set the VIF type to IVS for +# host-id1 and host-id2 +# node_override_vif_ivs=host-id1,host-id2 + +[router] +# Specify the default router rules installed in newly created tenant routers +# Specify multiple times for multiple rules +# Format is <tenant>:<source>:<destination>:<action> +# Optionally, a comma-separated list of nexthops may be included after <action> +# Use an * to specify default for all tenants +# Default is any any allow for all tenants +# tenant_default_router_rule=*:any:any:permit + +# Maximum number of rules that a single router may have +# Default is 200 +# max_router_rules=200 + +[restproxyagent] + +# Specify the name of the bridge used on compute nodes +# for attachment. +# Default: br-int +# integration_bridge=br-int + +# Change the frequency of polling by the restproxy agent. +# Value is seconds +# Default: 5 +# polling_interval=5 + +# Virtual switch type on the compute node. +# Options: ovs or ivs +# Default: ovs +# virtual_switch_type = ovs + +[securitygroup] +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True diff --git a/openstack/etc/neutron/plugins/bigswitch/ssl/ca_certs/README b/openstack/etc/neutron/plugins/bigswitch/ssl/ca_certs/README new file mode 100644 index 00000000..e7e47a27 --- /dev/null +++ b/openstack/etc/neutron/plugins/bigswitch/ssl/ca_certs/README @@ -0,0 +1,3 @@ +Certificates in this folder will be used to +verify signatures for any controllers the plugin +connects to.
diff --git a/openstack/etc/neutron/plugins/bigswitch/ssl/host_certs/README b/openstack/etc/neutron/plugins/bigswitch/ssl/host_certs/README new file mode 100644 index 00000000..8f5f5e77 --- /dev/null +++ b/openstack/etc/neutron/plugins/bigswitch/ssl/host_certs/README @@ -0,0 +1,6 @@ +Certificates in this folder must be named after +the controller they are used to authenticate, +with a .pem extension. + +For example, the certificate for the controller +"192.168.0.1" should be named "192.168.0.1.pem". diff --git a/openstack/etc/neutron/plugins/brocade/brocade.ini b/openstack/etc/neutron/plugins/brocade/brocade.ini new file mode 100644 index 00000000..916e9e5d --- /dev/null +++ b/openstack/etc/neutron/plugins/brocade/brocade.ini @@ -0,0 +1,29 @@ +[switch] +# username = The SSH username to use +# password = The SSH password to use +# address = The address of the host to SSH to +# ostype = Should be NOS, but is unused otherwise +# +# Example: +# username = admin +# password = password +# address = 10.24.84.38 +# ostype = NOS + +[physical_interface] +# physical_interface = The network interface to use when creating a port +# +# Example: +# physical_interface = physnet1 + +[vlans] +# network_vlan_ranges = <physical network name>:nnnn:mmmm +# +# Example: +# network_vlan_ranges = physnet1:1000:2999 + +[linux_bridge] +# physical_interface_mappings = <physical network name>:<physical interface> +# +# Example: +# physical_interface_mappings = physnet1:em1 diff --git a/openstack/etc/neutron/plugins/cisco/cisco_cfg_agent.ini b/openstack/etc/neutron/plugins/cisco/cisco_cfg_agent.ini new file mode 100644 index 00000000..d99e8382 --- /dev/null +++ b/openstack/etc/neutron/plugins/cisco/cisco_cfg_agent.ini @@ -0,0 +1,15 @@ +[cfg_agent] +# (IntOpt) Interval in seconds for processing of service updates. +# That is when the config agent's process_services() loop executes +# and lets each service helper process its service resources. +# rpc_loop_interval = 10 + +# (StrOpt) Period-separated module path to the routing service helper class. +# routing_svc_helper_class = neutron.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper.RoutingServiceHelper + +# (IntOpt) Timeout value in seconds for connecting to a hosting device. +# device_connection_timeout = 30 + +# (IntOpt) The time in seconds until a backlogged hosting device is +# presumed dead or booted to an error state. +# hosting_device_dead_timeout = 300 diff --git a/openstack/etc/neutron/plugins/cisco/cisco_plugins.ini b/openstack/etc/neutron/plugins/cisco/cisco_plugins.ini new file mode 100644 index 00000000..a93bc7f1 --- /dev/null +++ b/openstack/etc/neutron/plugins/cisco/cisco_plugins.ini @@ -0,0 +1,107 @@ +[cisco] + +# (StrOpt) A short prefix to prepend to the VLAN number when creating a +# VLAN interface. For example, if an interface is being created for +# VLAN 2001 it will be named 'q-2001' using the default prefix. +# +# vlan_name_prefix = q- +# Example: vlan_name_prefix = vnet- + +# (StrOpt) A short prefix to prepend to the VLAN number when creating a +# provider VLAN interface. For example, if an interface is being created +# for provider VLAN 3003 it will be named 'p-3003' using the default prefix. +# +# provider_vlan_name_prefix = p- +# Example: provider_vlan_name_prefix = PV- + +# (BoolOpt) A flag indicating whether Openstack networking should manage the +# creation and removal of VLAN interfaces for provider networks on the Nexus +# switches.
If the flag is set to False then Openstack will not create or +# remove VLAN interfaces for provider networks, and the administrator needs +# to manage these interfaces manually or by external orchestration. +# +# provider_vlan_auto_create = True + +# (BoolOpt) A flag indicating whether Openstack networking should manage +# the adding and removing of provider VLANs from trunk ports on the Nexus +# switches. If the flag is set to False then Openstack will not add or +# remove provider VLANs from trunk ports, and the administrator needs to +# manage these operations manually or by external orchestration. +# +# provider_vlan_auto_trunk = True + +# (StrOpt) Period-separated module path to the model class to use for +# the Cisco neutron plugin. +# +# model_class = neutron.plugins.cisco.models.virt_phy_sw_v2.VirtualPhysicalSwitchModelV2 + +# (BoolOpt) A flag to enable Layer 3 support on the Nexus switches. +# Note: This feature is not supported on all models/versions of Cisco +# Nexus switches. To use this feature, all of the Nexus switches in the +# deployment must support it. +# nexus_l3_enable = False + +# (BoolOpt) A flag to enable round robin scheduling of routers for SVI. +# svi_round_robin = False + +# Cisco Nexus Switch configurations. +# Each switch to be managed by Openstack Neutron must be configured here. +# +# N1KV Format. +# [N1KV:<IP address of VSM>] +# username=<credential username> +# password=<credential password> +# +# Example: +# [N1KV:2.2.2.2] +# username=admin +# password=mySecretPassword + +[cisco_n1k] + +# (StrOpt) Specify the name of the integration bridge to which the VIFs are +# attached. +# Default value: br-int +# integration_bridge = br-int + +# (StrOpt) Name of the policy profile to be associated with a port when no +# policy profile is specified during port creates. +# Default value: service_profile +# default_policy_profile = service_profile + +# (StrOpt) Name of the policy profile to be associated with a port owned by +# network node (dhcp, router). +# Default value: dhcp_pp +# network_node_policy_profile = dhcp_pp + +# (StrOpt) Name of the network profile to be associated with a network when no +# network profile is specified during network creates. Admin should pre-create +# a network profile with this name. +# Default value: default_network_profile +# default_network_profile = network_pool + +# (IntOpt) Time in seconds for which the plugin polls the VSM for updates in +# policy profiles. +# Default value: 60 +# poll_duration = 60 + +# (BoolOpt) Specify whether tenants are restricted from accessing all the +# policy profiles. +# Default value: False, indicating all tenants can access all policy profiles. +# +# restrict_policy_profiles = False + +# (IntOpt) Number of threads to use to make HTTP requests to the VSM. +# Default value: 4 +# http_pool_size = 4 + +# (IntOpt) Timeout duration in seconds for the http request +# Default value: 15 +# http_timeout = 15 + +# (BoolOpt) Specify whether tenants are restricted from accessing network +# profiles belonging to other tenants. +# Default value: True, indicating other tenants cannot access network +# profiles belonging to a tenant.
+# +# restrict_network_profiles = True diff --git a/openstack/etc/neutron/plugins/cisco/cisco_router_plugin.ini b/openstack/etc/neutron/plugins/cisco/cisco_router_plugin.ini new file mode 100644 index 00000000..3ef271d2 --- /dev/null +++ b/openstack/etc/neutron/plugins/cisco/cisco_router_plugin.ini @@ -0,0 +1,76 @@ +[general] +#(IntOpt) Time in seconds between renewed scheduling attempts of non-scheduled routers +# backlog_processing_interval = 10 + +#(StrOpt) Name of the L3 admin tenant +# l3_admin_tenant = L3AdminTenant + +#(StrOpt) Name of management network for hosting device configuration +# management_network = osn_mgmt_nw + +#(StrOpt) Default security group applied on management port +# default_security_group = mgmt_sec_grp + +#(IntOpt) Seconds of no status update until a cfg agent is considered down +# cfg_agent_down_time = 60 + +#(StrOpt) Path to templates for hosting devices +# templates_path = /opt/stack/data/neutron/cisco/templates + +#(StrOpt) Path to config drive files for service VM instances +# service_vm_config_path = /opt/stack/data/neutron/cisco/config_drive + +#(BoolOpt) Ensure that Nova is running before attempting to create any VM +# ensure_nova_running = True + +[hosting_devices] +# Settings coupled to CSR1kv VM devices +# ------------------------------------- +#(StrOpt) Name of Glance image for CSR1kv +# csr1kv_image = csr1kv_openstack_img + +#(StrOpt) UUID of Nova flavor for CSR1kv +# csr1kv_flavor = 621 + +#(StrOpt) Plugging driver for CSR1kv +# csr1kv_plugging_driver = neutron.plugins.cisco.l3.plugging_drivers.n1kv_trunking_driver.N1kvTrunkingPlugDriver + +#(StrOpt) Hosting device driver for CSR1kv +# csr1kv_device_driver = neutron.plugins.cisco.l3.hosting_device_drivers.csr1kv_hd_driver.CSR1kvHostingDeviceDriver + +#(StrOpt) Config agent router service driver for CSR1kv +# csr1kv_cfgagent_router_driver = neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv.csr1kv_routing_driver.CSR1kvRoutingDriver + +#(StrOpt) Configdrive template file for CSR1kv +# csr1kv_configdrive_template = csr1kv_cfg_template + +#(IntOpt) Booting time in seconds before a CSR1kv becomes operational +# csr1kv_booting_time = 420 + +#(StrOpt) Username to use for CSR1kv configurations +# csr1kv_username = stack + +#(StrOpt) Password to use for CSR1kv configurations +# csr1kv_password = cisco + +[n1kv] +# Settings coupled to inter-working with N1kv plugin +# -------------------------------------------------- +#(StrOpt) Name of N1kv port profile for management ports +# management_port_profile = osn_mgmt_pp + +#(StrOpt) Name of N1kv port profile for T1 ports (i.e., ports carrying traffic +# from VXLAN segmented networks). +# t1_port_profile = osn_t1_pp + +#(StrOpt) Name of N1kv port profile for T2 ports (i.e., ports carrying traffic +# from VLAN segmented networks). +# t2_port_profile = osn_t2_pp + +#(StrOpt) Name of N1kv network profile for T1 networks (i.e., trunk networks +# for VXLAN segmented traffic). +# t1_network_profile = osn_t1_np + +#(StrOpt) Name of N1kv network profile for T2 networks (i.e., trunk networks +# for VLAN segmented traffic). 
+# t2_network_profile = osn_t2_np diff --git a/openstack/etc/neutron/plugins/cisco/cisco_vpn_agent.ini b/openstack/etc/neutron/plugins/cisco/cisco_vpn_agent.ini new file mode 100644 index 00000000..0aee17eb --- /dev/null +++ b/openstack/etc/neutron/plugins/cisco/cisco_vpn_agent.ini @@ -0,0 +1,26 @@ +[cisco_csr_ipsec] +# Status check interval in seconds, for VPNaaS IPSec connections used on CSR +# status_check_interval = 60 + +# Cisco CSR management port information for REST access used by VPNaaS +# TODO(pcm): Remove once CSR is integrated in as a Neutron router. +# +# Format is: +# [cisco_csr_rest:<public IP>] +# rest_mgmt = <mgmt port IP> +# tunnel_ip = <tunnel IP> +# username = <user> +# password = <password> +# timeout = <timeout> +# host = <hostname> +# tunnel_if = <tunnel I/F> +# +# where: +# public IP ----- Public IP address of router used with a VPN service (1:1 with CSR) +# tunnel IP ----- Public IP address of the CSR used for the IPSec tunnel +# mgmt port IP -- IP address of CSR for REST API access +# user ---------- Username for REST management port access to Cisco CSR +# password ------ Password for REST management port access to Cisco CSR +# timeout ------- REST request timeout to Cisco CSR (optional) +# hostname ------ Name of host where CSR is running as a VM +# tunnel I/F ---- CSR port name used for tunnels' IP address diff --git a/openstack/etc/neutron/plugins/embrane/heleos_conf.ini b/openstack/etc/neutron/plugins/embrane/heleos_conf.ini new file mode 100644 index 00000000..0ca9b46f --- /dev/null +++ b/openstack/etc/neutron/plugins/embrane/heleos_conf.ini @@ -0,0 +1,41 @@ +[heleos] +#configure the ESM management address +#in the first version of this plugin, only one ESM can be specified +#Example: +#esm_mgmt= + +#configure admin username and password +#admin_username= +#admin_password= + +#router image id +#Example: +#router_image=932ce713-e210-3d54-a0a5-518b0b5ee1b0 + +#mgmt shared security zone id +#defines the shared management security zone. Each tenant can have a private one configured through the ESM +#Example: +#mgmt_id=c0bc9b6c-f110-46cf-bb01-733bfe4b5a1a + +#in-band shared security zone id +#defines the shared in-band security zone. Each tenant can have a private one configured through the ESM +#Example: +#inband_id=a6b7999d-3806-4b04-81f6-e0c5c8271afc + +#oob-band shared security zone id +#defines the shared out-of-band security zone. Each tenant can have a private one configured through the ESM +#Example: +#oob_id=e7eda5cc-b977-46cb-9c14-cab43c1b7871 + +#dummy security zone id +#defines the dummy security zone ID. This security zone will be used by the DVAs with no neutron interfaces +#Example: +#dummy_utif_id=d9911310-25fc-4733-a2e0-c0eda024ef08 + +#resource pool id +#define the shared resource pool. Each tenant can have a private one configured through the ESM +#Example: +#resource_pool_id= + +#define whether the requests have to be executed asynchronously by the plugin or not +#async_requests= diff --git a/openstack/etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini b/openstack/etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini new file mode 100644 index 00000000..5eeec570 --- /dev/null +++ b/openstack/etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini @@ -0,0 +1,63 @@ +[hyperv] +# (StrOpt) Type of network to allocate for tenant networks. The +# default value 'local' is useful only for single-box testing and +# provides no connectivity between hosts. You MUST either change this +# to 'vlan' and configure network_vlan_ranges below, or change it to 'flat'. +# Set to 'none' to disable creation of tenant networks.
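+# (For a multi-node deployment, 'vlan' together with a matching +# network_vlan_ranges entry below is the usual choice.)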
+# +# tenant_network_type = local +# Example: tenant_network_type = vlan + +# (ListOpt) Comma-separated list of +# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges +# of VLAN IDs on named physical networks that are available for +# allocation. All physical networks listed are available for flat and +# VLAN provider network creation. Specified ranges of VLAN IDs are +# available for tenant network allocation if tenant_network_type is +# 'vlan'. If empty, only gre and local networks may be created. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999 + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +# (ListOpt) Comma separated list of <physical_network>:<vswitch> +# where the physical networks can be expressed with wildcards, +# e.g.: "*:external". +# The referred external virtual switches need to be already present on +# the Hyper-V server. +# If a given physical network name does not match any value in the list, +# the plugin will look for a virtual switch with the same name. +# +# physical_network_vswitch_mappings = *:external +# Example: physical_network_vswitch_mappings = net1:external1,net2:external2 + +# (StrOpt) Private virtual switch name used for local networking. +# +# local_network_vswitch = private +# Example: local_network_vswitch = custom_vswitch + +# (BoolOpt) Enables metrics collections for switch ports by using Hyper-V's +# metric APIs. Collected data can be retrieved by other apps and services, +# e.g.: Ceilometer. Requires Hyper-V / Windows Server 2012 and above. +# +# enable_metrics_collection = False + +#----------------------------------------------------------------------------- +# Sample Configurations. +#----------------------------------------------------------------------------- +# +# Neutron server: +# +# [HYPERV] +# tenant_network_type = vlan +# network_vlan_ranges = default:2000:3999 +# +# Agent running on Hyper-V node: +# +# [AGENT] +# polling_interval = 2 +# physical_network_vswitch_mappings = *:external +# local_network_vswitch = private diff --git a/openstack/etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini b/openstack/etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini new file mode 100644 index 00000000..0fab5070 --- /dev/null +++ b/openstack/etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini @@ -0,0 +1,50 @@ +[sdnve] +# (ListOpt) The IP address of one (or more) SDN-VE controllers +# Default value is: controller_ips = 127.0.0.1 +# Example: controller_ips = 127.0.0.1,127.0.0.2 +# (StrOpt) The integration bridge for OF based implementation +# The default value for integration_bridge is None +# Example: integration_bridge = br-int +# (ListOpt) The interface mapping connecting the integration +# bridge to external network as a list of physical network names and +# interfaces: <physical_network>:<interface> +# Example: interface_mappings = default:eth2 +# (BoolOpt) Used to reset the integration bridge, if it exists +# The default value for reset_bridge is True +# Example: reset_bridge = False +# (BoolOpt) Used to set the OVS controller as out-of-band +# The default value for out_of_band is True +# Example: out_of_band = False +# +# (BoolOpt) The fake controller for testing purposes +# Default value is: use_fake_controller = False +# (StrOpt) The port number for use with controller +# The default value for the port is 8443 +# Example: port = 8443 +# (StrOpt) The userid for use with controller +# The default value for the userid is admin +# Example: userid = sdnve_user +# (StrOpt) The password for use with controller +# The default value for the password is admin +# Example: password =
sdnve_password +# +# (StrOpt) The default type of tenants (and associated resources) +# Available choices are: OVERLAY or OF +# The default value for tenant type is OVERLAY +# Example: default_tenant_type = OVERLAY +# (StrOpt) The string in the tenant description that indicates an OF tenant +# Default value for OF tenants: of_signature = SDNVE-OF +# (StrOpt) The string in the tenant description that indicates an OVERLAY tenant +# Default value for OVERLAY tenants: overlay_signature = SDNVE-OVERLAY + +[sdnve_agent] +# (IntOpt) Agent's polling interval in seconds +# polling_interval = 2 +# (StrOpt) What to use for root helper +# The default value: root_helper = 'sudo' +# (BoolOpt) Whether to use rpc or not +# The default value: rpc = True + +[securitygroup] +# The security group is not supported: +# firewall_driver = neutron.agent.firewall.NoopFirewallDriver diff --git a/openstack/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini b/openstack/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini new file mode 100644 index 00000000..94fe9803 --- /dev/null +++ b/openstack/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini @@ -0,0 +1,78 @@ +[vlans] +# (StrOpt) Type of network to allocate for tenant networks. The +# default value 'local' is useful only for single-box testing and +# provides no connectivity between hosts. You MUST change this to +# 'vlan' and configure network_vlan_ranges below in order for tenant +# networks to provide connectivity between hosts. Set to 'none' to +# disable creation of tenant networks. +# +# tenant_network_type = local +# Example: tenant_network_type = vlan + +# (ListOpt) Comma-separated list of +# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges +# of VLAN IDs on named physical networks that are available for +# allocation. All physical networks listed are available for flat and +# VLAN provider network creation. Specified ranges of VLAN IDs are +# available for tenant network allocation if tenant_network_type is +# 'vlan'. If empty, only local networks may be created. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999 + +[linux_bridge] +# (ListOpt) Comma-separated list of +# <physical_network>:<physical_interface> tuples mapping physical +# network names to the agent's node-specific physical network +# interfaces to be used for flat and VLAN networks. All physical +# networks listed in network_vlan_ranges on the server should have +# mappings to appropriate interfaces on each agent. +# +# physical_interface_mappings = +# Example: physical_interface_mappings = physnet1:eth1 + +[vxlan] +# (BoolOpt) Enable VXLAN on the agent +# VXLAN support can be enabled when the agent is managed by the ml2 plugin +# using the linuxbridge mechanism driver. It has no effect when the +# monolithic linuxbridge plugin is used. +# enable_vxlan = False +# +# (IntOpt) use specific TTL for vxlan interface protocol packets +# ttl = +# +# (IntOpt) use specific TOS for vxlan interface protocol packets +# tos = +# +# (StrOpt) multicast group to use for broadcast emulation. +# This group must be the same on all the agents. +# vxlan_group = 224.0.0.1 +# +# (StrOpt) Local IP address to use for VXLAN endpoints (required) +# local_ip = +# +# (BoolOpt) Flag to enable l2population extension. This option should be used +# in conjunction with the ml2 plugin l2population mechanism driver (in that case, +# both linuxbridge and l2population mechanism drivers should be loaded).
+# It enables the plugin to populate the VXLAN forwarding table, in order to limit +# the use of broadcast emulation (multicast will be turned off if the kernel and +# iproute2 support unicast flooding; requires kernel 3.11 and iproute2 3.10) +# l2_population = False + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +# (BoolOpt) Enable server RPC compatibility with old (pre-havana) +# agents. +# +# rpc_support_old_agents = False +# Example: rpc_support_old_agents = True + +[securitygroup] +# Firewall driver for realizing neutron security group function +# firewall_driver = neutron.agent.firewall.NoopFirewallDriver +# Example: firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver + +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True diff --git a/openstack/etc/neutron/plugins/metaplugin/metaplugin.ini b/openstack/etc/neutron/plugins/metaplugin/metaplugin.ini new file mode 100644 index 00000000..2b9bfa5e --- /dev/null +++ b/openstack/etc/neutron/plugins/metaplugin/metaplugin.ini @@ -0,0 +1,31 @@ +# Config file for Metaplugin + +[meta] +# Comma separated list of flavor:neutron_plugin for plugins to load. +# Extension methods are searched in list order and the first one found is used. +plugin_list = 'ml2:neutron.plugins.ml2.plugin.Ml2Plugin,nvp:neutron.plugins.vmware.plugin.NsxPluginV2' + +# Comma separated list of flavor:neutron_plugin for L3 service plugins +# to load. +# This is intended for specifying L2 plugins which support L3 functions. +# If you use a router service plugin, leave this blank. +l3_plugin_list = + +# Default flavor to use, when flavor:network is not specified at network +# creation. +default_flavor = 'nvp' + +# Default L3 flavor to use, when flavor:router is not specified at router +# creation. +# Ignored if 'l3_plugin_list' is blank. +default_l3_flavor = + +# Comma separated list of supported extension aliases. +supported_extension_aliases = 'provider,binding,agent,dhcp_agent_scheduler' + +# Comma separated list of method:flavor to select specific plugin for a method. +# This has priority over method search order based on 'plugin_list'. +extension_map = 'get_port_stats:nvp' + +# Specifies flavor for plugin to handle 'q-plugin' RPC requests. +rpc_flavor = 'ml2' diff --git a/openstack/etc/neutron/plugins/midonet/midonet.ini b/openstack/etc/neutron/plugins/midonet/midonet.ini new file mode 100644 index 00000000..6cc02117 --- /dev/null +++ b/openstack/etc/neutron/plugins/midonet/midonet.ini @@ -0,0 +1,19 @@ + +[midonet] +# MidoNet API server URI +# midonet_uri = http://127.0.0.1:8080/midonet-api + +# MidoNet admin username +# username = admin + +# MidoNet admin password +# password = passw0rd + +# ID of the project that MidoNet admin user belongs to +# project_id = 77777777-7777-7777-7777-777777777777 + +# Virtual provider router ID +# provider_router_id = 00112233-0011-0011-0011-001122334455 + +# Path to midonet host uuid file +# midonet_host_uuid_path = /etc/midolman/host_uuid.properties diff --git a/openstack/etc/neutron/plugins/ml2/ml2_conf.ini b/openstack/etc/neutron/plugins/ml2/ml2_conf.ini new file mode 100644 index 00000000..58e5fe21 --- /dev/null +++ b/openstack/etc/neutron/plugins/ml2/ml2_conf.ini @@ -0,0 +1,85 @@ +[ml2] +# (ListOpt) List of network type driver entrypoints to be loaded from +# the neutron.ml2.type_drivers namespace.
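+# (Each name below refers to a Python entry point; only the drivers listed +# here are loaded at startup, so a network type is usable only if its driver +# appears in this list.)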
+# +# type_drivers = local,flat,vlan,gre,vxlan +# Example: type_drivers = flat,vlan,gre,vxlan +type_drivers = flat,gre + +# (ListOpt) Ordered list of network_types to allocate as tenant +# networks. The default value 'local' is useful for single-box testing +# but provides no connectivity between hosts. +# +# tenant_network_types = local +# Example: tenant_network_types = vlan,gre,vxlan +tenant_network_types = gre + +# (ListOpt) Ordered list of networking mechanism driver entrypoints +# to be loaded from the neutron.ml2.mechanism_drivers namespace. +# mechanism_drivers = +# Example: mechanism_drivers = openvswitch,mlnx +# Example: mechanism_drivers = arista +# Example: mechanism_drivers = cisco,logger +# Example: mechanism_drivers = openvswitch,brocade +# Example: mechanism_drivers = linuxbridge,brocade +mechanism_drivers = openvswitch + +# (ListOpt) Ordered list of extension driver entrypoints +# to be loaded from the neutron.ml2.extension_drivers namespace. +# extension_drivers = +# Example: extension_drivers = anewextensiondriver + +[ml2_type_flat] +# (ListOpt) List of physical_network names with which flat networks +# can be created. Use * to allow flat networks with arbitrary +# physical_network names. +# +# flat_networks = +# Example: flat_networks = physnet1,physnet2 +# Example: flat_networks = * +flat_networks = External + +[ml2_type_vlan] +# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples +# specifying physical_network names usable for VLAN provider and +# tenant networks, as well as ranges of VLAN tags on each +# physical_network available for allocation as tenant networks. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 +#network_vlan_ranges = Physnet1:100:200 + +[ml2_type_gre] +# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation +tunnel_id_ranges = 1:1000 + +[ml2_type_vxlan] +# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating +# ranges of VXLAN VNI IDs that are available for tenant network allocation. +# +# vni_ranges = + +# (StrOpt) Multicast group for the VXLAN interface. When configured, will +# enable sending all broadcast traffic to this multicast group. When left +# unconfigured, will disable multicast VXLAN mode. +# +# vxlan_group = +# Example: vxlan_group = 239.1.1.1 + +[securitygroup] +firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +enable_security_group = True + +# Use ipset to speed-up the iptables security groups. Enabling ipset support +# requires that ipset is installed on L2 agent node. +enable_ipset = True + +[ovs] +local_ip = onenode +enable_tunneling = True +bridge_mappings=External:br-ex,Physnet1:br-eth1 + +[agent] +tunnel_types = gre diff --git a/openstack/etc/neutron/plugins/ml2/ml2_conf_arista.ini b/openstack/etc/neutron/plugins/ml2/ml2_conf_arista.ini new file mode 100644 index 00000000..abaf5bc7 --- /dev/null +++ b/openstack/etc/neutron/plugins/ml2/ml2_conf_arista.ini @@ -0,0 +1,100 @@ +# Defines configuration options specific to the Arista ML2 Mechanism driver + +[ml2_arista] +# (StrOpt) EOS IP address. This is a required field. If not set, all +# communications to Arista EOS will fail +# +# eapi_host = +# Example: eapi_host = 192.168.0.1 +# +# (StrOpt) EOS command API username. This is a required field. +# If not set, all communications to Arista EOS will fail.
+# +# eapi_username = +# Example: eapi_username = admin +# +# (StrOpt) EOS command API password. This is a required field. +# If not set, all communications to Arista EOS will fail. +# +# eapi_password = +# Example: eapi_password = my_password +# +# (StrOpt) Defines if hostnames are sent to Arista EOS as FQDNs +# ("node1.domain.com") or as short names ("node1"). This is +# optional. If not set, a value of "True" is assumed. +# +# use_fqdn = +# Example: use_fqdn = True +# +# (IntOpt) Sync interval in seconds between Neutron plugin and EOS. +# This field defines how often the synchronization is performed. +# This is an optional field. If not set, a value of 180 seconds +# is assumed. +# +# sync_interval = +# Example: sync_interval = 60 +# +# (StrOpt) Defines Region Name that is assigned to this OpenStack Controller. +# This is useful when multiple OpenStack/Neutron controllers are +# managing the same Arista HW clusters. Note that this name must +# match with the region name registered (or known) to keystone +# service. Authentication with Keystone is performed by EOS. +# This is optional. If not set, a value of "RegionOne" is assumed. +# +# region_name = +# Example: region_name = RegionOne + + +[l3_arista] + +# (StrOpt) primary host IP address. This is a required field. If not set, all +# communications to Arista EOS will fail. This is the host where +# the primary router is created. +# +# primary_l3_host = +# Example: primary_l3_host = 192.168.10.10 +# +# (StrOpt) Primary host username. This is a required field. +# If not set, all communications to Arista EOS will fail. +# +# primary_l3_host_username = +# Example: primary_l3_host_username = admin +# +# (StrOpt) Primary host password. This is a required field. +# If not set, all communications to Arista EOS will fail. +# +# primary_l3_host_password = +# Example: primary_l3_host_password = my_password +# +# (StrOpt) IP address of the second Arista switch paired as +# MLAG (Multi-chassis Link Aggregation) with the first. +# This is an optional field; however, if the mlag_config flag is set, +# then it becomes required. If not set, all +# communications to Arista EOS will fail. If mlag_config is set +# to False, then this field is ignored +# +# secondary_l3_host = +# Example: secondary_l3_host = 192.168.10.20 +# +# (BoolOpt) Defines if Arista switches are configured in MLAG mode +# If yes, all L3 configuration is pushed to both switches +# automatically. If this flag is set, ensure that secondary_l3_host +# is set to the second switch's IP. +# This flag is optional. If not set, a value of "False" is assumed. +# +# mlag_config = +# Example: mlag_config = True +# +# (BoolOpt) Defines if the router is created in the default VRF or a +# specific VRF. This is optional. +# If not set, a value of "False" is assumed. +# +# Example: use_vrf = True +# +# (IntOpt) Sync interval in seconds between Neutron plugin and EOS. +# This field defines how often the synchronization is performed. +# This is an optional field. If not set, a value of 180 seconds +# is assumed.
+# +# l3_sync_interval = +# Example: l3_sync_interval = 60 diff --git a/openstack/etc/neutron/plugins/ml2/ml2_conf_brocade.ini b/openstack/etc/neutron/plugins/ml2/ml2_conf_brocade.ini new file mode 100644 index 00000000..67574110 --- /dev/null +++ b/openstack/etc/neutron/plugins/ml2/ml2_conf_brocade.ini @@ -0,0 +1,15 @@ +[ml2_brocade] +# username = +# password = +# address = +# ostype = NOS +# osversion = autodetect | n.n.n +# physical_networks = physnet1,physnet2 +# +# Example: +# username = admin +# password = password +# address = 10.24.84.38 +# ostype = NOS +# osversion = 4.1.1 +# physical_networks = physnet1,physnet2 diff --git a/openstack/etc/neutron/plugins/ml2/ml2_conf_cisco.ini b/openstack/etc/neutron/plugins/ml2/ml2_conf_cisco.ini new file mode 100644 index 00000000..1b69100e --- /dev/null +++ b/openstack/etc/neutron/plugins/ml2/ml2_conf_cisco.ini @@ -0,0 +1,118 @@ +[ml2_cisco] + +# (StrOpt) A short prefix to prepend to the VLAN number when creating a +# VLAN interface. For example, if an interface is being created for +# VLAN 2001 it will be named 'q-2001' using the default prefix. +# +# vlan_name_prefix = q- +# Example: vlan_name_prefix = vnet- + +# (BoolOpt) A flag to enable round robin scheduling of routers for SVI. +# svi_round_robin = False + +# +# (StrOpt) The name of the physical_network managed via the Cisco Nexus Switch. +# This string value must be present in the ml2_conf.ini network_vlan_ranges +# variable. +# +# managed_physical_network = +# Example: managed_physical_network = physnet1 + +# Cisco Nexus Switch configurations. +# Each switch to be managed by Openstack Neutron must be configured here. +# +# Cisco Nexus Switch Format. +# [ml2_mech_cisco_nexus:<IP address of switch>] +# <hostname>=<intf_type:port> (1) +# ssh_port=<ssh port> (2) +# username=<credential username> (3) +# password=<credential password> (4) +# +# (1) For each host connected to a port on the switch, specify the hostname +# and the Nexus physical port (interface) it is connected to. +# Valid intf_type's are 'ethernet' and 'port-channel'. +# The default setting for <intf_type> is 'ethernet' and need not be +# added to this setting. +# (2) The TCP port for connecting via SSH to manage the switch. This is +# port number 22 unless the switch has been configured otherwise. +# (3) The username for logging into the switch to manage it. +# (4) The password for logging into the switch to manage it. +# +# Example: +# [ml2_mech_cisco_nexus:1.1.1.1] +# compute1=1/1 +# compute2=ethernet:1/2 +# compute3=port-channel:1 +# ssh_port=22 +# username=admin +# password=mySecretPassword + +[ml2_cisco_apic] + +# Hostname:port list of APIC controllers +# apic_hosts = 1.1.1.1:80, 1.1.1.2:8080, 1.1.1.3:80 + +# Username for the APIC controller +# apic_username = user + +# Password for the APIC controller +# apic_password = password + +# Whether to use SSL for connecting to the APIC controller or not +# apic_use_ssl = True + +# How to map names to APIC: use_uuid or use_name +# apic_name_mapping = use_name + +# Names for APIC objects used by Neutron +# Note: When deploying multiple clouds against one APIC, +# these names must be unique between the clouds. +# apic_vmm_domain = openstack +# apic_vlan_ns_name = openstack_ns +# apic_node_profile = openstack_profile +# apic_entity_profile = openstack_entity +# apic_function_profile = openstack_function +# apic_app_profile_name = openstack_app +# Agent timers for State reporting and topology discovery +# apic_sync_interval = 30 +# apic_agent_report_interval = 30 +# apic_agent_poll_interval = 2 + +# Specify your network topology.
+# This section indicates how your compute nodes are connected to the fabric's +# switches and ports. The format is as follows: +# +# [apic_switch:<switch_id>] +# <compute_host>,<compute_host> = <switchport> +# +# You can have multiple sections, one for each switch in your fabric that is +# participating in Openstack, e.g.: +# +# [apic_switch:17] +# ubuntu,ubuntu1 = 1/10 +# ubuntu2,ubuntu3 = 1/11 +# +# [apic_switch:18] +# ubuntu5,ubuntu6 = 1/1 +# ubuntu7,ubuntu8 = 1/2 + +# Describe external connectivity. +# In this section you can specify the external network configuration in order +# for the plugin to be able to teach the fabric how to route the internal +# traffic to the outside world. The external connectivity configuration +# format is as follows: +# +# [apic_external_network:<network_name>] +# switch = <switch_id> +# port = <port> +# encap = <encapsulation> +# cidr_exposed = <CIDR exposed> +# gateway_ip = <gateway IP> +# +# An example follows: +# [apic_external_network:network_ext] +# switch=203 +# port=1/34 +# encap=vlan-100 +# cidr_exposed=10.10.40.2/16 +# gateway_ip=10.10.40.1 diff --git a/openstack/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini b/openstack/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini new file mode 100644 index 00000000..6ee4a4e0 --- /dev/null +++ b/openstack/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini @@ -0,0 +1,52 @@ +# Defines configuration options for the FSL SDN OS Mechanism Driver +# Cloud Resource Discovery (CRD) authorization credentials [ml2_fslsdn] +#(StrOpt) User name for authentication to CRD. +# e.g.: user12 +# +# crd_user_name = + +#(StrOpt) Password for authentication to CRD. +# e.g.: secret +# +# crd_password = + +#(StrOpt) Tenant name for CRD service. +# e.g.: service +# +# crd_tenant_name = + +#(StrOpt) CRD auth URL. +# e.g.: http://127.0.0.1:5000/v2.0/ +# +# crd_auth_url = + +#(StrOpt) URL for connecting to CRD Service. +# e.g.: http://127.0.0.1:9797 +# +# crd_url= + +#(IntOpt) Timeout value for connecting to CRD service +# in seconds, e.g.: 30 +# +# crd_url_timeout= + +#(StrOpt) Region name for connecting to CRD in +# admin context, e.g.: RegionOne +# +# crd_region_name= + +#(BoolOpt) If set, ignore any SSL validation issues (boolean value) +# e.g.: False +# +# crd_api_insecure= + +#(StrOpt) Authorization strategy for connecting to CRD in admin +# context, e.g.: keystone +# +# crd_auth_strategy= + +#(StrOpt) Location of CA certificates file to use for CRD client +# requests. +# +# crd_ca_certificates_file= diff --git a/openstack/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini b/openstack/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini new file mode 100644 index 00000000..46139aed --- /dev/null +++ b/openstack/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini @@ -0,0 +1,4 @@ +[eswitch] +# (StrOpt) Type of Network Interface to allocate for VM: +# mlnx_direct or hostdev according to libvirt terminology +# vnic_type = mlnx_direct diff --git a/openstack/etc/neutron/plugins/ml2/ml2_conf_ncs.ini b/openstack/etc/neutron/plugins/ml2/ml2_conf_ncs.ini new file mode 100644 index 00000000..dbbfcbd2 --- /dev/null +++ b/openstack/etc/neutron/plugins/ml2/ml2_conf_ncs.ini @@ -0,0 +1,28 @@ +# Defines configuration options specific to the Tail-f NCS Mechanism Driver + +[ml2_ncs] +# (StrOpt) Tail-f NCS HTTP endpoint for REST access to the OpenStack +# subtree. +# If this is not set then no HTTP requests will be made. +# +# url = +# Example: url = http://ncs/api/running/services/openstack + +# (StrOpt) Username for HTTP basic authentication to NCS. +# This is an optional parameter. If unspecified then no authentication is used.
+# +# username = +# Example: username = admin + +# (StrOpt) Password for HTTP basic authentication to NCS. +# This is an optional parameter. If unspecified then no authentication is used. +# +# password = +# Example: password = admin + +# (IntOpt) Timeout in seconds to wait for NCS HTTP request completion. +# This is an optional parameter, default value is 10 seconds. +# +# timeout = +# Example: timeout = 15 + diff --git a/openstack/etc/neutron/plugins/ml2/ml2_conf_odl.ini b/openstack/etc/neutron/plugins/ml2/ml2_conf_odl.ini new file mode 100644 index 00000000..9e88c1bb --- /dev/null +++ b/openstack/etc/neutron/plugins/ml2/ml2_conf_odl.ini @@ -0,0 +1,30 @@ +# Configuration for the OpenDaylight MechanismDriver + +[ml2_odl] +# (StrOpt) OpenDaylight REST URL +# If this is not set then no HTTP requests will be made. +# +# url = +# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron + +# (StrOpt) Username for HTTP basic authentication to ODL. +# +# username = +# Example: username = admin + +# (StrOpt) Password for HTTP basic authentication to ODL. +# +# password = +# Example: password = admin + +# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion. +# This is an optional parameter, default value is 10 seconds. +# +# timeout = 10 +# Example: timeout = 15 + +# (IntOpt) Timeout in minutes to wait for a Tomcat session timeout. +# This is an optional parameter, default value is 30 minutes. +# +# session_timeout = 30 +# Example: session_timeout = 60 diff --git a/openstack/etc/neutron/plugins/ml2/ml2_conf_ofa.ini b/openstack/etc/neutron/plugins/ml2/ml2_conf_ofa.ini new file mode 100644 index 00000000..4a94b987 --- /dev/null +++ b/openstack/etc/neutron/plugins/ml2/ml2_conf_ofa.ini @@ -0,0 +1,13 @@ +# Defines configuration options specific to the OpenFlow Agent Mechanism Driver + +[ovs] +# Please refer to the Open vSwitch configuration options. + +[agent] +# (IntOpt) Number of seconds to retry acquiring an Open vSwitch datapath. +# This is an optional parameter, default value is 60 seconds. +# +# get_datapath_retry_times = +# Example: get_datapath_retry_times = 30 + +# In addition to the above, please refer to the Open vSwitch configuration options. diff --git a/openstack/etc/neutron/plugins/ml2/ml2_conf_sriov.ini b/openstack/etc/neutron/plugins/ml2/ml2_conf_sriov.ini new file mode 100644 index 00000000..f9522e7f --- /dev/null +++ b/openstack/etc/neutron/plugins/ml2/ml2_conf_sriov.ini @@ -0,0 +1,31 @@ +# Defines configuration options for SRIOV NIC Switch MechanismDriver +# and Agent + +[ml2_sriov] +# (ListOpt) Comma-separated list of +# supported Vendor PCI Devices, in format vendor_id:product_id +# +# supported_pci_vendor_devs = 15b3:1004, 8086:10ca +# Example: supported_pci_vendor_devs = 15b3:1004 +# +# (BoolOpt) Requires running SRIOV neutron agent for port binding +# agent_required = False + +[sriov_nic] +# (ListOpt) Comma-separated list of <physical_network>:<network_device> +# tuples mapping physical network names to the agent's node-specific +# physical network device interfaces of SR-IOV physical function to be used +# for VLAN networks. All physical networks listed in network_vlan_ranges on +# the server should have mappings to appropriate interfaces on each agent. +# +# physical_device_mappings = +# Example: physical_device_mappings = physnet1:eth1 +# +# (ListOpt) Comma-separated list of <network_device>:<vfs_to_exclude> +# tuples, mapping network_device to the agent's node-specific list of virtual +# functions that should not be used for virtual networking.
+# vfs_to_exclude is a semicolon-separated list of virtual +# functions to exclude from network_device. The network_device in the +# mapping should appear in the physical_device_mappings list. +# exclude_devices = +# Example: exclude_devices = eth1:0000:07:00.2; 0000:07:00.3 diff --git a/openstack/etc/neutron/plugins/mlnx/mlnx_conf.ini b/openstack/etc/neutron/plugins/mlnx/mlnx_conf.ini new file mode 100644 index 00000000..b1225111 --- /dev/null +++ b/openstack/etc/neutron/plugins/mlnx/mlnx_conf.ini @@ -0,0 +1,79 @@ +[mlnx] +# (StrOpt) Type of network to allocate for tenant networks. The +# default value is 'vlan'. You MUST configure network_vlan_ranges below +# in order for tenant networks to provide connectivity between hosts. +# Set to 'none' to disable creation of tenant networks. +# +# tenant_network_type = vlan +# Example: tenant_network_type = vlan + +# (ListOpt) Comma-separated list of +# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges +# of VLAN IDs on named physical networks that are available for +# allocation. All physical networks listed are available for flat and +# VLAN provider network creation. Specified ranges of VLAN IDs are +# available for tenant network allocation if tenant_network_type is +# 'vlan'. If empty, only local networks may be created. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = default:1:100 + +# (ListOpt) Comma-separated list of +# <physical_network>:<physical_network_type> tuples mapping physical +# network names to physical network types. All physical +# networks listed in network_vlan_ranges should have +# mappings to an appropriate physical network type. +# Type of the physical network can be either eth (Ethernet) or +# ib (InfiniBand). If empty, physical network eth type is assumed. +# +# physical_network_type_mappings = +# Example: physical_network_type_mappings = default:eth + +# (StrOpt) Type of the physical network, can be either 'eth' or 'ib' +# The default value is 'eth' +# physical_network_type = eth + +[eswitch] +# (ListOpt) Comma-separated list of +# <physical_network>:<physical_interface> tuples mapping physical +# network names to the agent's node-specific physical network +# interfaces to be used for flat and VLAN networks. All physical +# networks listed in network_vlan_ranges on the server should have +# mappings to appropriate interfaces on each agent. +# +# physical_interface_mappings = +# Example: physical_interface_mappings = default:eth2 + +# (StrOpt) Type of Network Interface to allocate for VM: +# mlnx_direct or hostdev according to libvirt terminology +# vnic_type = mlnx_direct + +# (StrOpt) Eswitch daemon endpoint connection URL +# daemon_endpoint = 'tcp://127.0.0.1:60001' + +# The number of milliseconds the agent will wait for +# a response to a request to the daemon +# request_timeout = 3000 + +# The number of times the agent will retry a request +# to the daemon before giving up +# retries = 3 + +# The backoff rate multiplier for the waiting period between retries +# on requests to the daemon, i.e. a value of 2 will double +# the request timeout on each retry +# backoff_rate = 2 + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +# (BoolOpt) Enable server RPC compatibility with old (pre-havana) +# agents. +# +# rpc_support_old_agents = False + +[securitygroup] +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group.
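+# (Enabling both Neutron and Nova security groups would apply two sets of +# firewall rules to each port, so only one of the two should be enabled.)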
+# enable_security_group = True diff --git a/openstack/etc/neutron/plugins/nec/nec.ini b/openstack/etc/neutron/plugins/nec/nec.ini new file mode 100644 index 00000000..798a5a61 --- /dev/null +++ b/openstack/etc/neutron/plugins/nec/nec.ini @@ -0,0 +1,63 @@ +# Sample Configurations + +[ovs] +# Do not change this parameter unless you have a good reason to. +# This is the name of the OVS integration bridge. There is one per hypervisor. +# The integration bridge acts as a virtual "patch bay". All VM VIFs are +# attached to this bridge and then "patched" according to their network +# connectivity. +# integration_bridge = br-int + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +[securitygroup] +# Firewall driver for realizing neutron security group function +firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True + +[ofc] +# Specify OpenFlow Controller Host, Port and Driver to connect. +# host = 127.0.0.1 +# port = 8888 + +# Base URL of OpenFlow Controller REST API. +# It is prepended to the path of each API request. +# path_prefix = + +# Drivers are in neutron/plugins/nec/drivers/. +# driver = trema + +# PacketFilter is available when it's enabled in this configuration +# and supported by the driver. +# enable_packet_filter = true + +# Support PacketFilter on OFC router interface +# support_packet_filter_on_ofc_router = true + +# Use SSL to connect +# use_ssl = false + +# Key file +# key_file = + +# Certificate file +# cert_file = + +# Disable SSL certificate verification +# insecure_ssl = false + +# Maximum attempts per OFC API request. NEC plugin retries +# API request to OFC when OFC returns ServiceUnavailable (503). +# The value must be greater than 0. +# api_max_attempts = 3 + +[provider] +# Default router provider to use. +# default_router_provider = l3-agent +# List of enabled router providers.
+# router_providers = l3-agent,openflow diff --git a/openstack/etc/neutron/plugins/nuage/nuage_plugin.ini b/openstack/etc/neutron/plugins/nuage/nuage_plugin.ini new file mode 100644 index 00000000..aad37bd5 --- /dev/null +++ b/openstack/etc/neutron/plugins/nuage/nuage_plugin.ini @@ -0,0 +1,41 @@ +# Please fill in the correct data for all the keys below and uncomment key-value pairs +[restproxy] +# (StrOpt) Default Network partition in which VSD will +# orchestrate network resources using openstack +# +#default_net_partition_name = + +# (StrOpt) Nuage provided uri for initial authorization to +# access VSD +# +#auth_resource = /auth + +# (StrOpt) IP Address and Port of VSD +# +#server = ip:port + +# (StrOpt) Organization name in which VSD will orchestrate +# network resources using openstack +# +#organization = org + +# (StrOpt) Username and password of VSD for authentication +# +#serverauth = uname:pass + +# (BoolOpt) Boolean for SSL connection with VSD server +# +#serverssl = True + +# (StrOpt) Nuage provided base uri to reach out to VSD +# +#base_uri = /base + +[syncmanager] +# (BoolOpt) Boolean to enable sync between openstack and VSD +# +#enable_sync = False + +# (IntOpt) Sync interval in seconds between openstack and VSD +# +#sync_interval = 0 \ No newline at end of file diff --git a/openstack/etc/neutron/plugins/oneconvergence/nvsdplugin.ini b/openstack/etc/neutron/plugins/oneconvergence/nvsdplugin.ini new file mode 100644 index 00000000..a1c05d97 --- /dev/null +++ b/openstack/etc/neutron/plugins/oneconvergence/nvsdplugin.ini @@ -0,0 +1,35 @@ +[nvsd] +# Configure the NVSD controller. The plugin proxies the API calls +# to the NVSD controller, which implements the required functionality. + +# IP address of NVSD controller api server +# nvsd_ip = + +# Port number of NVSD controller api server +# nvsd_port = 8082 + +# Authentication credentials to access the api server +# nvsd_user = +# nvsd_passwd = + +# API request timeout in seconds +# request_timeout = + +# Maximum number of retry attempts to login to the NVSD controller +# Specify 0 to retry until success (default) +# nvsd_retries = 0 + +[securitygroup] +# Specify the firewall_driver option: if neutron security groups are disabled, +# use NoopFirewallDriver; otherwise use OVSHybridIptablesFirewallDriver. +# firewall_driver = neutron.agent.firewall.NoopFirewallDriver + +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True + +[agent] +# root_helper = sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf + +[database] +# connection = mysql://root:<password>@127.0.0.1/<database>?charset=utf8 diff --git a/openstack/etc/neutron/plugins/opencontrail/contrailplugin.ini b/openstack/etc/neutron/plugins/opencontrail/contrailplugin.ini new file mode 100644 index 00000000..629f1fc4 --- /dev/null +++ b/openstack/etc/neutron/plugins/opencontrail/contrailplugin.ini @@ -0,0 +1,26 @@ +# OpenContrail is an Apache 2.0-licensed project that is built using +# standards-based protocols and provides all the necessary components for +# network virtualization: SDN controller, virtual router, analytics engine, +# and published northbound APIs. +# For more information visit: http://opencontrail.org + +# Opencontrail plugin specific configuration +[CONTRAIL] +# (StrOpt) IP address to connect to opencontrail controller. +# Uncomment this line for specifying the IP address of the opencontrail +# Api-Server. +# Default value is localhost (127.0.0.1).
+# api_server_ip='127.0.0.1' + +# (IntOpt) port to connect to opencontrail controller. +# Uncomment this line to specify the port of the opencontrail +# Api-Server. +# Default value is 8082 +# api_server_port=8082 + +# (DictOpt) enable opencontrail extensions +# Opencontrail may in future support extensions such as ipam and policy; +# these extensions can be configured as shown below. The plugin will then +# load the specified extensions. +# Default value is None; it won't load any extensions +# contrail_extensions=ipam:<ipam module>,policy:<policy module> diff --git a/openstack/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini b/openstack/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini new file mode 100644 index 00000000..232ca71d --- /dev/null +++ b/openstack/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini @@ -0,0 +1,141 @@ +[ovs] +# (BoolOpt) Set to True in the server and the agents to enable support +# for GRE or VXLAN networks. Requires kernel support for OVS patch ports and +# GRE or VXLAN tunneling. +# +# WARNING: This option will be deprecated in the Icehouse release, at which +# point setting tunnel_types will be required to enable tunneling. +# +# enable_tunneling = False + +# Do not change this parameter unless you have a good reason to. +# This is the name of the OVS integration bridge. There is one per hypervisor. +# The integration bridge acts as a virtual "patch bay". All VM VIFs are +# attached to this bridge and then "patched" according to their network +# connectivity. +# +# integration_bridge = br-int + +# Only used for the agent if tunnel_id_ranges is not empty for +# the server. In most cases, the default value should be fine. +# +# tunnel_bridge = br-tun + +# Peer patch port in integration bridge for tunnel bridge +# int_peer_patch_port = patch-tun + +# Peer patch port in tunnel bridge for integration bridge +# tun_peer_patch_port = patch-int + +# Uncomment this line for the agent if tunnel_id_ranges is not +# empty for the server. Set local-ip to be the local IP address of +# this hypervisor. +# +# local_ip = + +# (ListOpt) Comma-separated list of <physical_network>:<bridge> tuples +# mapping physical network names to the agent's node-specific OVS +# bridge names to be used for flat and VLAN networks. The length of +# bridge names should be no more than 11 characters. Each bridge must +# exist, and should have a physical network interface configured as a +# port. All physical networks configured on the server should have +# mappings to appropriate bridges on each agent. +# +# bridge_mappings = +# Example: bridge_mappings = physnet1:br-eth1 + +# (BoolOpt) Use veths instead of patch ports to interconnect the integration +# bridge to physical networks. This supports kernels without OVS patch port +# support as long as it is set to True. +# use_veth_interconnection = False + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +# Minimize polling by monitoring ovsdb for interface changes +# minimize_polling = True + +# When minimize_polling = True, the number of seconds to wait before +# respawning the ovsdb monitor after losing communication with it +# ovsdb_monitor_respawn_interval = 30 + +# (ListOpt) The types of tenant network tunnels supported by the agent. +# Setting this will enable tunneling support in the agent. This can be set to +# either 'gre' or 'vxlan'. If this is unset, it will default to [] and +# disable tunneling support in the agent. +# You can specify as many values here as your compute hosts support.
+# +# tunnel_types = +# Example: tunnel_types = gre +# Example: tunnel_types = vxlan +# Example: tunnel_types = vxlan, gre + +# (IntOpt) The port number to utilize if tunnel_types includes 'vxlan'. By +# default, this will make use of the Open vSwitch default value of '4789' if +# not specified. +# +# vxlan_udp_port = +# Example: vxlan_udp_port = 8472 + +# (IntOpt) This is the MTU size of veth interfaces. +# Do not change unless you have a good reason to. +# The default MTU size of veth interfaces is 1500. +# This option has no effect if use_veth_interconnection is False +# veth_mtu = +# Example: veth_mtu = 1504 + +# (BoolOpt) Flag to enable l2-population extension. This option should only be +# used in conjunction with ml2 plugin and l2population mechanism driver. It'll +# enable the plugin to populate remote ports' MACs and IPs (using fdb_add/remove +# RPC callbacks instead of tunnel_sync/update) on OVS agents in order to +# optimize tunnel management. +# +# l2_population = False + +# Enable local ARP responder. Requires OVS 2.1. This is only used by the l2 +# population ML2 MechanismDriver. +# +# arp_responder = False + +# (BoolOpt) Set or un-set the don't fragment (DF) bit on outgoing IP packets +# carrying GRE/VXLAN tunnels. The default value is True. +# +# dont_fragment = True + +# (BoolOpt) Set to True on L2 agents to enable support +# for distributed virtual routing. +# +# enable_distributed_routing = False + +[securitygroup] +# Firewall driver for realizing neutron security group function. +# firewall_driver = neutron.agent.firewall.NoopFirewallDriver +# Example: firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True + +#----------------------------------------------------------------------------- +# Sample Configurations. +#----------------------------------------------------------------------------- +# +# 1. With VLANs on eth1. +# [ovs] +# integration_bridge = br-int +# bridge_mappings = default:br-eth1 +# +# 2. With GRE tunneling. +# [ovs] +# integration_bridge = br-int +# tunnel_bridge = br-tun +# local_ip = 10.0.0.3 +# +# 3. With VXLAN tunneling. +# [ovs] +# integration_bridge = br-int +# tunnel_bridge = br-tun +# local_ip = 10.0.0.3 +# [agent] +# tunnel_types = vxlan diff --git a/openstack/etc/neutron/plugins/plumgrid/plumgrid.ini b/openstack/etc/neutron/plugins/plumgrid/plumgrid.ini new file mode 100644 index 00000000..bfe8062a --- /dev/null +++ b/openstack/etc/neutron/plugins/plumgrid/plumgrid.ini @@ -0,0 +1,14 @@ +# Config file for Neutron PLUMgrid Plugin + +[plumgriddirector] +# These lines should point to the PLUMgrid Director +# for the PLUMgrid platform. +# director_server= +# director_server_port= +# Authentication parameters for the Director. +# These are the admin credentials to manage and control +# the PLUMgrid Director server.
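+# Example (hypothetical credentials): +# username=plumgrid_admin +# password=secret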
+# username=
+# password=
+# servertimeout=5
+# driver=
diff --git a/openstack/etc/neutron/plugins/vmware/nsx.ini b/openstack/etc/neutron/plugins/vmware/nsx.ini
new file mode 100644
index 00000000..a9bf5c5e
--- /dev/null
+++ b/openstack/etc/neutron/plugins/vmware/nsx.ini
@@ -0,0 +1,203 @@
+[DEFAULT]
+# User name for NSX controller
+# nsx_user = admin
+
+# Password for NSX controller
+# nsx_password = admin
+
+# Time before aborting a request on an unresponsive controller (Seconds)
+# http_timeout = 75
+
+# Maximum number of times a particular request should be retried
+# retries = 2
+
+# Maximum number of times a redirect response should be followed
+# redirects = 2
+
+# Comma-separated list of NSX controller endpoints (<ip>:<port>). When port
+# is omitted, 443 is assumed. This option MUST be specified, e.g.:
+# nsx_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh.ee:80
+
+# UUID of the pre-existing default NSX Transport zone to be used for creating
+# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.:
+# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
+
+# (Optional) UUID for the default l3 gateway service to use with this cluster.
+# To be specified if planning to use logical routers with external gateways.
+# default_l3_gw_service_uuid =
+
+# (Optional) UUID for the default l2 gateway service to use with this cluster.
+# Specify this to provide a predefined gateway that tenants can use to connect their networks.
+# default_l2_gw_service_uuid =
+
+# (Optional) UUID for the default service cluster. A service cluster is introduced to
+# represent a group of gateways and it is needed in order to use Logical Services like
+# dhcp and metadata in the logical space. NOTE: If agent_mode is set to 'agentless' this
+# config parameter *MUST BE* set to a valid pre-existing service cluster uuid.
+# default_service_cluster_uuid =
+
+# Default interface name to be used on the network gateway. This value
+# will be used for any device associated with a network gateway for which an
+# interface name was not specified
+# default_interface_name = breth0
+
+# Reconnect the connection to NSX if not used within this amount of time.
+# conn_idle_timeout = 900
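+
+# (Editor's sketch of a minimal connection setup; the address is invented and
+# the UUID is just the example value from above, not a real default:
+#   nsx_controllers = 192.168.20.10:443
+#   default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53)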
+
+[quotas]
+# number of network gateways allowed per tenant, -1 means unlimited
+# quota_network_gateway = 5
+
+[vcns]
+# URL for VCNS manager
+# manager_uri = https://management_ip
+
+# User name for VCNS manager
+# user = admin
+
+# Password for VCNS manager
+# password = default
+
+# (Optional) Datacenter ID for Edge deployment
+# datacenter_moid =
+
+# (Optional) Deployment Container ID for NSX Edge deployment
+# If not specified, either a default global container will be used, or
+# the resource pool and datastore specified below will be used
+# deployment_container_id =
+
+# (Optional) Resource pool ID for NSX Edge deployment
+# resource_pool_id =
+
+# (Optional) Datastore ID for NSX Edge deployment
+# datastore_id =
+
+# (Required) UUID of the logical switch for physical network connectivity
+# external_network =
+
+# (Optional) Asynchronous task status check interval
+# default is 2000 (milliseconds)
+# task_status_check_interval = 2000
+
+[nsx]
+# Maximum number of ports for each bridged logical switch
+# The recommended value for this parameter varies with NSX version
+# Please use:
+# NSX 2.x -> 64
+# NSX 3.0, 3.1 -> 5000
+# NSX 3.2 -> 10000
+# max_lp_per_bridged_ls = 5000
+
+# Maximum number of ports for each overlay (stt, gre) logical switch
+# max_lp_per_overlay_ls = 256
+
+# Number of connections to each controller node.
+# default is 10
+# concurrent_connections = 10
+
+# Number of seconds a generation id should be valid for (default -1 meaning do not time out)
+# nsx_gen_timeout = -1
+
+# Acceptable values for 'metadata_mode' are:
+# - 'access_network': this enables a dedicated connection to the metadata
+# proxy for metadata server access via Neutron router.
+# - 'dhcp_host_route': this enables host route injection via the dhcp agent.
+# This option is only useful if running on a host that does not support
+# namespaces; otherwise access_network should be used.
+# metadata_mode = access_network
+
+# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt)
+# default_transport_type = stt
+
+# Specifies in which mode the plugin needs to operate in order to provide DHCP and
+# metadata proxy services to tenant instances. If 'agent' is chosen (default)
+# the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to
+# provide such services. In this mode, the plugin supports API extensions 'agent'
+# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse),
+# the plugin will use NSX logical services for DHCP and metadata proxy. This
+# simplifies the deployment model for Neutron, in that the plugin no longer requires
+# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode
+# becomes ineffective. The 'agentless' mode is supported from NSX 4.2 or above.
+# Furthermore, a 'combined' mode is also provided and is used to support existing
+# deployments that want to adopt the agentless mode going forward. With this mode,
+# existing networks keep being served by the existing infrastructure (thus preserving
+# backward compatibility), whereas new networks will be served by the new infrastructure.
+# Migration tools are provided to 'move' one network from one model to another; with
+# agent_mode set to 'combined', option 'network_auto_schedule' in neutron.conf is
+# ignored, as new networks will no longer be scheduled to existing dhcp agents.
+# agent_mode = agent
+
+# Specifies which mode packet replication should be done in. If set to 'service',
+# a service node is required in order to perform packet replication. This can
+# also be set to 'source' if one wants replication to be performed locally (NOTE:
+# usually only useful for testing if one does not want to deploy a service node).
+# In order to leverage distributed routers, replication_mode should be set to
+# "service".
+# replication_mode = service
+
+[nsx_sync]
+# Interval in seconds between runs of the status synchronization task.
+# The plugin will aim at resynchronizing operational status for all
+# resources in this interval, and it should therefore be large enough
+# to ensure the task is feasible. Otherwise the plugin will be
+# constantly synchronizing resource status, i.e. a new task is started
+# as soon as the previous one is completed.
+# If this value is set to 0, the state synchronization thread for this
+# Neutron instance will be disabled.
+# state_sync_interval = 10
+
+# Random additional delay between two runs of the state synchronization task.
+# An additional wait time between 0 and max_random_sync_delay seconds
+# will be added on top of state_sync_interval.
+# max_random_sync_delay = 0
+
+# Minimum delay, in seconds, between two status synchronization requests for NSX.
+# Depending on chunk size, controller load, and other factors, state
+# synchronization requests might be pretty heavy. This means the
+# controller might take time to respond, and its load might be quite
+# increased by them. This parameter allows specifying a minimum
+# interval between two subsequent requests.
+# The value for this parameter must never exceed state_sync_interval.
+# If it does, an error will be raised at startup.
+# min_sync_req_delay = 1
+
+# Minimum number of resources to be retrieved from NSX in a single status
+# synchronization request.
+# The actual size of the chunk will increase if the number of resources is such
+# that using the minimum chunk size will cause the interval between two
+# requests to be less than min_sync_req_delay.
+# min_chunk_size = 500
+
+# Enable this option to allow punctual state synchronization on show
+# operations. In this way, show operations will always fetch the operational
+# status of the resource from the NSX backend; note that this might have
+# a considerable impact on overall performance.
+# always_read_status = False
+
+[nsx_lsn]
+# Pull LSN information from NSX in case it is missing from the local
+# data store. This is useful to rebuild the local store in case of
+# server recovery.
+# sync_on_missing_data = False
+
+[nsx_dhcp]
+# (Optional) Comma-separated list of additional DNS servers. Default is an empty list.
+# extra_domain_name_servers =
+
+# Domain to use for building the hostnames
+# domain_name = openstacklocal
+
+# Default DHCP lease time
+# default_lease_time = 43200
+
+[nsx_metadata]
+# IP address used by Metadata server
+# metadata_server_address = 127.0.0.1
+
+# TCP Port used by Metadata server
+# metadata_server_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. 
You may select any string for a secret, +# but it MUST match with the configuration used by the Metadata server +# metadata_shared_secret = diff --git a/openstack/etc/neutron/policy.json b/openstack/etc/neutron/policy.json new file mode 100644 index 00000000..3f692281 --- /dev/null +++ b/openstack/etc/neutron/policy.json @@ -0,0 +1,138 @@ +{ + "context_is_admin": "role:admin", + "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s", + "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s", + "admin_only": "rule:context_is_admin", + "regular_user": "", + "shared": "field:networks:shared=True", + "shared_firewalls": "field:firewalls:shared=True", + "external": "field:networks:router:external=True", + "default": "rule:admin_or_owner", + + "create_subnet": "rule:admin_or_network_owner", + "get_subnet": "rule:admin_or_owner or rule:shared", + "update_subnet": "rule:admin_or_network_owner", + "delete_subnet": "rule:admin_or_network_owner", + + "create_network": "", + "get_network": "rule:admin_or_owner or rule:shared or rule:external", + "get_network:router:external": "rule:regular_user", + "get_network:segments": "rule:admin_only", + "get_network:provider:network_type": "rule:admin_only", + "get_network:provider:physical_network": "rule:admin_only", + "get_network:provider:segmentation_id": "rule:admin_only", + "get_network:queue_id": "rule:admin_only", + "create_network:shared": "rule:admin_only", + "create_network:router:external": "rule:admin_only", + "create_network:segments": "rule:admin_only", + "create_network:provider:network_type": "rule:admin_only", + "create_network:provider:physical_network": "rule:admin_only", + "create_network:provider:segmentation_id": "rule:admin_only", + "update_network": "rule:admin_or_owner", + "update_network:segments": "rule:admin_only", + "update_network:shared": "rule:admin_only", + "update_network:provider:network_type": "rule:admin_only", + "update_network:provider:physical_network": "rule:admin_only", + "update_network:provider:segmentation_id": "rule:admin_only", + "update_network:router:external": "rule:admin_only", + "delete_network": "rule:admin_or_owner", + + "create_port": "", + "create_port:mac_address": "rule:admin_or_network_owner", + "create_port:fixed_ips": "rule:admin_or_network_owner", + "create_port:port_security_enabled": "rule:admin_or_network_owner", + "create_port:binding:host_id": "rule:admin_only", + "create_port:binding:profile": "rule:admin_only", + "create_port:mac_learning_enabled": "rule:admin_or_network_owner", + "get_port": "rule:admin_or_owner", + "get_port:queue_id": "rule:admin_only", + "get_port:binding:vif_type": "rule:admin_only", + "get_port:binding:vif_details": "rule:admin_only", + "get_port:binding:host_id": "rule:admin_only", + "get_port:binding:profile": "rule:admin_only", + "update_port": "rule:admin_or_owner", + "update_port:fixed_ips": "rule:admin_or_network_owner", + "update_port:port_security_enabled": "rule:admin_or_network_owner", + "update_port:binding:host_id": "rule:admin_only", + "update_port:binding:profile": "rule:admin_only", + "update_port:mac_learning_enabled": "rule:admin_or_network_owner", + "delete_port": "rule:admin_or_owner", + + "get_router:ha": "rule:admin_only", + "create_router": "rule:regular_user", + "create_router:external_gateway_info:enable_snat": "rule:admin_only", + "create_router:distributed": "rule:admin_only", + "create_router:ha": "rule:admin_only", + "get_router": "rule:admin_or_owner", + "get_router:distributed": "rule:admin_only", 
+ "update_router:external_gateway_info:enable_snat": "rule:admin_only", + "update_router:distributed": "rule:admin_only", + "update_router:ha": "rule:admin_only", + "delete_router": "rule:admin_or_owner", + + "add_router_interface": "rule:admin_or_owner", + "remove_router_interface": "rule:admin_or_owner", + + "create_firewall": "", + "get_firewall": "rule:admin_or_owner", + "create_firewall:shared": "rule:admin_only", + "get_firewall:shared": "rule:admin_only", + "update_firewall": "rule:admin_or_owner", + "update_firewall:shared": "rule:admin_only", + "delete_firewall": "rule:admin_or_owner", + + "create_firewall_policy": "", + "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls", + "create_firewall_policy:shared": "rule:admin_or_owner", + "update_firewall_policy": "rule:admin_or_owner", + "delete_firewall_policy": "rule:admin_or_owner", + + "create_firewall_rule": "", + "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls", + "update_firewall_rule": "rule:admin_or_owner", + "delete_firewall_rule": "rule:admin_or_owner", + + "create_qos_queue": "rule:admin_only", + "get_qos_queue": "rule:admin_only", + + "update_agent": "rule:admin_only", + "delete_agent": "rule:admin_only", + "get_agent": "rule:admin_only", + + "create_dhcp-network": "rule:admin_only", + "delete_dhcp-network": "rule:admin_only", + "get_dhcp-networks": "rule:admin_only", + "create_l3-router": "rule:admin_only", + "delete_l3-router": "rule:admin_only", + "get_l3-routers": "rule:admin_only", + "get_dhcp-agents": "rule:admin_only", + "get_l3-agents": "rule:admin_only", + "get_loadbalancer-agent": "rule:admin_only", + "get_loadbalancer-pools": "rule:admin_only", + + "create_floatingip": "rule:regular_user", + "update_floatingip": "rule:admin_or_owner", + "delete_floatingip": "rule:admin_or_owner", + "get_floatingip": "rule:admin_or_owner", + + "create_network_profile": "rule:admin_only", + "update_network_profile": "rule:admin_only", + "delete_network_profile": "rule:admin_only", + "get_network_profiles": "", + "get_network_profile": "", + "update_policy_profiles": "rule:admin_only", + "get_policy_profiles": "", + "get_policy_profile": "", + + "create_metering_label": "rule:admin_only", + "delete_metering_label": "rule:admin_only", + "get_metering_label": "rule:admin_only", + + "create_metering_label_rule": "rule:admin_only", + "delete_metering_label_rule": "rule:admin_only", + "get_metering_label_rule": "rule:admin_only", + + "get_service_provider": "rule:regular_user", + "get_lsn": "rule:admin_only", + "create_lsn": "rule:admin_only" +} diff --git a/openstack/etc/neutron/rootwrap.conf b/openstack/etc/neutron/rootwrap.conf new file mode 100644 index 00000000..ab5f4393 --- /dev/null +++ b/openstack/etc/neutron/rootwrap.conf @@ -0,0 +1,34 @@ +# Configuration for neutron-rootwrap +# This file should be owned by (and only-writeable by) the root user + +[DEFAULT] +# List of directories to load filter definitions from (separated by ','). +# These directories MUST all be only writeable by root ! +filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap + +# List of directories to search executables in, in case filters do not +# explicitely specify a full path (separated by ',') +# If not specified, defaults to system PATH environment variable. +# These directories MUST all be only writeable by root ! +exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin + +# Enable logging to syslog +# Default value is False +use_syslog=False + +# Which syslog facility to use. 
+# Valid values include auth, authpriv, syslog, local0, local1... +# Default value is 'syslog' +syslog_log_facility=syslog + +# Which messages to log. +# INFO means log all usage +# ERROR means only log unsuccessful attempts +syslog_log_level=ERROR + +[xenapi] +# XenAPI configuration is only required by the L2 agent if it is to +# target a XenServer/XCP compute host's dom0. +xenapi_connection_url= +xenapi_connection_username=root +xenapi_connection_password= diff --git a/openstack/etc/neutron/rootwrap.d/cisco-apic.filters b/openstack/etc/neutron/rootwrap.d/cisco-apic.filters new file mode 100644 index 00000000..69e4afcc --- /dev/null +++ b/openstack/etc/neutron/rootwrap.d/cisco-apic.filters @@ -0,0 +1,16 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# cisco-apic filters +lldpctl: CommandFilter, lldpctl, root + +# ip_lib filters +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/openstack/etc/neutron/rootwrap.d/debug.filters b/openstack/etc/neutron/rootwrap.d/debug.filters new file mode 100644 index 00000000..b61d9601 --- /dev/null +++ b/openstack/etc/neutron/rootwrap.d/debug.filters @@ -0,0 +1,14 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# This is needed because we should ping +# from inside a namespace which requires root +ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+ +ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+ diff --git a/openstack/etc/neutron/rootwrap.d/dhcp.filters b/openstack/etc/neutron/rootwrap.d/dhcp.filters new file mode 100644 index 00000000..0712ec13 --- /dev/null +++ b/openstack/etc/neutron/rootwrap.d/dhcp.filters @@ -0,0 +1,35 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# dhcp-agent +dnsmasq: EnvFilter, dnsmasq, root, NEUTRON_NETWORK_ID= +# dhcp-agent uses kill as well, that's handled by the generic KillFilter +# it looks like these are the only signals needed, per +# neutron/agent/linux/dhcp.py +kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP +kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP + +ovs-vsctl: CommandFilter, ovs-vsctl, root +ivs-ctl: CommandFilter, ivs-ctl, root +mm-ctl: CommandFilter, mm-ctl, root +dhcp_release: CommandFilter, dhcp_release, root + +# metadata proxy +metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root +# If installed from source (say, by devstack), the prefix will be +# /usr/local instead of /usr/bin. 
+metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root +# RHEL invocation of the metadata proxy will report /usr/bin/python +kill_metadata: KillFilter, root, python, -9 +kill_metadata7: KillFilter, root, python2.7, -9 + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/openstack/etc/neutron/rootwrap.d/ipset-firewall.filters b/openstack/etc/neutron/rootwrap.d/ipset-firewall.filters new file mode 100644 index 00000000..52c66373 --- /dev/null +++ b/openstack/etc/neutron/rootwrap.d/ipset-firewall.filters @@ -0,0 +1,12 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] +# neutron/agent/linux/iptables_firewall.py +# "ipset", "-A", ... +ipset: CommandFilter, ipset, root diff --git a/openstack/etc/neutron/rootwrap.d/iptables-firewall.filters b/openstack/etc/neutron/rootwrap.d/iptables-firewall.filters new file mode 100644 index 00000000..b8a6ab5b --- /dev/null +++ b/openstack/etc/neutron/rootwrap.d/iptables-firewall.filters @@ -0,0 +1,21 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# neutron/agent/linux/iptables_manager.py +# "iptables-save", ... +iptables-save: CommandFilter, iptables-save, root +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-save: CommandFilter, ip6tables-save, root +ip6tables-restore: CommandFilter, ip6tables-restore, root + +# neutron/agent/linux/iptables_manager.py +# "iptables", "-A", ... +iptables: CommandFilter, iptables, root +ip6tables: CommandFilter, ip6tables, root diff --git a/openstack/etc/neutron/rootwrap.d/l3.filters b/openstack/etc/neutron/rootwrap.d/l3.filters new file mode 100644 index 00000000..be69b32c --- /dev/null +++ b/openstack/etc/neutron/rootwrap.d/l3.filters @@ -0,0 +1,48 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# arping +arping: CommandFilter, arping, root + +# l3_agent +sysctl: CommandFilter, sysctl, root +route: CommandFilter, route, root +radvd: CommandFilter, radvd, root + +# metadata proxy +metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root +# If installed from source (say, by devstack), the prefix will be +# /usr/local instead of /usr/bin. 
+metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root +# RHEL invocation of the metadata proxy will report /usr/bin/python +kill_metadata: KillFilter, root, python, -9 +kill_metadata7: KillFilter, root, python2.7, -9 +kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -9, -HUP +kill_radvd: KillFilter, root, /sbin/radvd, -9, -HUP + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root + +# ovs_lib (if OVSInterfaceDriver is used) +ovs-vsctl: CommandFilter, ovs-vsctl, root + +# iptables_manager +iptables-save: CommandFilter, iptables-save, root +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-save: CommandFilter, ip6tables-save, root +ip6tables-restore: CommandFilter, ip6tables-restore, root + +# Keepalived +keepalived: CommandFilter, keepalived, root +kill_keepalived: KillFilter, root, /usr/sbin/keepalived, -HUP, -15, -9 + +# l3 agent to delete floatingip's conntrack state +conntrack: CommandFilter, conntrack, root diff --git a/openstack/etc/neutron/rootwrap.d/lbaas-haproxy.filters b/openstack/etc/neutron/rootwrap.d/lbaas-haproxy.filters new file mode 100644 index 00000000..b4e1ecba --- /dev/null +++ b/openstack/etc/neutron/rootwrap.d/lbaas-haproxy.filters @@ -0,0 +1,26 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# haproxy +haproxy: CommandFilter, haproxy, root + +# lbaas-agent uses kill as well, that's handled by the generic KillFilter +kill_haproxy_usr: KillFilter, root, /usr/sbin/haproxy, -9, -HUP + +ovs-vsctl: CommandFilter, ovs-vsctl, root +mm-ctl: CommandFilter, mm-ctl, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root +route: CommandFilter, route, root + +# arping +arping: CommandFilter, arping, root diff --git a/openstack/etc/neutron/rootwrap.d/linuxbridge-plugin.filters b/openstack/etc/neutron/rootwrap.d/linuxbridge-plugin.filters new file mode 100644 index 00000000..03df3959 --- /dev/null +++ b/openstack/etc/neutron/rootwrap.d/linuxbridge-plugin.filters @@ -0,0 +1,19 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# linuxbridge-agent +# unclear whether both variants are necessary, but I'm transliterating +# from the old mechanism +brctl: CommandFilter, brctl, root +bridge: CommandFilter, bridge, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/openstack/etc/neutron/rootwrap.d/nec-plugin.filters b/openstack/etc/neutron/rootwrap.d/nec-plugin.filters new file mode 100644 index 00000000..89c4cfe3 --- /dev/null +++ b/openstack/etc/neutron/rootwrap.d/nec-plugin.filters @@ -0,0 +1,12 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# nec_neutron_agent +ovs-vsctl: CommandFilter, ovs-vsctl, root diff --git a/openstack/etc/neutron/rootwrap.d/ofagent.filters b/openstack/etc/neutron/rootwrap.d/ofagent.filters new file mode 100644 index 00000000..11e42564 --- /dev/null +++ 
b/openstack/etc/neutron/rootwrap.d/ofagent.filters @@ -0,0 +1,16 @@ +# neutron-rootwrap command filters for nodes on which +# neutron-ofagent-agent is expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# ovs_lib +ovs-vsctl: CommandFilter, ovs-vsctl, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/openstack/etc/neutron/rootwrap.d/openvswitch-plugin.filters b/openstack/etc/neutron/rootwrap.d/openvswitch-plugin.filters new file mode 100644 index 00000000..b63a83b9 --- /dev/null +++ b/openstack/etc/neutron/rootwrap.d/openvswitch-plugin.filters @@ -0,0 +1,22 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# openvswitch-agent +# unclear whether both variants are necessary, but I'm transliterating +# from the old mechanism +ovs-vsctl: CommandFilter, ovs-vsctl, root +ovs-ofctl: CommandFilter, ovs-ofctl, root +kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9 +ovsdb-client: CommandFilter, ovsdb-client, root +xe: CommandFilter, xe, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/openstack/etc/neutron/rootwrap.d/vpnaas.filters b/openstack/etc/neutron/rootwrap.d/vpnaas.filters new file mode 100644 index 00000000..7848136b --- /dev/null +++ b/openstack/etc/neutron/rootwrap.d/vpnaas.filters @@ -0,0 +1,13 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root +openswan: CommandFilter, ipsec, root diff --git a/openstack/etc/neutron/vpn_agent.ini b/openstack/etc/neutron/vpn_agent.ini new file mode 100644 index 00000000..c3089df9 --- /dev/null +++ b/openstack/etc/neutron/vpn_agent.ini @@ -0,0 +1,14 @@ +[DEFAULT] +# VPN-Agent configuration file +# Note vpn-agent inherits l3-agent, so you can use configs on l3-agent also + +[vpnagent] +# vpn device drivers which vpn agent will use +# If we want to use multiple drivers, we need to define this option multiple times. 
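+# (Editor's illustration: to load both drivers below, uncomment both
+# vpn_device_driver lines — the option simply appears once per driver.)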
+# vpn_device_driver=neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver +# vpn_device_driver=neutron.services.vpn.device_drivers.cisco_ipsec.CiscoCsrIPsecDriver +# vpn_device_driver=another_driver + +[ipsec] +# Status check interval +# ipsec_status_check_interval=60 diff --git a/openstack/etc/nova/api-paste.ini b/openstack/etc/nova/api-paste.ini new file mode 100644 index 00000000..2a825a5b --- /dev/null +++ b/openstack/etc/nova/api-paste.ini @@ -0,0 +1,118 @@ +############ +# Metadata # +############ +[composite:metadata] +use = egg:Paste#urlmap +/: meta + +[pipeline:meta] +pipeline = ec2faultwrap logrequest metaapp + +[app:metaapp] +paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory + +####### +# EC2 # +####### + +[composite:ec2] +use = egg:Paste#urlmap +/services/Cloud: ec2cloud + +[composite:ec2cloud] +use = call:nova.api.auth:pipeline_factory +noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor +keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor + +[filter:ec2faultwrap] +paste.filter_factory = nova.api.ec2:FaultWrapper.factory + +[filter:logrequest] +paste.filter_factory = nova.api.ec2:RequestLogging.factory + +[filter:ec2lockout] +paste.filter_factory = nova.api.ec2:Lockout.factory + +[filter:ec2keystoneauth] +paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory + +[filter:ec2noauth] +paste.filter_factory = nova.api.ec2:NoAuth.factory + +[filter:cloudrequest] +controller = nova.api.ec2.cloud.CloudController +paste.filter_factory = nova.api.ec2:Requestify.factory + +[filter:authorizer] +paste.filter_factory = nova.api.ec2:Authorizer.factory + +[filter:validator] +paste.filter_factory = nova.api.ec2:Validator.factory + +[app:ec2executor] +paste.app_factory = nova.api.ec2:Executor.factory + +############# +# OpenStack # +############# + +[composite:osapi_compute] +use = call:nova.api.openstack.urlmap:urlmap_factory +/: oscomputeversions +/v1.1: openstack_compute_api_v2 +/v2: openstack_compute_api_v2 +/v3: openstack_compute_api_v3 + +[composite:openstack_compute_api_v2] +use = call:nova.api.auth:pipeline_factory +noauth = compute_req_id faultwrap sizelimit noauth ratelimit osapi_compute_app_v2 +keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2 +keystone_nolimit = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2 + +[composite:openstack_compute_api_v3] +use = call:nova.api.auth:pipeline_factory_v3 +noauth = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3 +keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v3 + +[filter:request_id] +paste.filter_factory = nova.openstack.common.middleware.request_id:RequestIdMiddleware.factory + +[filter:compute_req_id] +paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory + +[filter:faultwrap] +paste.filter_factory = nova.api.openstack:FaultWrapper.factory + +[filter:noauth] +paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory + +[filter:noauth_v3] +paste.filter_factory = nova.api.openstack.auth:NoAuthMiddlewareV3.factory + +[filter:ratelimit] +paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory + +[filter:sizelimit] +paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory + +[app:osapi_compute_app_v2] +paste.app_factory = nova.api.openstack.compute:APIRouter.factory + +[app:osapi_compute_app_v3] +paste.app_factory = 
nova.api.openstack.compute:APIRouterV3.factory + +[pipeline:oscomputeversions] +pipeline = faultwrap oscomputeversionapp + +[app:oscomputeversionapp] +paste.app_factory = nova.api.openstack.compute.versions:Versions.factory + +########## +# Shared # +########## + +[filter:keystonecontext] +paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory diff --git a/openstack/etc/nova/cells.json b/openstack/etc/nova/cells.json new file mode 100644 index 00000000..cc74930d --- /dev/null +++ b/openstack/etc/nova/cells.json @@ -0,0 +1,26 @@ +{ + "parent": { + "name": "parent", + "api_url": "http://api.example.com:8774", + "transport_url": "rabbit://rabbit.example.com", + "weight_offset": 0.0, + "weight_scale": 1.0, + "is_parent": true + }, + "cell1": { + "name": "cell1", + "api_url": "http://api.example.com:8774", + "transport_url": "rabbit://rabbit1.example.com", + "weight_offset": 0.0, + "weight_scale": 1.0, + "is_parent": false + }, + "cell2": { + "name": "cell2", + "api_url": "http://api.example.com:8774", + "transport_url": "rabbit://rabbit2.example.com", + "weight_offset": 0.0, + "weight_scale": 1.0, + "is_parent": false + } +} diff --git a/openstack/etc/nova/logging.conf b/openstack/etc/nova/logging.conf new file mode 100644 index 00000000..5482a040 --- /dev/null +++ b/openstack/etc/nova/logging.conf @@ -0,0 +1,81 @@ +[loggers] +keys = root, nova + +[handlers] +keys = stderr, stdout, watchedfile, syslog, null + +[formatters] +keys = context, default + +[logger_root] +level = WARNING +handlers = null + +[logger_nova] +level = INFO +handlers = stderr +qualname = nova + +[logger_amqp] +level = WARNING +handlers = stderr +qualname = amqp + +[logger_amqplib] +level = WARNING +handlers = stderr +qualname = amqplib + +[logger_sqlalchemy] +level = WARNING +handlers = stderr +qualname = sqlalchemy +# "level = INFO" logs SQL queries. +# "level = DEBUG" logs SQL queries and results. +# "level = WARNING" logs neither. (Recommended for production systems.) 
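+#
+# (Editor's note, illustrative: to log SQL queries temporarily, change only
+# the "level" line above, e.g. "level = INFO", then restart the nova services.)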
+ +[logger_boto] +level = WARNING +handlers = stderr +qualname = boto + +[logger_suds] +level = INFO +handlers = stderr +qualname = suds + +[logger_eventletwsgi] +level = WARNING +handlers = stderr +qualname = eventlet.wsgi.server + +[handler_stderr] +class = StreamHandler +args = (sys.stderr,) +formatter = context + +[handler_stdout] +class = StreamHandler +args = (sys.stdout,) +formatter = context + +[handler_watchedfile] +class = handlers.WatchedFileHandler +args = ('nova.log',) +formatter = context + +[handler_syslog] +class = handlers.SysLogHandler +args = ('/dev/log', handlers.SysLogHandler.LOG_USER) +formatter = context + +[handler_null] +class = nova.openstack.common.log.NullHandler +formatter = default +args = () + +[formatter_context] +class = nova.openstack.common.log.ContextFormatter + +[formatter_default] +format = %(message)s diff --git a/openstack/etc/nova/nova-compute.conf b/openstack/etc/nova/nova-compute.conf new file mode 100644 index 00000000..1ef5590c --- /dev/null +++ b/openstack/etc/nova/nova-compute.conf @@ -0,0 +1,4 @@ +[DEFAULT] +compute_driver=libvirt.LibvirtDriver +[libvirt] +virt_type=qemu diff --git a/openstack/etc/nova/nova.conf b/openstack/etc/nova/nova.conf new file mode 100644 index 00000000..b703591f --- /dev/null +++ b/openstack/etc/nova/nova.conf @@ -0,0 +1,631 @@ +# Full list of options available at: http://wiki.openstack.org/NovaConfigOptions +[DEFAULT] + +# LOG/STATE +verbose=True +logdir=/var/log/nova + +### nova.availability_zones ### +############################### +# availability_zone to show internal services under (string value) +#internal_service_availability_zone=internal + +# default compute node availability_zone (string value) +#default_availability_zone=nova + +### nova.crypto ### +################### +# Filename of root CA (string value) +#ca_file=cacert.pem + +# Filename of private key (string value) +#key_file=private/cakey.pem + +# Filename of root Certificate Revocation List (string value) +#crl_file=crl.pem + +# Where we keep our keys (string value) +#keys_path=$state_path/keys + +# Where we keep our root CA (string value) +#ca_path=$state_path/CA + +# Should we use a CA for each project? (boolean value) +#use_project_ca=false + +# Subject for certificate for users, %s for project, user, +# timestamp (string value) +#user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s + +# Subject for certificate for projects, %s for project, +# timestamp (string value) +#project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s + +### nova.exception ### +# make exception message format errors fatal (boolean value) +#fatal_exception_format_errors=false + +### nova.manager ### +# Some periodic tasks can be run in a separate process. Should +# we run them here? (boolean value) +#run_external_periodic_tasks=true + +############################# +# Mandatory general options # +############################# +# ip address of this host (string value) +my_ip=##NOVA_HOST## +#use_ipv6=false + + +######## +# APIs # +######## +# Selects the type of APIs you want to activate. +# Each API will bind on a specific port. +# Compute nodes should run only the metadata API, +# a nova API endpoint node should run osapi_compute. +# If you want to use nova-volume you can also enable +# osapi_volume, but if you want to run cinder, do not +# activate it. 
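+# (Illustratively, a dedicated compute node would therefore set only:
+# enabled_apis=metadata)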
+# The list of APIs is: ec2,osapi_compute,metadata,osapi_volume
+enabled_apis=ec2,osapi_compute,metadata
+
+# NOVA API #
+# # # # # #
+#osapi_compute_listen="0.0.0.0"
+#osapi_compute_listen_port=8774
+
+#api_paste_config=api-paste.ini
+
+# Allows use of instance password during server creation
+#enable_instance_password=true
+
+
+# Name of this node. This can be an opaque identifier. It is
+# not necessarily a hostname, FQDN, or IP address. However,
+# the node name must be valid within an AMQP key, and if using
+# ZeroMQ, a valid hostname, FQDN, or IP address (string value)
+#host="firefly-2.local"
+
+#######################
+# Nova API extensions #
+#######################
+# osapi compute extension to load (multi valued)
+osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions
+
+# Specify list of extensions to load when using
+# osapi_compute_extension option with
+# nova.api.openstack.compute.contrib.select_extensions (list value)
+#osapi_compute_ext_list=""
+
+# Permit instance snapshot operations. (boolean value)
+#allow_instance_snapshots=true
+
+# S3 #
+# # #
+s3_host=$my_ip
+#s3_port=3333
+
+# EC2 API #
+# # # # # #
+#ec2_host="$my_ip"
+ec2_dmz_host="$my_ip"
+#ec2_private_dns_show_ip=True
+#ec2_path="/services/Cloud"
+#ec2_port=8773
+# the protocol to use when connecting to the ec2 api server (http, https) (string value)
+#ec2_scheme=http
+
+# Port and IP for the EC2 API to listen on
+#ec2_listen="0.0.0.0"
+#ec2_listen_port=8773
+
+# Metadata API #
+# # # # # # # #
+#metadata_host=$my_ip
+#metadata_port=8775
+#metadata_listen=0.0.0.0
+
+########
+# MISC #
+########
+#resume_guests_state_on_host_boot=false
+#instance_name_template="instance-%08x"
+# Inject the admin password at boot time, without an agent.
+#libvirt_inject_password=false
+
+########
+# LOGS #
+########
+#log-date-format="%Y-%m-%d %H:%M:%S"
+#debug=false
+
+##########
+# SYSTEM #
+##########
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+rootwrap_config=/etc/nova/rootwrap.conf
+#memcached_servers=
+
+##################
+# AUTHENTICATION #
+##################
+auth_strategy=keystone
+# Seconds for auth tokens to linger
+
+#############
+# SCHEDULER #
+#############
+compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
+#scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
+scheduler_default_filters=AggregateInstanceExtraSpecsFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter
+
+####################
+# VOLUMES / CINDER #
+####################
+# The full class name of the volume API class to use (string value)
+#volume_api_class=nova.volume.cinder.API
+
+# Allow performing insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure=false
+
+# Allow attaching a volume to an instance in a different
+# availability zone. (boolean value)
+#cinder_cross_az_attach=true
+
+# Libvirt handlers for remote volumes. (list value)
+#libvirt_volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver,aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver,glusterfs=nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver,fibre_channel=nova.virt.libvirt.volume.LibvirtFibreChannelVolumeDriver,scality=nova.virt.libvirt.volume.LibvirtScalityVolumeDriver
+
+############
+# RABBITMQ #
+############
+rabbit_host = ##RABBITMQ_HOST##
+#fake_rabbit=false
+#rabbit_virtual_host=/
+rabbit_userid = ##RABBITMQ_USER##
+rabbit_password = ##RABBITMQ_PASSWORD##
+rabbit_port = ##RABBITMQ_PORT##
+rabbit_use_ssl=false
+#rabbit_retry_interval=1
+# The messaging module to use, defaults to kombu (works for rabbit).
+# You can also use qpid: nova.rpc.impl_qpid
+rpc_backend = nova.openstack.common.rpc.impl_kombu
+
+##########
+# GLANCE #
+##########
+glance_host=##GLANCE_HOST##
+glance_port=9292
+glance_protocol=http
+
+# A list of the glance api servers available to nova. Prefix
+# with https:// for ssl-based glance api servers.
+# ([hostname|ip]:port) (list value)
+glance_api_servers=$glance_host:$glance_port
+#glance_api_servers=localhost:9292
+
+# Allow performing insecure SSL (https) requests to glance (boolean value)
+#api_insecure=false
+
+# Cache glance images locally
+#cache_images=true
+# Number of retries when downloading an image from glance (integer value)
+#num_retries=0
+
+#image_service=nova.image.glance.GlanceImageService
+
+###############################
+# Type of network APIs to use #
+###############################
+# The full class name of the network API class to use (string value)
+# Possible values are:
+# nova.network.api.API (if you wish to use nova-network)
+# nova.network.neutronv2.api.API (if you want to use Neutron)
+network_api_class=nova.network.neutronv2.api.API
+
+# Type of security group API. Possible values are:
+# nova (if you are using nova-network)
+# neutron (if you use neutron)
+security_group_api = neutron
+
+# Driver used to create ethernet devices. (string value)
+# When using linux net, use: nova.network.linux_net.LinuxBridgeInterfaceDriver
+# for Neutron, use: nova.network.linux_net.LinuxOVSInterfaceDriver
+linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
+
+# Firewall type to use (defaults to the hypervisor-specific iptables driver) (string value)
+# For linux net, use: nova.virt.libvirt.firewall.IptablesFirewallDriver
+# For Neutron and OVS, use: nova.virt.firewall.NoopFirewallDriver (since this is handled by Neutron)
+###firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+
+#######################
+# NETWORK (linux net) #
+#######################
+#network_manager=nova.network.manager.VlanManager
+network_manager=nova.network.manager.FlatDHCPManager
+#force_dhcp_release=false
+force_dhcp_release=True
+#dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf
+dhcpbridge_flagfile=/etc/nova/nova.conf
+#dhcpbridge=$bindir/nova-dhcpbridge
+#dhcp_lease_time=120
+# Firewall driver (defaults to hypervisor specific iptables driver) (string value)
+# Kept commented out: the NoopFirewallDriver selected above must not be
+# overridden while Neutron handles security groups.
+#firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
+# Interface for public IP addresses (default: eth0) (string value)
+#public_interface=br-ext
+public_interface=eth0
+# vlans will bridge into this interface if set (default: ) (string value)
+#vlan_interface=eth1
+vlan_interface=eth0
+# Bridge for simple network instances (default: ) (string value)
+flat_network_bridge=br100
+# FlatDhcp will bridge into this interface if set (default: ) (string value)
+flat_interface=eth0
+
+# Set it to the /32 of your metadata server if you have just one;
+# it is a CIDR in case there are multiple services that you want
+# to keep using the internal private IPs.
+# A list of dmz ranges that should be accepted (list value)
+#dmz_cidr=169.254.169.254/32
+# Name of Open vSwitch bridge used with linuxnet (string value)
+#linuxnet_ovs_integration_bridge="br-int"
+#routing_source_ip="$my_ip"
+# Only the first NIC of the VM will get the default gateway from the DHCP server
+#use_single_default_gateway=false
+
+###########
+# Neutron #
+###########
+# This is the URL of your neutron server:
+neutron_url=##NEUTRON_PUBLIC_URL##
+neutron_auth_strategy=keystone
+neutron_admin_tenant_name=service
+neutron_admin_username=##NEUTRON_SERVICE_USER##
+neutron_admin_password=##NEUTRON_SERVICE_PASSWORD##
+# This is the URL of your Keystone server
+neutron_admin_auth_url=##KEYSTONE_ADMIN_URL##
+
+# What's below is only needed for nova-compute.
+
+# Set flag to indicate Neutron will proxy metadata requests
+# and resolve instance ids. This is needed to use neutron-metadata-agent
+# (instead of the metadata server of nova-api,
+# which doesn't work with neutron) (boolean value)
+service_neutron_metadata_proxy=True
+
+# Shared secret to validate proxied Neutron metadata requests
+# This password should match what is in /etc/neutron/metadata_agent.ini
+# (string value)
+neutron_metadata_proxy_shared_secret = ##METADATA_PROXY_SHARED_SECRET##
+
+#################
+# NOVNC CONSOLE #
+#################
+# With the Debian package, the spicehtml5 console is the default. To
+# enable the NoVNC mode, enable the switch below, disable SPICE in this
+# nova.conf file as well (see far below), then edit
+# /etc/default/nova-consoleproxy to switch to NoVNC, shut down SPICE with
+# /etc/init.d/nova-spicehtml5proxy stop, and finally start nova-novncproxy.
+# Do not forget to restart Nova daemons and restart your VMs if you want to use
+# NoVNC from now on (VM video cards need to be attached to a console type, and
+# they can accept only one video card at a time).
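+# (Illustrative form only — the host below is invented; the real value is
+# substituted for the ##NOVA_NOVNCPROXY_BASE_URL## placeholder:
+#   novncproxy_base_url=http://192.0.2.10:6080/vnc_auto.html)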
+vnc_enabled=True +novncproxy_base_url=##NOVA_NOVNCPROXY_BASE_URL## +# Change vncserver_proxyclient_address and vncserver_listen to match each compute host +vncserver_proxyclient_address=##NOVA_HOST## +vncserver_listen=##NOVA_HOST## +vnc_keymap="en-us" + +###################################### +# nova-xenvncproxy (eg: xvpvncproxy) # +###################################### +# See NoVNC comments above for switching away from SPICE to XVP +#xvpvncproxy_host="0.0.0.0" +#xvpvncproxy_port=6081 + +######### +# QUOTA # +######### +# number of instances allowed per project (integer value) +#quota_instances=10 +# number of instance cores allowed per project (integer value) +#quota_cores=20 +# megabytes of instance ram allowed per project (integer value) +#quota_ram=51200 +# number of floating ips allowed per project (integer value) +#quota_floating_ips=10 +# number of metadata items allowed per instance (integer value) +#quota_metadata_items=128 +# number of injected files allowed (integer value) +#quota_injected_files=5 +# number of bytes allowed per injected file (integer value) +#quota_injected_file_content_bytes=10240 +# number of bytes allowed per injected file path (integer value) +#quota_injected_file_path_bytes=255 +# number of security groups per project (integer value) +#quota_security_groups=10 +# number of security rules per security group (integer value) +#quota_security_group_rules=20 +# number of key pairs per user (integer value) +#quota_key_pairs=100 +# number of seconds until a reservation expires (integer value) +#reservation_expire=86400 +# count of reservations until usage is refreshed (integer value) +#until_refresh=0 +# number of seconds between subsequent usage refreshes (integer value) +#max_age=0 +# default driver to use for quota checks (string value) +#quota_driver=nova.quota.DbQuotaDriver + +############ +# DATABASE # +############ +[database] +connection=postgresql://##NOVA_DB_USER##:##NOVA_DB_PASSWORD##@onenode/nova + +############# +# CONDUCTOR # +############# +[conductor] +# Perform nova-conductor operations locally (boolean value) +# use_local enabled for one node. For multinode this should be disabled. +use_local=true +# the topic conductor nodes listen on (string value) +#topic=conductor +# full class name for the Manager for conductor (string value) +#manager=nova.conductor.manager.ConductorManager + +######### +# CELLS # +######### +[cells] +# Cells communication driver to use (string value) +#driver=nova.cells.rpc_driver.CellsRPCDriver + +# Number of seconds after an instance was updated or deleted +# to continue to update cells (integer value) +#instance_updated_at_threshold=3600 + +# Number of instances to update per periodic task run (integer +# value) +#instance_update_num_instances=1 + +# Maximum number of hops for cells routing. (integer value) +#max_hop_count=10 + +# Cells scheduler to use (string value) +#scheduler=nova.cells.scheduler.CellsScheduler + +# Enable cell functionality (boolean value) +#enable=false + +# the topic cells nodes listen on (string value) +#topic=cells + +# Manager for cells (string value) +#manager=nova.cells.manager.CellsManager + +# name of this cell (string value) +#name=nova + +# Key/Multi-value list with the capabilities of the cell (list +# value) +#capabilities=hypervisor=xenserver;kvm,os=linux;windows + +# Seconds to wait for response from a call to a cell. (integer +# value) +#call_timeout=60 + +# Percentage of cell capacity to hold in reserve. 
Affects both +# memory and disk utilization (floating point value) +#reserve_percent=10.0 + +# Type of cell: api or compute (string value) +#cell_type= + +# Base queue name to use when communicating between cells. +# Various topics by message type will be appended to this. +# (string value) +#rpc_driver_queue_base=cells.intercell + +# Filter classes the cells scheduler should use. An entry of +# "nova.cells.filters.all_filters"maps to all cells filters +# included with nova. (list value) +#scheduler_filter_classes=nova.cells.filters.all_filters + +# Weigher classes the cells scheduler should use. An entry of +# "nova.cells.weights.all_weighers"maps to all cell weighers +# included with nova. (list value) +#scheduler_weight_classes=nova.cells.weights.all_weighers + +# How many retries when no cells are available. (integer +# value) +#scheduler_retries=10 + +# How often to retry in seconds when no cells are available. +# (integer value) +#scheduler_retry_delay=2 + +# Seconds between getting fresh cell info from db. (integer +# value) +#db_check_interval=60 + +# Multiplier used to weigh mute children. (The value should +# be negative.) (floating point value) +#mute_weight_multiplier=-10.0 + +# Weight value assigned to mute children. (The value should +# be positive.) (floating point value) +#mute_weight_value=1000.0 + +# Number of seconds after which a lack of capability and +# capacity updates signals the child cell is to be treated as +# a mute. (integer value) +#mute_child_interval=300 + +# Multiplier used for weighing ram. Negative numbers mean to +# stack vs spread. (floating point value) +#ram_weight_multiplier=10.0 + +############# +# BAREMETAL # +############# +[baremetal] +# The backend to use for bare-metal database (string value) +#db_backend=sqlalchemy + +# The SQLAlchemy connection string used to connect to the +# bare-metal database (string value) +#sql_connection=sqlite:///$state_path/baremetal_$sqlite_db + +# Whether baremetal compute injects password or not (boolean value) +#inject_password=true + +# Template file for injected network (string value) +#injected_network_template=$pybasedir/nova/virt/baremetal/interfaces.template + +# Baremetal VIF driver. (string value) +#vif_driver=nova.virt.baremetal.vif_driver.BareMetalVIFDriver + +# Baremetal volume driver. (string value) +#volume_driver=nova.virt.baremetal.volume_driver.LibvirtVolumeDriver + +# a list of additional capabilities corresponding to +# instance_type_extra_specs for this compute host to +# advertise. 
Valid entries are name=value, pairs For example, +# "key1:val1, key2:val2" (list value) +#instance_type_extra_specs= + +# Baremetal driver back-end (pxe or tilera) (string value) +#driver=nova.virt.baremetal.pxe.PXE + +# Baremetal power management method (string value) +#power_manager=nova.virt.baremetal.ipmi.IPMI + +# Baremetal compute node's tftp root path (string value) +#tftp_root=/tftpboot + +# path to baremetal terminal program (string value) +#terminal=shellinaboxd + +# path to baremetal terminal SSL cert(PEM) (string value) +#terminal_cert_dir= + +# path to directory stores pidfiles of baremetal_terminal +# (string value) +#terminal_pid_dir=$state_path/baremetal/console + +# maximal number of retries for IPMI operations (integer +# value) +#ipmi_power_retry=5 + +# Default kernel image ID used in deployment phase (string +# value) +#deploy_kernel= + +# Default ramdisk image ID used in deployment phase (string +# value) +#deploy_ramdisk= + +# Template file for injected network config (string value) +#net_config_template=$pybasedir/nova/virt/baremetal/net-dhcp.ubuntu.template + +# additional append parameters for baremetal PXE boot (string +# value) +#pxe_append_params= + +# Template file for PXE configuration (string value) +#pxe_config_template=$pybasedir/nova/virt/baremetal/pxe_config.template + +# Timeout for PXE deployments. Default: 0 (unlimited) (integer +# value) +#pxe_deploy_timeout=0 + +# ip or name to virtual power host (string value) +#virtual_power_ssh_host= + +# base command to use for virtual power(vbox,virsh) (string +# value) +#virtual_power_type=vbox + +# user to execute virtual power commands as (string value) +#virtual_power_host_user= + +# password for virtual power host_user (string value) +#virtual_power_host_pass= + +# Do not set this out of dev/test environments. If a node does +# not have a fixed PXE IP address, volumes are exported with +# globally opened ACL (boolean value) +#use_unsafe_iscsi=false + +# iSCSI IQN prefix used in baremetal volume connections. 
+# (string value) +#iscsi_iqn_prefix=iqn.2010-10.org.openstack.baremetal + +########## +# VMWARE # +########## +[vmware] +# Name of Integration Bridge (string value) +#integration_bridge=br-int + +######### +# SPICE # +######### +[spice] +# location of spice html5 console proxy, in the form +# "http://www.example.com:6082/spice_auto.html" (string value) +#html5proxy_base_url=http://localhost:6082/spice_auto.html + +# IP address on which instance spice server should listen (string value) +#server_listen=0.0.0.0 + +# the address to which proxy clients (like nova-spicehtml5proxy) should connect (string value) +#server_proxyclient_address=$my_ip + +# enable spice related features (boolean value) +#enabled=true +enabled=false + +# enable spice guest agent support (boolean value) +#agent_enabled=true + +# keymap for spice (string value) +#keymap=en-us + +###################### +# Keystone authtoken # +###################### +[keystone_authtoken] +identity_uri = ##IDENTITY_URI## +auth_uri = ##KEYSTONE_INTERNAL_URL## +auth_port = 35357 +auth_protocol = http +admin_tenant_name = service +admin_user = ##NOVA_SERVICE_USER## +admin_password = ##NOVA_SERVICE_PASSWORD## +auth_version = v2.0 + +########### +# COMPUTE # +########### +compute_driver=libvirt.LibvirtDriver +instance_name_template=instance-%08x +api_paste_config=/etc/nova/api-paste.ini + +# COMPUTE/APIS: if you have separate configs for separate services +# # this flag is required for both nova-api and nova-compute +allow_resize_to_same_host=True + +############ +## LIBVIRT # +############ +[libvirt] +# Actual testing hardware does not support hardware acceleration +# so in this step we will configure libvirt to use qemu instead of KVM +virt_type=qemu diff --git a/openstack/etc/nova/nova.conf.example b/openstack/etc/nova/nova.conf.example new file mode 100644 index 00000000..999574ca --- /dev/null +++ b/openstack/etc/nova/nova.conf.example @@ -0,0 +1,3698 @@ + + +[DEFAULT] + +# +# Options defined in oslo.messaging +# + +# Use durable queues in amqp. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size=30 + +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. (list value) +#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions + +# Qpid broker hostname. (string value) +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. 
+# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +#rabbit_host=localhost + +# The RabbitMQ broker port where a single node is used. +# (integer value) +#rabbit_port=5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +#rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +#rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +#rabbit_userid=guest + +# The RabbitMQ password. (string value) +#rabbit_password=guest + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=nova + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Host to locate redis. (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + +# Size of RPC greenthread pool. (integer value) +#rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +#notification_driver= + +# AMQP topic used for OpenStack notifications. 
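+# Illustrative example (topic names here are hypothetical): to fan
+# each notification out to separate monitoring and billing consumers,
+# set
+#   notification_topics=notifications,billing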
(list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. (string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other +# drivers include qpid and zmq. (string value) +#rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the +# transport_url option. (string value) +#control_exchange=openstack + + +# +# Options defined in nova.availability_zones +# + +# The availability_zone to show internal services under +# (string value) +#internal_service_availability_zone=internal + +# Default compute node availability_zone (string value) +#default_availability_zone=nova + + +# +# Options defined in nova.crypto +# + +# Filename of root CA (string value) +#ca_file=cacert.pem + +# Filename of private key (string value) +#key_file=private/cakey.pem + +# Filename of root Certificate Revocation List (string value) +#crl_file=crl.pem + +# Where we keep our keys (string value) +#keys_path=$state_path/keys + +# Where we keep our root CA (string value) +#ca_path=$state_path/CA + +# Should we use a CA for each project? (boolean value) +#use_project_ca=false + +# Subject for certificate for users, %s for project, user, +# timestamp (string value) +#user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s + +# Subject for certificate for projects, %s for project, +# timestamp (string value) +#project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s + + +# +# Options defined in nova.exception +# + +# Make exception message format errors fatal (boolean value) +#fatal_exception_format_errors=false + + +# +# Options defined in nova.netconf +# + +# IP address of this host (string value) +#my_ip=10.0.0.1 + +# Name of this node. This can be an opaque identifier. It is +# not necessarily a hostname, FQDN, or IP address. However, +# the node name must be valid within an AMQP key, and if using +# ZeroMQ, a valid hostname, FQDN, or IP address (string value) +#host=nova + +# Use IPv6 (boolean value) +#use_ipv6=false + + +# +# Options defined in nova.notifications +# + +# If set, send compute.instance.update notifications on +# instance state changes. Valid values are None for no +# notifications, "vm_state" for notifications on VM state +# changes, or "vm_and_task_state" for notifications on VM and +# task state changes. (string value) +#notify_on_state_change= + +# If set, send api.fault notifications on caught exceptions in +# the API service. 
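+# Illustrative example: to have the API service emit api.fault
+# notifications whenever it catches an exception, set
+#   notify_api_faults=true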
(boolean value) +#notify_api_faults=false + +# Default notification level for outgoing notifications +# (string value) +#default_notification_level=INFO + +# Default publisher_id for outgoing notifications (string +# value) +#default_publisher_id= + + +# +# Options defined in nova.paths +# + +# Directory where the nova python module is installed (string +# value) +#pybasedir=/usr/lib/python/site-packages + +# Directory where nova binaries are installed (string value) +#bindir=/usr/local/bin + +# Top-level directory for maintaining nova's state (string +# value) +#state_path=$pybasedir + + +# +# Options defined in nova.policy +# + +# JSON file representing policy (string value) +#policy_file=policy.json + +# Rule checked when requested rule is not found (string value) +#policy_default_rule=default + + +# +# Options defined in nova.quota +# + +# Number of instances allowed per project (integer value) +#quota_instances=10 + +# Number of instance cores allowed per project (integer value) +#quota_cores=20 + +# Megabytes of instance RAM allowed per project (integer +# value) +#quota_ram=51200 + +# Number of floating IPs allowed per project (integer value) +#quota_floating_ips=10 + +# Number of fixed IPs allowed per project (this should be at +# least the number of instances allowed) (integer value) +#quota_fixed_ips=-1 + +# Number of metadata items allowed per instance (integer +# value) +#quota_metadata_items=128 + +# Number of injected files allowed (integer value) +#quota_injected_files=5 + +# Number of bytes allowed per injected file (integer value) +#quota_injected_file_content_bytes=10240 + +# Number of bytes allowed per injected file path (integer +# value) +#quota_injected_file_path_bytes=255 + +# Number of security groups per project (integer value) +#quota_security_groups=10 + +# Number of security rules per security group (integer value) +#quota_security_group_rules=20 + +# Number of key pairs per user (integer value) +#quota_key_pairs=100 + +# Number of seconds until a reservation expires (integer +# value) +#reservation_expire=86400 + +# Count of reservations until usage is refreshed (integer +# value) +#until_refresh=0 + +# Number of seconds between subsequent usage refreshes +# (integer value) +#max_age=0 + +# Default driver to use for quota checks (string value) +#quota_driver=nova.quota.DbQuotaDriver + + +# +# Options defined in nova.service +# + +# Seconds between nodes reporting state to datastore (integer +# value) +#report_interval=10 + +# Enable periodic tasks (boolean value) +#periodic_enable=true + +# Range of seconds to randomly delay when starting the +# periodic task scheduler to reduce stampeding. (Disable by +# setting to 0) (integer value) +#periodic_fuzzy_delay=60 + +# A list of APIs to enable by default (list value) +#enabled_apis=ec2,osapi_compute,metadata + +# A list of APIs with enabled SSL (list value) +#enabled_ssl_apis= + +# The IP address on which the EC2 API will listen. (string +# value) +#ec2_listen=0.0.0.0 + +# The port on which the EC2 API will listen. (integer value) +#ec2_listen_port=8773 + +# Number of workers for EC2 API service. The default will be +# equal to the number of CPUs available. (integer value) +#ec2_workers= + +# The IP address on which the OpenStack API will listen. +# (string value) +#osapi_compute_listen=0.0.0.0 + +# The port on which the OpenStack API will listen. (integer +# value) +#osapi_compute_listen_port=8774 + +# Number of workers for OpenStack API service. The default +# will be the number of CPUs available. 
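+# Illustrative example: to cap the OpenStack API at four worker
+# processes rather than one per CPU, set
+#   osapi_compute_workers=4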
(integer value) +#osapi_compute_workers= + +# OpenStack metadata service manager (string value) +#metadata_manager=nova.api.manager.MetadataManager + +# The IP address on which the metadata API will listen. +# (string value) +#metadata_listen=0.0.0.0 + +# The port on which the metadata API will listen. (integer +# value) +#metadata_listen_port=8775 + +# Number of workers for metadata service. The default will be +# the number of CPUs available. (integer value) +#metadata_workers= + +# Full class name for the Manager for compute (string value) +#compute_manager=nova.compute.manager.ComputeManager + +# Full class name for the Manager for console proxy (string +# value) +#console_manager=nova.console.manager.ConsoleProxyManager + +# Manager for console auth (string value) +#consoleauth_manager=nova.consoleauth.manager.ConsoleAuthManager + +# Full class name for the Manager for cert (string value) +#cert_manager=nova.cert.manager.CertManager + +# Full class name for the Manager for network (string value) +#network_manager=nova.network.manager.VlanManager + +# Full class name for the Manager for scheduler (string value) +#scheduler_manager=nova.scheduler.manager.SchedulerManager + +# Maximum time since last check-in for up service (integer +# value) +#service_down_time=60 + + +# +# Options defined in nova.test +# + +# File name of clean sqlite db (string value) +#sqlite_clean_db=clean.sqlite + + +# +# Options defined in nova.utils +# + +# Whether to log monkey patching (boolean value) +#monkey_patch=false + +# List of modules/decorators to monkey patch (list value) +#monkey_patch_modules=nova.api.ec2.cloud:nova.notifications.notify_decorator,nova.compute.api:nova.notifications.notify_decorator + +# Length of generated instance admin passwords (integer value) +#password_length=12 + +# Time period to generate instance usages for. Time period +# must be hour, day, month or year (string value) +#instance_usage_audit_period=month + +# Path to the rootwrap configuration file to use for running +# commands as root (string value) +#rootwrap_config=/etc/nova/rootwrap.conf + +# Explicitly specify the temporary working directory (string +# value) +#tempdir= + + +# +# Options defined in nova.wsgi +# + +# File name for the paste.deploy config for nova-api (string +# value) +#api_paste_config=api-paste.ini + +# A python format string that is used as the template to +# generate log lines. The following values can be formatted +# into it: client_ip, date_time, request_line, status_code, +# body_length, wall_seconds. (string value) +#wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f + +# CA certificate file to use to verify connecting clients +# (string value) +#ssl_ca_file= + +# SSL certificate of API server (string value) +#ssl_cert_file= + +# SSL private key of API server (string value) +#ssl_key_file= + +# Sets the value of TCP_KEEPIDLE in seconds for each server +# socket. Not supported on OS X. (integer value) +#tcp_keepidle=600 + +# Size of the pool of greenthreads used by wsgi (integer +# value) +#wsgi_default_pool_size=1000 + +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large +# tokens (typically those generated by the Keystone v3 API +# with big service catalogs). (integer value) +#max_header_line=16384 + + +# +# Options defined in nova.api.auth +# + +# Whether to use per-user rate limiting for the api. This +# option is only used by v2 api. 
Rate limiting is removed from +# v3 api. (boolean value) +#api_rate_limit=false + +# The strategy to use for auth: noauth or keystone. (string +# value) +#auth_strategy=noauth + +# Treat X-Forwarded-For as the canonical remote address. Only +# enable this if you have a sanitizing proxy. (boolean value) +#use_forwarded_for=false + + +# +# Options defined in nova.api.ec2 +# + +# Number of failed auths before lockout. (integer value) +#lockout_attempts=5 + +# Number of minutes to lockout if triggered. (integer value) +#lockout_minutes=15 + +# Number of minutes for lockout window. (integer value) +#lockout_window=15 + +# URL to get token from ec2 request. (string value) +#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens + +# Return the IP address as private dns hostname in describe +# instances (boolean value) +#ec2_private_dns_show_ip=false + +# Validate security group names according to EC2 specification +# (boolean value) +#ec2_strict_validation=true + +# Time in seconds before ec2 timestamp expires (integer value) +#ec2_timestamp_expiry=300 + + +# +# Options defined in nova.api.ec2.cloud +# + +# The IP address of the EC2 API server (string value) +#ec2_host=$my_ip + +# The internal IP address of the EC2 API server (string value) +#ec2_dmz_host=$my_ip + +# The port of the EC2 API server (integer value) +#ec2_port=8773 + +# The protocol to use when connecting to the EC2 API server +# (http, https) (string value) +#ec2_scheme=http + +# The path prefix used to call the ec2 API server (string +# value) +#ec2_path=/services/Cloud + +# List of region=fqdn pairs separated by commas (list value) +#region_list= + + +# +# Options defined in nova.api.metadata.base +# + +# List of metadata versions to skip placing into the config +# drive (string value) +#config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01 + +# Driver to use for vendor data (string value) +#vendordata_driver=nova.api.metadata.vendordata_json.JsonFileVendorData + + +# +# Options defined in nova.api.metadata.handler +# + +# Set flag to indicate Neutron will proxy metadata requests +# and resolve instance ids. (boolean value) +#service_neutron_metadata_proxy=false + +# Shared secret to validate proxies Neutron metadata requests +# (string value) +#neutron_metadata_proxy_shared_secret= + + +# +# Options defined in nova.api.metadata.vendordata_json +# + +# File to load json formatted vendor data from (string value) +#vendordata_jsonfile_path= + + +# +# Options defined in nova.api.openstack.common +# + +# The maximum number of items returned in a single response +# from a collection resource (integer value) +#osapi_max_limit=1000 + +# Base URL that will be presented to users in links to the +# OpenStack Compute API (string value) +#osapi_compute_link_prefix= + +# Base URL that will be presented to users in links to glance +# resources (string value) +#osapi_glance_link_prefix= + + +# +# Options defined in nova.api.openstack.compute +# + +# Permit instance snapshot operations. (boolean value) +#allow_instance_snapshots=true + + +# +# Options defined in nova.api.openstack.compute.contrib +# + +# Specify list of extensions to load when using +# osapi_compute_extension option with +# nova.api.openstack.compute.contrib.select_extensions (list +# value) +#osapi_compute_ext_list= + + +# +# Options defined in nova.api.openstack.compute.contrib.fping +# + +# Full path to fping. 
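+# Illustrative example: on distributions that install fping under
+# /usr/bin rather than /usr/sbin, set
+#   fping_path=/usr/bin/fping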
(string value) +#fping_path=/usr/sbin/fping + + +# +# Options defined in nova.api.openstack.compute.contrib.os_tenant_networks +# + +# Enables or disables quota checking for tenant networks +# (boolean value) +#enable_network_quota=false + +# Control for checking for default networks (string value) +#use_neutron_default_nets=False + +# Default tenant id when creating neutron networks (string +# value) +#neutron_default_tenant_id=default + + +# +# Options defined in nova.api.openstack.compute.extensions +# + +# osapi compute extension to load (multi valued) +#osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions + + +# +# Options defined in nova.api.openstack.compute.plugins.v3.hide_server_addresses +# + +# List of instance states that should hide network info (list +# value) +#osapi_hide_server_address_states=building + + +# +# Options defined in nova.api.openstack.compute.servers +# + +# Enables returning of the instance password by the relevant +# server API calls such as create, rebuild or rescue, If the +# hypervisor does not support password injection then the +# password returned will not be correct (boolean value) +#enable_instance_password=true + + +# +# Options defined in nova.api.sizelimit +# + +# The maximum body size per each osapi request(bytes) (integer +# value) +#osapi_max_request_body_size=114688 + + +# +# Options defined in nova.cert.rpcapi +# + +# The topic cert nodes listen on (string value) +#cert_topic=cert + + +# +# Options defined in nova.cloudpipe.pipelib +# + +# Image ID used when starting up a cloudpipe vpn server +# (string value) +#vpn_image_id=0 + +# Flavor for vpn instances (string value) +#vpn_flavor=m1.tiny + +# Template for cloudpipe instance boot script (string value) +#boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template + +# Network to push into openvpn config (string value) +#dmz_net=10.0.0.0 + +# Netmask to push into openvpn config (string value) +#dmz_mask=255.255.255.0 + +# Suffix to add to project name for vpn key and secgroups +# (string value) +#vpn_key_suffix=-vpn + + +# +# Options defined in nova.cmd.novnc +# + +# Record sessions to FILE.[session_number] (boolean value) +#record=false + +# Become a daemon (background process) (boolean value) +#daemon=false + +# Disallow non-encrypted connections (boolean value) +#ssl_only=false + +# Source is ipv6 (boolean value) +#source_is_ipv6=false + +# SSL certificate file (string value) +#cert=self.pem + +# SSL key file (if separate from cert) (string value) +#key= + +# Run webserver on same port. Serve files from DIR. (string +# value) +#web=/usr/share/spice-html5 + + +# +# Options defined in nova.cmd.novncproxy +# + +# Host on which to listen for incoming requests (string value) +#novncproxy_host=0.0.0.0 + +# Port on which to listen for incoming requests (integer +# value) +#novncproxy_port=6080 + + +# +# Options defined in nova.cmd.spicehtml5proxy +# + +# Host on which to listen for incoming requests (string value) +#spicehtml5proxy_host=0.0.0.0 + +# Port on which to listen for incoming requests (integer +# value) +#spicehtml5proxy_port=6082 + + +# +# Options defined in nova.compute.api +# + +# Allow destination machine to match source for resize. Useful +# when testing in single-host environments. (boolean value) +#allow_resize_to_same_host=false + +# Allow migrate machine to the same host. Useful when testing +# in single-host environments. 
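+# Illustrative example: on a single-host test rig, allow migration
+# back onto the same machine (the COMPUTE section of nova-compute.conf
+# above enables the matching resize option):
+#   allow_migrate_to_same_host=true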
(boolean value) +#allow_migrate_to_same_host=false + +# Availability zone to use when user doesn't specify one +# (string value) +#default_schedule_zone= + +# These are image properties which a snapshot should not +# inherit from an instance (list value) +#non_inheritable_image_properties=cache_in_nova,bittorrent + +# Kernel image that indicates not to use a kernel, but to use +# a raw disk image instead (string value) +#null_kernel=nokernel + +# When creating multiple instances with a single request using +# the os-multiple-create API extension, this template will be +# used to build the display name for each instance. The +# benefit is that the instances end up with different +# hostnames. To restore legacy behavior of every instance +# having the same name, set this option to "%(name)s". Valid +# keys for the template are: name, uuid, count. (string value) +#multi_instance_display_name_template=%(name)s-%(uuid)s + +# Maximum number of devices that will result in a local image +# being created on the hypervisor node. Setting this to 0 +# means nova will allow only boot from volume. A negative +# number means unlimited. (integer value) +#max_local_block_devices=3 + + +# +# Options defined in nova.compute.flavors +# + +# Default flavor to use for the EC2 API only. The Nova API +# does not support a default flavor. (string value) +#default_flavor=m1.small + + +# +# Options defined in nova.compute.manager +# + +# Console proxy host to use to connect to instances on this +# host. (string value) +#console_host=nova + +# Name of network to use to set access IPs for instances +# (string value) +#default_access_ip_network_name= + +# Whether to batch up the application of IPTables rules during +# a host restart and apply all at the end of the init phase +# (boolean value) +#defer_iptables_apply=false + +# Where instances are stored on disk (string value) +#instances_path=$state_path/instances + +# Generate periodic compute.instance.exists notifications +# (boolean value) +#instance_usage_audit=false + +# Number of 1 second retries needed in live_migration (integer +# value) +#live_migration_retry_count=30 + +# Whether to start guests that were running before the host +# rebooted (boolean value) +#resume_guests_state_on_host_boot=false + +# Number of times to retry network allocation on failures +# (integer value) +#network_allocate_retries=0 + +# The number of times to attempt to reap an instance's files. +# (integer value) +#maximum_instance_delete_attempts=5 + +# Interval to pull network bandwidth usage info. Not supported +# on all hypervisors. Set to 0 to disable. (integer value) +#bandwidth_poll_interval=600 + +# Interval to sync power states between the database and the +# hypervisor (integer value) +#sync_power_state_interval=600 + +# Number of seconds between instance info_cache self healing +# updates (integer value) +#heal_instance_info_cache_interval=60 + +# Interval in seconds for reclaiming deleted instances +# (integer value) +#reclaim_instance_interval=0 + +# Interval in seconds for gathering volume usages (integer +# value) +#volume_usage_poll_interval=0 + +# Interval in seconds for polling shelved instances to offload +# (integer value) +#shelved_poll_interval=3600 + +# Time in seconds before a shelved instance is eligible for +# removing from a host. 
-1 never offload, 0 offload when +# shelved (integer value) +#shelved_offload_time=0 + +# Interval in seconds for retrying failed instance file +# deletes (integer value) +#instance_delete_interval=300 + +# Action to take if a running deleted instance is +# detected.Valid options are 'noop', 'log', 'shutdown', or +# 'reap'. Set to 'noop' to take no action. (string value) +#running_deleted_instance_action=reap + +# Number of seconds to wait between runs of the cleanup task. +# (integer value) +#running_deleted_instance_poll_interval=1800 + +# Number of seconds after being deleted when a running +# instance should be considered eligible for cleanup. (integer +# value) +#running_deleted_instance_timeout=0 + +# Automatically hard reboot an instance if it has been stuck +# in a rebooting state longer than N seconds. Set to 0 to +# disable. (integer value) +#reboot_timeout=0 + +# Amount of time in seconds an instance can be in BUILD before +# going into ERROR status.Set to 0 to disable. (integer value) +#instance_build_timeout=0 + +# Automatically unrescue an instance after N seconds. Set to 0 +# to disable. (integer value) +#rescue_timeout=0 + +# Automatically confirm resizes after N seconds. Set to 0 to +# disable. (integer value) +#resize_confirm_window=0 + + +# +# Options defined in nova.compute.monitors +# + +# Monitor classes available to the compute which may be +# specified more than once. (multi valued) +#compute_available_monitors=nova.compute.monitors.all_monitors + +# A list of monitors that can be used for getting compute +# metrics. (list value) +#compute_monitors= + + +# +# Options defined in nova.compute.resource_tracker +# + +# Amount of disk in MB to reserve for the host (integer value) +#reserved_host_disk_mb=0 + +# Amount of memory in MB to reserve for the host (integer +# value) +#reserved_host_memory_mb=512 + +# Class that will manage stats for the local compute host +# (string value) +#compute_stats_class=nova.compute.stats.Stats + + +# +# Options defined in nova.compute.rpcapi +# + +# The topic compute nodes listen on (string value) +#compute_topic=compute + + +# +# Options defined in nova.conductor.tasks.live_migrate +# + +# Number of times to retry live-migration before failing. If +# == -1, try until out of hosts. If == 0, only try once, no +# retries. 
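+# Illustrative example: to give up after three failed live-migration
+# attempts instead of trying every candidate host, set
+#   migrate_max_retries=3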
(integer value) +#migrate_max_retries=-1 + + +# +# Options defined in nova.console.manager +# + +# Driver to use for the console proxy (string value) +#console_driver=nova.console.xvp.XVPConsoleProxy + +# Stub calls to compute worker for tests (boolean value) +#stub_compute=false + +# Publicly visible name for this console host (string value) +#console_public_hostname=nova + + +# +# Options defined in nova.console.rpcapi +# + +# The topic console proxy nodes listen on (string value) +#console_topic=console + + +# +# Options defined in nova.console.vmrc +# + +# Port for VMware VMRC connections (integer value) +#console_vmrc_port=443 + +# Number of retries for retrieving VMRC information (integer +# value) +#console_vmrc_error_retries=10 + + +# +# Options defined in nova.console.xvp +# + +# XVP conf template (string value) +#console_xvp_conf_template=$pybasedir/nova/console/xvp.conf.template + +# Generated XVP conf file (string value) +#console_xvp_conf=/etc/xvp.conf + +# XVP master process pid file (string value) +#console_xvp_pid=/var/run/xvp.pid + +# XVP log file (string value) +#console_xvp_log=/var/log/xvp.log + +# Port for XVP to multiplex VNC connections on (integer value) +#console_xvp_multiplex_port=5900 + + +# +# Options defined in nova.consoleauth +# + +# The topic console auth proxy nodes listen on (string value) +#consoleauth_topic=consoleauth + + +# +# Options defined in nova.consoleauth.manager +# + +# How many seconds before deleting tokens (integer value) +#console_token_ttl=600 + + +# +# Options defined in nova.db.api +# + +# Services to be added to the available pool on create +# (boolean value) +#enable_new_services=true + +# Template string to be used to generate instance names +# (string value) +#instance_name_template=instance-%08x + +# Template string to be used to generate snapshot names +# (string value) +#snapshot_name_template=snapshot-%s + + +# +# Options defined in nova.db.base +# + +# The driver to use for database access (string value) +#db_driver=nova.db + + +# +# Options defined in nova.db.sqlalchemy.api +# + +# When set, compute API will consider duplicate hostnames +# invalid within the specified scope, regardless of case. +# Should be empty, "project" or "global". (string value) +#osapi_compute_unique_server_name_scope= + + +# +# Options defined in nova.image.glance +# + +# Default glance hostname or IP address (string value) +#glance_host=$my_ip + +# Default glance port (integer value) +#glance_port=9292 + +# Default protocol to use when connecting to glance. Set to +# https for SSL. (string value) +#glance_protocol=http + +# A list of the glance api servers available to nova. Prefix +# with https:// for ssl-based glance api servers. +# ([hostname|ip]:port) (list value) +#glance_api_servers=$glance_host:$glance_port + +# Allow to perform insecure SSL (https) requests to glance +# (boolean value) +#glance_api_insecure=false + +# Number of retries when downloading an image from glance +# (integer value) +#glance_num_retries=0 + +# A list of url scheme that can be downloaded directly via the +# direct_url. Currently supported schemes: [file]. 
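+# Illustrative example: when compute hosts mount the same filesystem
+# that backs the glance file store, images can be read directly with
+#   allowed_direct_url_schemes=file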
(list +# value) +#allowed_direct_url_schemes= + + +# +# Options defined in nova.image.s3 +# + +# Parent directory for tempdir used for image decryption +# (string value) +#image_decryption_dir=/tmp + +# Hostname or IP for OpenStack to use when accessing the S3 +# api (string value) +#s3_host=$my_ip + +# Port used when accessing the S3 api (integer value) +#s3_port=3333 + +# Access key to use for S3 server for images (string value) +#s3_access_key=notchecked + +# Secret key to use for S3 server for images (string value) +#s3_secret_key=notchecked + +# Whether to use SSL when talking to S3 (boolean value) +#s3_use_ssl=false + +# Whether to affix the tenant id to the access key when +# downloading from S3 (boolean value) +#s3_affix_tenant=false + + +# +# Options defined in nova.ipv6.api +# + +# Backend to use for IPv6 generation (string value) +#ipv6_backend=rfc2462 + + +# +# Options defined in nova.network +# + +# The full class name of the network API class to use (string +# value) +#network_api_class=nova.network.api.API + + +# +# Options defined in nova.network.driver +# + +# Driver to use for network creation (string value) +#network_driver=nova.network.linux_net + + +# +# Options defined in nova.network.floating_ips +# + +# Default pool for floating IPs (string value) +#default_floating_pool=nova + +# Autoassigning floating IP to VM (boolean value) +#auto_assign_floating_ip=false + +# Full class name for the DNS Manager for floating IPs (string +# value) +#floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver + +# Full class name for the DNS Manager for instance IPs (string +# value) +#instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver + +# Full class name for the DNS Zone for instance IPs (string +# value) +#instance_dns_domain= + + +# +# Options defined in nova.network.ldapdns +# + +# URL for LDAP server which will store DNS entries (string +# value) +#ldap_dns_url=ldap://ldap.example.com:389 + +# User for LDAP DNS (string value) +#ldap_dns_user=uid=admin,ou=people,dc=example,dc=org + +# Password for LDAP DNS (string value) +#ldap_dns_password=password + +# Hostmaster for LDAP DNS driver Statement of Authority +# (string value) +#ldap_dns_soa_hostmaster=hostmaster@example.org + +# DNS Servers for LDAP DNS driver (multi valued) +#ldap_dns_servers=dns.example.org + +# Base DN for DNS entries in LDAP (string value) +#ldap_dns_base_dn=ou=hosts,dc=example,dc=org + +# Refresh interval (in seconds) for LDAP DNS driver Statement +# of Authority (string value) +#ldap_dns_soa_refresh=1800 + +# Retry interval (in seconds) for LDAP DNS driver Statement of +# Authority (string value) +#ldap_dns_soa_retry=3600 + +# Expiry interval (in seconds) for LDAP DNS driver Statement +# of Authority (string value) +#ldap_dns_soa_expiry=86400 + +# Minimum interval (in seconds) for LDAP DNS driver Statement +# of Authority (string value) +#ldap_dns_soa_minimum=7200 + + +# +# Options defined in nova.network.linux_net +# + +# Location of flagfiles for dhcpbridge (multi valued) +#dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf + +# Location to keep network config files (string value) +#networks_path=$state_path/networks + +# Interface for public IP addresses (string value) +#public_interface=eth0 + +# MTU setting for network interface (integer value) +#network_device_mtu= + +# Location of nova-dhcpbridge (string value) +#dhcpbridge=$bindir/nova-dhcpbridge + +# Public IP of network host (string value) +#routing_source_ip=$my_ip + +# Lifetime of a DHCP lease in seconds (integer value) 
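+# Illustrative example: to renew DHCP leases every ten minutes
+# instead of every two, set
+#   dhcp_lease_time=600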
+#dhcp_lease_time=120 + +# If set, uses specific DNS server for dnsmasq. Can be +# specified multiple times. (multi valued) +#dns_server= + +# If set, uses the dns1 and dns2 from the network ref. as dns +# servers. (boolean value) +#use_network_dns_servers=false + +# A list of dmz range that should be accepted (list value) +#dmz_cidr= + +# Traffic to this range will always be snatted to the fallback +# ip, even if it would normally be bridged out of the node. +# Can be specified multiple times. (multi valued) +#force_snat_range= + +# Override the default dnsmasq settings with this file (string +# value) +#dnsmasq_config_file= + +# Driver used to create ethernet devices. (string value) +#linuxnet_interface_driver=nova.network.linux_net.LinuxBridgeInterfaceDriver + +# Name of Open vSwitch bridge used with linuxnet (string +# value) +#linuxnet_ovs_integration_bridge=br-int + +# Send gratuitous ARPs for HA setup (boolean value) +#send_arp_for_ha=false + +# Send this many gratuitous ARPs for HA setup (integer value) +#send_arp_for_ha_count=3 + +# Use single default gateway. Only first nic of vm will get +# default gateway from dhcp server (boolean value) +#use_single_default_gateway=false + +# An interface that bridges can forward to. If this is set to +# all then all traffic will be forwarded. Can be specified +# multiple times. (multi valued) +#forward_bridge_interface=all + +# The IP address for the metadata API server (string value) +#metadata_host=$my_ip + +# The port for the metadata API port (integer value) +#metadata_port=8775 + +# Regular expression to match iptables rule that should always +# be on the top. (string value) +#iptables_top_regex= + +# Regular expression to match iptables rule that should always +# be on the bottom. (string value) +#iptables_bottom_regex= + +# The table that iptables to jump to when a packet is to be +# dropped. (string value) +#iptables_drop_action=DROP + +# Amount of time, in seconds, that ovs_vsctl should wait for a +# response from the database. 0 is to wait forever. 
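+# Illustrative example: to fail fast rather than wait two minutes on
+# an unresponsive Open vSwitch database, set
+#   ovs_vsctl_timeout=10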
(integer +# value) +#ovs_vsctl_timeout=120 + +# If passed, use fake network devices and addresses (boolean +# value) +#fake_network=false + + +# +# Options defined in nova.network.manager +# + +# Bridge for simple network instances (string value) +#flat_network_bridge= + +# DNS server for simple network (string value) +#flat_network_dns=8.8.4.4 + +# Whether to attempt to inject network setup into guest +# (boolean value) +#flat_injected=false + +# FlatDhcp will bridge into this interface if set (string +# value) +#flat_interface= + +# First VLAN for private networks (integer value) +#vlan_start=100 + +# VLANs will bridge into this interface if set (string value) +#vlan_interface= + +# Number of networks to support (integer value) +#num_networks=1 + +# Public IP for the cloudpipe VPN servers (string value) +#vpn_ip=$my_ip + +# First Vpn port for private networks (integer value) +#vpn_start=1000 + +# Number of addresses in each private subnet (integer value) +#network_size=256 + +# Fixed IPv6 address block (string value) +#fixed_range_v6=fd00::/48 + +# Default IPv4 gateway (string value) +#gateway= + +# Default IPv6 gateway (string value) +#gateway_v6= + +# Number of addresses reserved for vpn clients (integer value) +#cnt_vpn_clients=0 + +# Seconds after which a deallocated IP is disassociated +# (integer value) +#fixed_ip_disassociate_timeout=600 + +# Number of attempts to create unique mac address (integer +# value) +#create_unique_mac_address_attempts=5 + +# If True, skip using the queue and make local calls (boolean +# value) +#fake_call=false + +# If True, unused gateway devices (VLAN and bridge) are +# deleted in VLAN network mode with multi hosted networks +# (boolean value) +#teardown_unused_network_gateway=false + +# If True, send a dhcp release on instance termination +# (boolean value) +#force_dhcp_release=true + +# If True in multi_host mode, all compute hosts share the same +# dhcp address. The same IP address used for DHCP will be +# added on each nova-network node which is only visible to the +# vms on the same host. (boolean value) +#share_dhcp_address=false + +# If True, when a DNS entry must be updated, it sends a fanout +# cast to all network hosts to update their DNS entries in +# multi host mode (boolean value) +#update_dns_entries=false + +# Number of seconds to wait between runs of updates to DNS +# entries. (integer value) +#dns_update_periodic_interval=-1 + +# Domain to use for building the hostnames (string value) +#dhcp_domain=novalocal + +# Indicates underlying L3 management library (string value) +#l3_lib=nova.network.l3.LinuxNetL3 + + +# +# Options defined in nova.network.neutronv2.api +# + +# URL for connecting to neutron (string value) +#neutron_url=http://127.0.0.1:9696 + +# Timeout value for connecting to neutron in seconds (integer +# value) +#neutron_url_timeout=30 + +# Username for connecting to neutron in admin context (string +# value) +#neutron_admin_username= + +# Password for connecting to neutron in admin context (string +# value) +#neutron_admin_password= + +# Tenant id for connecting to neutron in admin context (string +# value) +#neutron_admin_tenant_id= + +# Tenant name for connecting to neutron in admin context. This +# option is mutually exclusive with neutron_admin_tenant_id. +# Note that with Keystone V3 tenant names are only unique +# within a domain. 
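+# Illustrative example (matches the "service" tenant used by the
+# keystone_authtoken sections elsewhere in this patch):
+#   neutron_admin_tenant_name=service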
(string value)
+#neutron_admin_tenant_name=
+
+# Region name for connecting to neutron in admin context
+# (string value)
+#neutron_region_name=
+
+# Authorization URL for connecting to neutron in admin context
+# (string value)
+#neutron_admin_auth_url=http://localhost:5000/v2.0
+
+# If set, ignore any SSL validation issues (boolean value)
+#neutron_api_insecure=false
+
+# Authorization strategy for connecting to neutron in admin
+# context (string value)
+#neutron_auth_strategy=keystone
+
+# Name of Integration Bridge used by Open vSwitch (string
+# value)
+#neutron_ovs_bridge=br-int
+
+# Number of seconds before querying neutron for extensions
+# (integer value)
+#neutron_extension_sync_interval=600
+
+# Location of CA certificates file to use for neutron client
+# requests. (string value)
+#neutron_ca_certificates_file=
+
+
+#
+# Options defined in nova.network.rpcapi
+#
+
+# The topic network nodes listen on (string value)
+#network_topic=network
+
+# Default value for multi_host in networks. Also, if set, some
+# rpc network calls will be sent directly to host. (boolean
+# value)
+#multi_host=false
+
+
+#
+# Options defined in nova.network.security_group.openstack_driver
+#
+
+# The full class name of the security API class (string value)
+#security_group_api=nova
+
+
+#
+# Options defined in nova.objectstore.s3server
+#
+
+# Path to S3 buckets (string value)
+#buckets_path=$state_path/buckets
+
+# IP address for S3 API to listen (string value)
+#s3_listen=0.0.0.0
+
+# Port for S3 API to listen (integer value)
+#s3_listen_port=3333
+
+
+#
+# Options defined in nova.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=
+
+
+#
+# Options defined in nova.openstack.common.lockutils
+#
+
+# Whether to disable inter-process locks (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. (string value)
+#lock_path=
+
+
+#
+# Options defined in nova.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level).
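+# Illustrative example: to get INFO-level logs without the volume of
+# full debug output, set
+#   verbose=true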
(boolean value) +#verbose=false + +# Log output to standard error (boolean value) +#use_stderr=true + +# Format string to use for log messages with context (string +# value) +#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages without context +# (string value) +#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG (string +# value) +#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format +# (string value) +#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# List of logger=LEVEL pairs (list value) +#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN + +# Publish error events (boolean value) +#publish_errors=false + +# Make deprecations fatal (boolean value) +#fatal_deprecations=false + +# If an instance is passed with the log message, format it +# like this (string value) +#instance_format="[instance: %(uuid)s] " + +# If an instance UUID is passed with the log message, format +# it like this (string value) +#instance_uuid_format="[instance: %(uuid)s] " + +# The name of logging configuration file. It does not disable +# existing loggers, but just appends specified logging +# configuration to any other existing logging options. Please +# see the Python logging module documentation for details on +# logging configuration files. (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append= + +# DEPRECATED. A logging.Formatter log message format string +# which may use any of the available logging.LogRecord +# attributes. This option is deprecated. Please use +# logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format= + +# Format string for %%(asctime)s in log records. Default: +# %(default)s (string value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If no default is +# set, logging will go to stdout. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file= + +# (Optional) The base directory used for relative --log-file +# paths (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir= + +# Use syslog for logging. Existing syslog format is DEPRECATED +# during I, and then will be changed in J to honor RFC5424 +# (boolean value) +#use_syslog=false + +# (Optional) Use syslog rfc5424 format for logging. If +# enabled, will add APP-NAME (RFC5424) before the MSG part of +# the syslog message. The old format without APP-NAME is +# deprecated in I, and will be removed in J. (boolean value) +#use_syslog_rfc_format=false + +# Syslog facility to receive log lines (string value) +#syslog_log_facility=LOG_USER + + +# +# Options defined in nova.openstack.common.memorycache +# + +# Memcached servers or None for in process cache. (list value) +#memcached_servers= + + +# +# Options defined in nova.openstack.common.periodic_task +# + +# Some periodic tasks can be run in a separate process. Should +# we run them here? (boolean value) +#run_external_periodic_tasks=true + + +# +# Options defined in nova.pci.pci_request +# + +# An alias for a PCI passthrough device requirement. 
This +# allows users to specify the alias in the extra_spec for a +# flavor, without needing to repeat all the PCI property +# requirements. For example: pci_alias = { "name": +# "QuicAssist", "product_id": "0443", "vendor_id": "8086", +# "device_type": "ACCEL" } defines an alias for the Intel +# QuickAssist card. (multi valued) (multi valued) +#pci_alias= + + +# +# Options defined in nova.pci.pci_whitelist +# + +# White list of PCI devices available to VMs. For example: +# pci_passthrough_whitelist = [{"vendor_id": "8086", +# "product_id": "0443"}] (multi valued) +#pci_passthrough_whitelist= + + +# +# Options defined in nova.scheduler.driver +# + +# The scheduler host manager class to use (string value) +#scheduler_host_manager=nova.scheduler.host_manager.HostManager + +# Maximum number of attempts to schedule an instance (integer +# value) +#scheduler_max_attempts=3 + + +# +# Options defined in nova.scheduler.filter_scheduler +# + +# New instances will be scheduled on a host chosen randomly +# from a subset of the N best hosts. This property defines the +# subset size that a host is chosen from. A value of 1 chooses +# the first host returned by the weighing functions. This +# value must be at least 1. Any value less than 1 will be +# ignored, and 1 will be used instead (integer value) +#scheduler_host_subset_size=1 + + +# +# Options defined in nova.scheduler.filters.aggregate_image_properties_isolation +# + +# Force the filter to consider only keys matching the given +# namespace. (string value) +#aggregate_image_properties_isolation_namespace= + +# The separator used between the namespace and keys (string +# value) +#aggregate_image_properties_isolation_separator=. + + +# +# Options defined in nova.scheduler.filters.core_filter +# + +# Virtual CPU to physical CPU allocation ratio which affects +# all CPU filters. This configuration specifies a global ratio +# for CoreFilter. For AggregateCoreFilter, it will fall back +# to this configuration value if no per-aggregate setting +# found. (floating point value) +#cpu_allocation_ratio=16.0 + + +# +# Options defined in nova.scheduler.filters.disk_filter +# + +# Virtual disk to physical disk allocation ratio (floating +# point value) +#disk_allocation_ratio=1.0 + + +# +# Options defined in nova.scheduler.filters.io_ops_filter +# + +# Ignore hosts that have too many +# builds/resizes/snaps/migrations (integer value) +#max_io_ops_per_host=8 + + +# +# Options defined in nova.scheduler.filters.isolated_hosts_filter +# + +# Images to run on isolated host (list value) +#isolated_images= + +# Host reserved for specific images (list value) +#isolated_hosts= + +# Whether to force isolated hosts to run only isolated images +# (boolean value) +#restrict_isolated_hosts_to_isolated_images=true + + +# +# Options defined in nova.scheduler.filters.num_instances_filter +# + +# Ignore hosts that have too many instances (integer value) +#max_instances_per_host=50 + + +# +# Options defined in nova.scheduler.filters.ram_filter +# + +# Virtual ram to physical ram allocation ratio which affects +# all ram filters. This configuration specifies a global ratio +# for RamFilter. For AggregateRamFilter, it will fall back to +# this configuration value if no per-aggregate setting found. +# (floating point value) +#ram_allocation_ratio=1.5 + + +# +# Options defined in nova.scheduler.host_manager +# + +# Filter classes available to the scheduler which may be +# specified more than once. 
An entry of
+# "nova.scheduler.filters.standard_filters" maps to all
+# filters included with nova. (multi valued)
+#scheduler_available_filters=nova.scheduler.filters.all_filters
+
+# Which filter class names to use for filtering hosts when not
+# specified in the request. (list value)
+#scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
+
+# Which weight class names to use for weighing hosts (list
+# value)
+#scheduler_weight_classes=nova.scheduler.weights.all_weighers
+
+
+#
+# Options defined in nova.scheduler.manager
+#
+
+# Default driver to use for the scheduler (string value)
+#scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
+
+# How often (in seconds) to run periodic tasks in the
+# scheduler driver of your choice. Please note this is likely
+# to interact with the value of service_down_time, but exactly
+# how they interact will depend on your choice of scheduler
+# driver. (integer value)
+#scheduler_driver_task_period=60
+
+
+#
+# Options defined in nova.scheduler.rpcapi
+#
+
+# The topic scheduler nodes listen on (string value)
+#scheduler_topic=scheduler
+
+
+#
+# Options defined in nova.scheduler.scheduler_options
+#
+
+# Absolute path to scheduler configuration JSON file. (string
+# value)
+#scheduler_json_config_location=
+
+
+#
+# Options defined in nova.scheduler.weights.ram
+#
+
+# Multiplier used for weighing ram. Negative numbers mean to
+# stack vs spread. (floating point value)
+#ram_weight_multiplier=1.0
+
+
+#
+# Options defined in nova.servicegroup.api
+#
+
+# The driver for servicegroup service (valid options are: db,
+# zk, mc) (string value)
+#servicegroup_driver=db
+
+
+#
+# Options defined in nova.virt.configdrive
+#
+
+# Config drive format. One of iso9660 (default) or vfat
+# (string value)
+#config_drive_format=iso9660
+
+# Where to put temporary files associated with config drive
+# creation (string value)
+#config_drive_tempdir=
+
+# Set to force injection to take place on a config drive (if
+# set, valid options are: always) (string value)
+#force_config_drive=
+
+# Name and optionally path of the tool used for ISO image
+# creation (string value)
+#mkisofs_cmd=genisoimage
+
+
+#
+# Options defined in nova.virt.cpu
+#
+
+# Defines which pcpus instance vcpus can use. For example,
+# "4-12,^8,15" (string value)
+#vcpu_pin_set=
+
+
+#
+# Options defined in nova.virt.disk.api
+#
+
+# Template file for injected network (string value)
+#injected_network_template=$pybasedir/nova/virt/interfaces.template
+
+# Name of the mkfs commands for ephemeral device. The format
+# is <os_type>=<mkfs command> (multi valued)
+#virt_mkfs=
+
+# Attempt to resize the filesystem by accessing the image over
+# a block device. This is done by the host and may not be
+# necessary if the image contains a recent version of cloud-
+# init. Possible mechanisms require the nbd driver (for qcow
+# and raw), or loop (for raw). (boolean value)
+#resize_fs_using_block_device=false
+
+
+#
+# Options defined in nova.virt.disk.mount.nbd
+#
+
+# Amount of time, in seconds, to wait for NBD device start up.
+# (integer value)
+#timeout_nbd=10
+
+
+#
+# Options defined in nova.virt.driver
+#
+
+# Driver to use for controlling virtualization.
Options +# include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, +# fake.FakeDriver, baremetal.BareMetalDriver, +# vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver (string +# value) +#compute_driver= + +# The default format an ephemeral_volume will be formatted +# with on creation. (string value) +#default_ephemeral_format= + +# VM image preallocation mode: "none" => no storage +# provisioning is done up front, "space" => storage is fully +# allocated at instance start (string value) +#preallocate_images=none + +# Whether to use cow images (boolean value) +#use_cow_images=true + +# Fail instance boot if vif plugging fails (boolean value) +#vif_plugging_is_fatal=true + +# Number of seconds to wait for neutron vif plugging events to +# arrive before continuing or failing (see +# vif_plugging_is_fatal). If this is set to zero and +# vif_plugging_is_fatal is False, events should not be +# expected to arrive at all. (integer value) +#vif_plugging_timeout=300 + + +# +# Options defined in nova.virt.firewall +# + +# Firewall driver (defaults to hypervisor specific iptables +# driver) (string value) +#firewall_driver= + +# Whether to allow network traffic from same network (boolean +# value) +#allow_same_net_traffic=true + + +# +# Options defined in nova.virt.imagecache +# + +# Number of seconds to wait between runs of the image cache +# manager (integer value) +#image_cache_manager_interval=2400 + +# Where cached images are stored under $instances_path. This +# is NOT the full path - just a folder name. For per-compute- +# host cached images, set to _base_$my_ip (string value) +# Deprecated group/name - [DEFAULT]/base_dir_name +#image_cache_subdirectory_name=_base + +# Should unused base images be removed? (boolean value) +#remove_unused_base_images=true + +# Unused unresized base images younger than this will not be +# removed (integer value) +#remove_unused_original_minimum_age_seconds=86400 + + +# +# Options defined in nova.virt.imagehandler +# + +# Specifies which image handler extension names to use for +# handling images. The first extension in the list which can +# handle the image with a suitable location will be used. 
(list
+# value)
+#image_handlers=download
+
+
+#
+# Options defined in nova.virt.images
+#
+
+# Force backing images to raw format (boolean value)
+#force_raw_images=true
+
+
+#
+# Options defined in nova.vnc
+#
+
+# Location of VNC console proxy, in the form
+# "http://127.0.0.1:6080/vnc_auto.html" (string value)
+#novncproxy_base_url=http://127.0.0.1:6080/vnc_auto.html
+
+# Location of nova xvp VNC console proxy, in the form
+# "http://127.0.0.1:6081/console" (string value)
+#xvpvncproxy_base_url=http://127.0.0.1:6081/console
+
+# IP address on which instance vncservers should listen
+# (string value)
+#vncserver_listen=127.0.0.1
+
+# The address to which proxy clients (like nova-xvpvncproxy)
+# should connect (string value)
+#vncserver_proxyclient_address=127.0.0.1
+
+# Enable VNC related features (boolean value)
+#vnc_enabled=true
+
+# Keymap for VNC (string value)
+#vnc_keymap=en-us
+
+
+#
+# Options defined in nova.vnc.xvp_proxy
+#
+
+# Port that the XCP VNC proxy should bind to (integer value)
+#xvpvncproxy_port=6081
+
+# Address that the XCP VNC proxy should bind to (string value)
+#xvpvncproxy_host=0.0.0.0
+
+
+#
+# Options defined in nova.volume
+#
+
+# The full class name of the volume API class to use (string
+# value)
+#volume_api_class=nova.volume.cinder.API
+
+
+#
+# Options defined in nova.volume.cinder
+#
+
+# Info to match when looking for cinder in the service
+# catalog. Format is: separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info=volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder
+# endpoint e.g. http://localhost:8776/v1/%(project_id)s
+# (string value)
+#cinder_endpoint_template=
+
+# Region name of this node (string value)
+#os_region_name=
+
+# Location of CA certificates file to use for cinder client
+# requests. (string value)
+#cinder_ca_certificates_file=
+
+# Number of cinderclient retries on failed http calls (integer
+# value)
+#cinder_http_retries=3
+
+# Allow insecure SSL requests to cinder (boolean
+# value)
+#cinder_api_insecure=false
+
+# Allow attach between instance and volume in different
+# availability zones. (boolean value)
+#cinder_cross_az_attach=true
+
+
+[baremetal]
+
+#
+# Options defined in nova.virt.baremetal.db.api
+#
+
+# The backend to use for bare-metal database (string value)
+#db_backend=sqlalchemy
+
+
+#
+# Options defined in nova.virt.baremetal.db.sqlalchemy.session
+#
+
+# The SQLAlchemy connection string used to connect to the
+# bare-metal database (string value)
+#sql_connection=sqlite:///$state_path/baremetal_nova.sqlite
+
+
+#
+# Options defined in nova.virt.baremetal.driver
+#
+
+# Baremetal VIF driver. (string value)
+#vif_driver=nova.virt.baremetal.vif_driver.BareMetalVIFDriver
+
+# Baremetal volume driver. (string value)
+#volume_driver=nova.virt.baremetal.volume_driver.LibvirtVolumeDriver
+
+# A list of additional capabilities corresponding to
+# flavor_extra_specs for this compute host to advertise.
Valid
+# entries are name=value pairs. For example: "key1:val1,
+# key2:val2" (list value)
+# Deprecated group/name - [DEFAULT]/instance_type_extra_specs
+#flavor_extra_specs=
+
+# Baremetal driver back-end (pxe or tilera) (string value)
+#driver=nova.virt.baremetal.pxe.PXE
+
+# Baremetal power management method (string value)
+#power_manager=nova.virt.baremetal.ipmi.IPMI
+
+# Baremetal compute node's tftp root path (string value)
+#tftp_root=/tftpboot
+
+
+#
+# Options defined in nova.virt.baremetal.ipmi
+#
+
+# Path to baremetal terminal program (string value)
+#terminal=shellinaboxd
+
+# Path to baremetal terminal SSL cert (PEM) (string value)
+#terminal_cert_dir=
+
+# Path to the directory that stores pidfiles of
+# baremetal_terminal (string value)
+#terminal_pid_dir=$state_path/baremetal/console
+
+# Maximum number of retries for IPMI operations (integer
+# value)
+#ipmi_power_retry=10
+
+
+#
+# Options defined in nova.virt.baremetal.pxe
+#
+
+# Default kernel image ID used in deployment phase (string
+# value)
+#deploy_kernel=
+
+# Default ramdisk image ID used in deployment phase (string
+# value)
+#deploy_ramdisk=
+
+# Template file for injected network config (string value)
+#net_config_template=$pybasedir/nova/virt/baremetal/net-dhcp.ubuntu.template
+
+# Additional append parameters for baremetal PXE boot (string
+# value)
+#pxe_append_params=nofb nomodeset vga=normal
+
+# Template file for PXE configuration (string value)
+#pxe_config_template=$pybasedir/nova/virt/baremetal/pxe_config.template
+
+# If True, enable file injection for network info, files and
+# admin password (boolean value)
+#use_file_injection=false
+
+# Timeout for PXE deployments. Default: 0 (unlimited) (integer
+# value)
+#pxe_deploy_timeout=0
+
+# If set, pass the network configuration details to the
+# initramfs via cmdline. (boolean value)
+#pxe_network_config=false
+
+# This gets passed to Neutron as the bootfile dhcp parameter.
+# (string value)
+#pxe_bootfile_name=pxelinux.0
+
+
+#
+# Options defined in nova.virt.baremetal.tilera_pdu
+#
+
+# IP address of tilera pdu (string value)
+#tile_pdu_ip=10.0.100.1
+
+# Management script for tilera pdu (string value)
+#tile_pdu_mgr=/tftpboot/pdu_mgr
+
+# Power status of tilera PDU is OFF (integer value)
+#tile_pdu_off=2
+
+# Power status of tilera PDU is ON (integer value)
+#tile_pdu_on=1
+
+# Power status of tilera PDU (integer value)
+#tile_pdu_status=9
+
+# Wait time in seconds before checking the result of tilera
+# power operations (integer value)
+#tile_power_wait=9
+
+
+#
+# Options defined in nova.virt.baremetal.virtual_power_driver
+#
+
+# IP or name of the virtual power host (string value)
+#virtual_power_ssh_host=
+
+# Port to use for ssh to virtual power host (integer value)
+#virtual_power_ssh_port=22
+
+# Base command to use for virtual power (vbox, virsh) (string
+# value)
+#virtual_power_type=virsh
+
+# User to execute virtual power commands as (string value)
+#virtual_power_host_user=
+
+# Password for virtual power host_user (string value)
+#virtual_power_host_pass=
+
+# The ssh key for virtual power host_user (string value)
+#virtual_power_host_key=
+
+
+#
+# Options defined in nova.virt.baremetal.volume_driver
+#
+
+# Do not set this out of dev/test environments. If a node does
+# not have a fixed PXE IP address, volumes are exported with a
+# globally open ACL (boolean value)
+#use_unsafe_iscsi=false
+
+# The iSCSI IQN prefix used in baremetal volume connections.
+# (string value) +#iscsi_iqn_prefix=iqn.2010-10.org.openstack.baremetal + + +[cells] + +# +# Options defined in nova.cells.manager +# + +# Cells communication driver to use (string value) +#driver=nova.cells.rpc_driver.CellsRPCDriver + +# Number of seconds after an instance was updated or deleted +# to continue to update cells (integer value) +#instance_updated_at_threshold=3600 + +# Number of instances to update per periodic task run (integer +# value) +#instance_update_num_instances=1 + + +# +# Options defined in nova.cells.messaging +# + +# Maximum number of hops for cells routing. (integer value) +#max_hop_count=10 + +# Cells scheduler to use (string value) +#scheduler=nova.cells.scheduler.CellsScheduler + + +# +# Options defined in nova.cells.opts +# + +# Enable cell functionality (boolean value) +#enable=false + +# The topic cells nodes listen on (string value) +#topic=cells + +# Manager for cells (string value) +#manager=nova.cells.manager.CellsManager + +# Name of this cell (string value) +#name=nova + +# Key/Multi-value list with the capabilities of the cell (list +# value) +#capabilities=hypervisor=xenserver;kvm,os=linux;windows + +# Seconds to wait for response from a call to a cell. (integer +# value) +#call_timeout=60 + +# Percentage of cell capacity to hold in reserve. Affects both +# memory and disk utilization (floating point value) +#reserve_percent=10.0 + +# Type of cell: api or compute (string value) +#cell_type=compute + +# Number of seconds after which a lack of capability and +# capacity updates signals the child cell is to be treated as +# a mute. (integer value) +#mute_child_interval=300 + +# Seconds between bandwidth updates for cells. (integer value) +#bandwidth_update_interval=600 + + +# +# Options defined in nova.cells.rpc_driver +# + +# Base queue name to use when communicating between cells. +# Various topics by message type will be appended to this. +# (string value) +#rpc_driver_queue_base=cells.intercell + + +# +# Options defined in nova.cells.scheduler +# + +# Filter classes the cells scheduler should use. An entry of +# "nova.cells.filters.all_filters" maps to all cells filters +# included with nova. (list value) +#scheduler_filter_classes=nova.cells.filters.all_filters + +# Weigher classes the cells scheduler should use. An entry of +# "nova.cells.weights.all_weighers" maps to all cell weighers +# included with nova. (list value) +#scheduler_weight_classes=nova.cells.weights.all_weighers + +# How many retries when no cells are available. (integer +# value) +#scheduler_retries=10 + +# How often to retry in seconds when no cells are available. +# (integer value) +#scheduler_retry_delay=2 + + +# +# Options defined in nova.cells.state +# + +# Interval, in seconds, for getting fresh cell information +# from the database. (integer value) +#db_check_interval=60 + +# Configuration file from which to read cells configuration. +# If given, overrides reading cells from the database. (string +# value) +#cells_config= + + +# +# Options defined in nova.cells.weights.mute_child +# + +# Multiplier used to weigh mute children. (The value should be +# negative.) (floating point value) +#mute_weight_multiplier=-10.0 + +# Weight value assigned to mute children. (The value should be +# positive.) (floating point value) +#mute_weight_value=1000.0 + + +# +# Options defined in nova.cells.weights.ram_by_instance_type +# + +# Multiplier used for weighing ram. Negative numbers mean to +# stack vs spread. 
(floating point value) +#ram_weight_multiplier=10.0 + + +# +# Options defined in nova.cells.weights.weight_offset +# + +# Multiplier used to weigh offset weigher. (floating point +# value) +#offset_weight_multiplier=1.0 + + +[conductor] + +# +# Options defined in nova.conductor.api +# + +# Perform nova-conductor operations locally (boolean value) +#use_local=false + +# The topic on which conductor nodes listen (string value) +#topic=conductor + +# Full class name for the Manager for conductor (string value) +#manager=nova.conductor.manager.ConductorManager + +# Number of workers for OpenStack Conductor service. The +# default will be the number of CPUs available. (integer +# value) +#workers= + + +[database] + +# +# Options defined in nova.db.sqlalchemy.api +# + +# The SQLAlchemy connection string used to connect to the +# slave database (string value) +#slave_connection= + + +# +# Options defined in nova.openstack.common.db.options +# + +# The file name to use with SQLite (string value) +#sqlite_db=nova.sqlite + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous=true + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend=sqlalchemy + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection= + +# The SQL mode to be used for MySQL sessions (default is +# empty, meaning do not override any server-side SQL mode +# setting) (string value) +#mysql_sql_mode= + +# Timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# Maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries=10 + +# Interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information. 
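+# (Illustrative aside, not part of the generated sample: a
+# MySQL-backed deployment would typically set, in this
+# [database] section, something like
+#   connection=mysql://nova:NOVA_DBPASS@controller/nova
+# where NOVA_DBPASS and controller are placeholders.)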
0=None, +# 100=Everything (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug=0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace=false + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + +# Enable the experimental use of database reconnect on +# connection lost (boolean value) +#use_db_reconnect=false + +# seconds between db connection retries (integer value) +#db_retry_interval=1 + +# Whether to increase interval between db connection retries, +# up to db_max_retry_interval (boolean value) +#db_inc_retry_interval=true + +# max seconds between db connection retries, if +# db_inc_retry_interval is enabled (integer value) +#db_max_retry_interval=10 + +# maximum db connection retries before error is raised. +# (setting -1 implies an infinite retry count) (integer value) +#db_max_retries=20 + + +[hyperv] + +# +# Options defined in nova.virt.hyperv.pathutils +# + +# The name of a Windows share name mapped to the +# "instances_path" dir and used by the resize feature to copy +# files to the target host. If left blank, an administrative +# share will be used, looking for the same "instances_path" +# used locally (string value) +#instances_path_share= + + +# +# Options defined in nova.virt.hyperv.utilsfactory +# + +# Force V1 WMI utility classes (boolean value) +#force_hyperv_utils_v1=false + +# Force V1 volume utility class (boolean value) +#force_volumeutils_v1=false + + +# +# Options defined in nova.virt.hyperv.vif +# + +# External virtual switch Name, if not provided, the first +# external virtual switch is used (string value) +#vswitch_name= + + +# +# Options defined in nova.virt.hyperv.vmops +# + +# Required for live migration among hosts with different CPU +# features (boolean value) +#limit_cpu_features=false + +# Sets the admin password in the config drive image (boolean +# value) +#config_drive_inject_password=false + +# Path of qemu-img command which is used to convert between +# different image types (string value) +#qemu_img_cmd=qemu-img.exe + +# Attaches the Config Drive image as a cdrom drive instead of +# a disk drive (boolean value) +#config_drive_cdrom=false + +# Enables metrics collections for an instance by using +# Hyper-V's metric APIs. Collected data can by retrieved by +# other apps and services, e.g.: Ceilometer. Requires Hyper-V +# / Windows Server 2012 and above (boolean value) +#enable_instance_metrics_collection=false + +# Enables dynamic memory allocation (ballooning) when set to a +# value greater than 1. The value expresses the ratio between +# the total RAM assigned to an instance and its startup RAM +# amount. For example a ratio of 2.0 for an instance with +# 1024MB of RAM implies 512MB of RAM allocated at startup +# (floating point value) +#dynamic_memory_ratio=1.0 + + +# +# Options defined in nova.virt.hyperv.volumeops +# + +# The number of times to retry to attach a volume (integer +# value) +#volume_attach_retry_count=10 + +# Interval between volume attachment attempts, in seconds +# (integer value) +#volume_attach_retry_interval=5 + +# The number of times to retry checking for a disk mounted via +# iSCSI. (integer value) +#mounted_disk_query_retry_count=10 + +# Interval between checks for a mounted iSCSI disk, in +# seconds. 
(integer value) +#mounted_disk_query_retry_interval=5 + + +[image_file_url] + +# +# Options defined in nova.image.download.file +# + +# List of file systems that are configured in this file in the +# image_file_url: sections (list value) +#filesystems= + + +[keymgr] + +# +# Options defined in nova.keymgr +# + +# The full class name of the key manager API class (string +# value) +#api_class=nova.keymgr.conf_key_mgr.ConfKeyManager + + +# +# Options defined in nova.keymgr.conf_key_mgr +# + +# Fixed key returned by key manager, specified in hex (string +# value) +#fixed_key= + + +[keystone_authtoken] + +# +# Options defined in keystonemiddleware.auth_token +# + +# Prefix to prepend at the beginning of the path (string +# value) +#auth_admin_prefix= + +# Host providing the admin Identity API endpoint (string +# value) +#auth_host=127.0.0.1 + +# Port of the admin Identity API endpoint (integer value) +#auth_port=35357 + +# Protocol of the admin Identity API endpoint(http or https) +# (string value) +#auth_protocol=https + +# Complete public Identity API endpoint (string value) +#auth_uri= + +# API version of the admin Identity API endpoint (string +# value) +#auth_version= + +# Do not handle authorization requests within the middleware, +# but delegate the authorization decision to downstream WSGI +# components (boolean value) +#delay_auth_decision=false + +# Request timeout value for communicating with Identity API +# server. (boolean value) +#http_connect_timeout= + +# How many times are we trying to reconnect when communicating +# with Identity API Server. (integer value) +#http_request_max_retries=3 + +# Single shared secret with the Keystone configuration used +# for bootstrapping a Keystone installation, or otherwise +# bypassing the normal authentication process. (string value) +#admin_token= + +# Keystone account username (string value) +#admin_user= + +# Keystone account password (string value) +#admin_password= + +# Keystone service account tenant name to validate user tokens +# (string value) +#admin_tenant_name=admin + +# Env key for the swift cache (string value) +#cache= + +# Required if Keystone server requires client certificate +# (string value) +#certfile= + +# Required if Keystone server requires client certificate +# (string value) +#keyfile= + +# A PEM encoded Certificate Authority to use when verifying +# HTTPs connections. Defaults to system CAs. (string value) +#cafile= + +# Verify HTTPS connections. (boolean value) +#insecure=false + +# Directory used to cache files related to PKI tokens (string +# value) +#signing_dir= + +# Optionally specify a list of memcached server(s) to use for +# caching. If left undefined, tokens will instead be cached +# in-process. (list value) +# Deprecated group/name - [DEFAULT]/memcache_servers +#memcached_servers= + +# In order to prevent excessive effort spent validating +# tokens, the middleware caches previously-seen tokens for a +# configurable duration (in seconds). Set to -1 to disable +# caching completely. (integer value) +#token_cache_time=300 + +# Determines the frequency at which the list of revoked tokens +# is retrieved from the Identity service (in seconds). A high +# number of revocation events combined with a low cache +# duration may significantly reduce performance. (integer +# value) +#revocation_cache_time=300 + +# (optional) if defined, indicate whether token data should be +# authenticated or authenticated and encrypted. Acceptable +# values are MAC or ENCRYPT. 
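+# (Illustrative aside, not part of the generated sample: a
+# deployment caching tokens in memcached with encryption might
+# set
+#   memcached_servers=192.0.2.10:11211
+#   memcache_security_strategy=ENCRYPT
+#   memcache_secret_key=some-secret
+# where the address and secret are placeholders.)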
If MAC, token data is +# authenticated (with HMAC) in the cache. If ENCRYPT, token +# data is encrypted and authenticated in the cache. If the +# value is not one of these options or empty, auth_token will +# raise an exception on initialization. (string value) +#memcache_security_strategy= + +# (optional, mandatory if memcache_security_strategy is +# defined) this string is used for key derivation. (string +# value) +#memcache_secret_key= + +# (optional) indicate whether to set the X-Service-Catalog +# header. If False, middleware will not ask for service +# catalog on token validation and will not set the X-Service- +# Catalog header. (boolean value) +#include_service_catalog=true + +# Used to control the use and type of token binding. Can be +# set to: "disabled" to not check token binding. "permissive" +# (default) to validate binding information if the bind type +# is of a form known to the server and ignore it if not. +# "strict" like "permissive" but if the bind type is unknown +# the token will be rejected. "required" any form of token +# binding is needed to be allowed. Finally the name of a +# binding method that must be present in tokens. (string +# value) +#enforce_token_bind=permissive + + +[libvirt] + +# +# Options defined in nova.virt.libvirt.driver +# + +# Rescue ami image (string value) +#rescue_image_id= + +# Rescue aki image (string value) +#rescue_kernel_id= + +# Rescue ari image (string value) +#rescue_ramdisk_id= + +# Libvirt domain type (valid options are: kvm, lxc, qemu, uml, +# xen) (string value) +# Deprecated group/name - [DEFAULT]/libvirt_type +#virt_type=kvm + +# Override the default libvirt URI (which is dependent on +# virt_type) (string value) +# Deprecated group/name - [DEFAULT]/libvirt_uri +#connection_uri= + +# Inject the admin password at boot time, without an agent. +# (boolean value) +# Deprecated group/name - [DEFAULT]/libvirt_inject_password +#inject_password=false + +# Inject the ssh public key at boot time (boolean value) +# Deprecated group/name - [DEFAULT]/libvirt_inject_key +#inject_key=false + +# The partition to inject to : -2 => disable, -1 => inspect +# (libguestfs only), 0 => not partitioned, >0 => partition +# number (integer value) +# Deprecated group/name - [DEFAULT]/libvirt_inject_partition +#inject_partition=-2 + +# Sync virtual and real mouse cursors in Windows VMs (boolean +# value) +#use_usb_tablet=true + +# Migration target URI (any included "%s" is replaced with the +# migration target hostname) (string value) +#live_migration_uri=qemu+tcp://%s/system + +# Migration flags to be set for live migration (string value) +#live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER + +# Migration flags to be set for block migration (string value) +#block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_NON_SHARED_INC + +# Maximum bandwidth to be used during migration, in Mbps +# (integer value) +#live_migration_bandwidth=0 + +# Snapshot image format (valid options are : raw, qcow2, vmdk, +# vdi). Defaults to same as source image (string value) +#snapshot_image_format= + +# DEPRECATED. The libvirt VIF driver to configure the +# VIFs.This option is deprecated and will be removed in the +# Juno release. (string value) +# Deprecated group/name - [DEFAULT]/libvirt_vif_driver +#vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver + +# Libvirt handlers for remote volumes. 
(list value) +# Deprecated group/name - [DEFAULT]/libvirt_volume_drivers +#volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver,aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver,glusterfs=nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver,fibre_channel=nova.virt.libvirt.volume.LibvirtFibreChannelVolumeDriver,scality=nova.virt.libvirt.volume.LibvirtScalityVolumeDriver + +# Override the default disk prefix for the devices attached to +# a server, which is dependent on virt_type. (valid options +# are: sd, xvd, uvd, vd) (string value) +# Deprecated group/name - [DEFAULT]/libvirt_disk_prefix +#disk_prefix= + +# Number of seconds to wait for instance to shut down after +# soft reboot request is made. We fall back to hard reboot if +# instance does not shutdown within this window. (integer +# value) +# Deprecated group/name - [DEFAULT]/libvirt_wait_soft_reboot_seconds +#wait_soft_reboot_seconds=120 + +# Set to "host-model" to clone the host CPU feature flags; to +# "host-passthrough" to use the host CPU model exactly; to +# "custom" to use a named CPU model; to "none" to not set any +# CPU model. If virt_type="kvm|qemu", it will default to +# "host-model", otherwise it will default to "none" (string +# value) +# Deprecated group/name - [DEFAULT]/libvirt_cpu_mode +#cpu_mode= + +# Set to a named libvirt CPU model (see names listed in +# /usr/share/libvirt/cpu_map.xml). Only has effect if +# cpu_mode="custom" and virt_type="kvm|qemu" (string value) +# Deprecated group/name - [DEFAULT]/libvirt_cpu_model +#cpu_model= + +# Location where libvirt driver will store snapshots before +# uploading them to image service (string value) +# Deprecated group/name - [DEFAULT]/libvirt_snapshots_directory +#snapshots_directory=$instances_path/snapshots + +# Location where the Xen hvmloader is kept (string value) +#xen_hvmloader_path=/usr/lib/xen/boot/hvmloader + +# Specific cachemodes to use for different disk types e.g: +# file=directsync,block=none (list value) +#disk_cachemodes= + +# A path to a device that will be used as source of entropy on +# the host. Permitted options are: /dev/random or /dev/hwrng +# (string value) +#rng_dev_path= + + +# +# Options defined in nova.virt.libvirt.imagebackend +# + +# VM Images format. Acceptable values are: raw, qcow2, lvm, +# rbd, default. If default is specified, then use_cow_images +# flag is used instead of this one. (string value) +# Deprecated group/name - [DEFAULT]/libvirt_images_type +#images_type=default + +# LVM Volume Group that is used for VM images, when you +# specify images_type=lvm. (string value) +# Deprecated group/name - [DEFAULT]/libvirt_images_volume_group +#images_volume_group= + +# Create sparse logical volumes (with virtualsize) if this +# flag is set to True. (boolean value) +# Deprecated group/name - [DEFAULT]/libvirt_sparse_logical_volumes +#sparse_logical_volumes=false + +# Method used to wipe old volumes (valid options are: none, +# zero, shred) (string value) +#volume_clear=zero + +# Size in MiB to wipe at start of old volumes. 
0 => all
+# (integer value)
+#volume_clear_size=0
+
+# The RADOS pool in which rbd volumes are stored (string
+# value)
+# Deprecated group/name - [DEFAULT]/libvirt_images_rbd_pool
+#images_rbd_pool=rbd
+
+# Path to the ceph configuration file to use (string value)
+# Deprecated group/name - [DEFAULT]/libvirt_images_rbd_ceph_conf
+#images_rbd_ceph_conf=
+
+
+#
+# Options defined in nova.virt.libvirt.imagecache
+#
+
+# Allows image information files to be stored in non-standard
+# locations (string value)
+#image_info_filename_pattern=$instances_path/$image_cache_subdirectory_name/%(image)s.info
+
+# Should unused kernel images be removed? This is only safe to
+# enable if all compute nodes have been updated to support
+# this option. This will be enabled by default in future.
+# (boolean value)
+#remove_unused_kernels=false
+
+# Unused resized base images younger than this will not be
+# removed (integer value)
+#remove_unused_resized_minimum_age_seconds=3600
+
+# Write a checksum for files in _base to disk (boolean value)
+#checksum_base_images=false
+
+# How frequently to checksum base images (integer value)
+#checksum_interval_seconds=3600
+
+
+#
+# Options defined in nova.virt.libvirt.utils
+#
+
+# Compress snapshot images when possible. This currently
+# applies exclusively to qcow2 images (boolean value)
+# Deprecated group/name - [DEFAULT]/libvirt_snapshot_compression
+#snapshot_compression=false
+
+
+#
+# Options defined in nova.virt.libvirt.vif
+#
+
+# Use virtio for bridge interfaces with KVM/QEMU (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/libvirt_use_virtio_for_bridges
+#use_virtio_for_bridges=true
+
+
+#
+# Options defined in nova.virt.libvirt.volume
+#
+
+# Number of times to rescan iSCSI target to find volume
+# (integer value)
+#num_iscsi_scan_tries=5
+
+# Number of times to rescan iSER target to find volume
+# (integer value)
+#num_iser_scan_tries=5
+
+# The RADOS client name for accessing rbd volumes (string
+# value)
+#rbd_user=
+
+# The libvirt UUID of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=
+
+# Directory where the NFS volume is mounted on the compute
+# node (string value)
+#nfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the NFS client. See the nfs man page
+# for details (string value)
+#nfs_mount_options=
+
+# Number of times to rediscover AoE target to find volume
+# (integer value)
+#num_aoe_discover_tries=3
+
+# Directory where the glusterfs volume is mounted on the
+# compute node (string value)
+#glusterfs_mount_point_base=$state_path/mnt
+
+# Use multipath connection of the iSCSI volume (boolean value)
+# Deprecated group/name - [DEFAULT]/libvirt_iscsi_use_multipath
+#iscsi_use_multipath=false
+
+# Use multipath connection of the iSER volume (boolean value)
+# Deprecated group/name - [DEFAULT]/libvirt_iser_use_multipath
+#iser_use_multipath=false
+
+# Path or URL to Scality SOFS configuration file (string
+# value)
+#scality_sofs_config=
+
+# Base dir where Scality SOFS shall be mounted (string value)
+#scality_sofs_mount_point=$state_path/scality
+
+# Protocols listed here will be accessed directly from QEMU.
+# Currently supported protocols: [gluster] (list value)
+#qemu_allowed_storage_drivers=
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
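+
+# (Illustrative aside, not part of the generated sample: the
+# ring file maps topics to lists of hosts, e.g.
+#   {"scheduler": ["host1", "host2"]}
+# where host1 and host2 are placeholders.)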
+
+
+[metrics]
+
+#
+# Options defined in nova.scheduler.weights.metrics
+#
+
+# Multiplier used for weighing metrics. (floating point value)
+#weight_multiplier=1.0
+
+# How the metrics are going to be weighed. This should be in
+# the form of "<name1>=<ratio1>, <name2>=<ratio2>, ...", where
+# <nameX> is one of the metrics to be weighed, and <ratioX> is
+# the corresponding ratio. So for "name1=1.0, name2=-1.0" the
+# final weight would be name1.value * 1.0 + name2.value *
+# -1.0. (list value)
+#weight_setting=
+
+# How to treat the unavailable metrics. When a metric is NOT
+# available for a host and this option is set to True, an
+# exception is raised, so it is recommended to use the
+# scheduler filter MetricFilter to filter out those hosts. If
+# it is set to False, the unavailable metric is treated as a
+# negative factor in the weighing process, and the returned
+# value is set by the option weight_of_unavailable. (boolean
+# value)
+#required=true
+
+# The final weight value to be returned if required is set to
+# False and any one of the metrics set by weight_setting is
+# unavailable. (floating point value)
+#weight_of_unavailable=-10000.0
+
+
+[osapi_v3]
+
+#
+# Options defined in nova.api.openstack
+#
+
+# Whether the V3 API is enabled or not (boolean value)
+#enabled=false
+
+# A list of v3 API extensions to never load. Specify the
+# extension aliases here. (list value)
+#extensions_blacklist=
+
+# If the list is not empty then a v3 API extension will only
+# be loaded if it exists in this list. Specify the extension
+# aliases here. (list value)
+#extensions_whitelist=
+
+
+[rdp]
+
+#
+# Options defined in nova.rdp
+#
+
+# Location of RDP html5 console proxy, in the form
+# "http://127.0.0.1:6083/" (string value)
+#html5_proxy_base_url=http://127.0.0.1:6083/
+
+# Enable RDP related features (boolean value)
+#enabled=false
+
+
+[spice]
+
+#
+# Options defined in nova.spice
+#
+
+# Location of spice HTML5 console proxy, in the form
+# "http://127.0.0.1:6082/spice_auto.html" (string value)
+#html5proxy_base_url=http://127.0.0.1:6082/spice_auto.html
+
+# IP address on which instance spice server should listen
+# (string value)
+#server_listen=127.0.0.1
+
+# The address to which proxy clients (like nova-
+# spicehtml5proxy) should connect (string value)
+#server_proxyclient_address=127.0.0.1
+
+# Enable spice related features (boolean value)
+#enabled=false
+
+# Enable spice guest agent support (boolean value)
+#agent_enabled=true
+
+# Keymap for spice (string value)
+#keymap=en-us
+
+
+[ssl]
+
+#
+# Options defined in nova.openstack.common.sslutils
+#
+
+# CA certificate file to use to verify connecting clients.
+# (string value)
+#ca_file=
+
+# Certificate file to use when starting the server securely.
+# (string value)
+#cert_file=
+
+# Private key file to use when starting the server securely.
+# (string value) +#key_file= + + +[trusted_computing] + +# +# Options defined in nova.scheduler.filters.trusted_filter +# + +# Attestation server HTTP (string value) +#attestation_server= + +# Attestation server Cert file for Identity verification +# (string value) +#attestation_server_ca_file= + +# Attestation server port (string value) +#attestation_port=8443 + +# Attestation web API URL (string value) +#attestation_api_url=/OpenAttestationWebServices/V1.0 + +# Attestation authorization blob - must change (string value) +#attestation_auth_blob= + +# Attestation status cache valid period length (integer value) +#attestation_auth_timeout=60 + + +[upgrade_levels] + +# +# Options defined in nova.baserpc +# + +# Set a version cap for messages sent to the base api in any +# service (string value) +#baseapi= + + +# +# Options defined in nova.cells.rpc_driver +# + +# Set a version cap for messages sent between cells services +# (string value) +#intercell= + + +# +# Options defined in nova.cells.rpcapi +# + +# Set a version cap for messages sent to local cells services +# (string value) +#cells= + + +# +# Options defined in nova.cert.rpcapi +# + +# Set a version cap for messages sent to cert services (string +# value) +#cert= + + +# +# Options defined in nova.compute.rpcapi +# + +# Set a version cap for messages sent to compute services. If +# you plan to do a live upgrade from havana to icehouse, you +# should set this option to "icehouse-compat" before beginning +# the live upgrade procedure. (string value) +#compute= + + +# +# Options defined in nova.conductor.rpcapi +# + +# Set a version cap for messages sent to conductor services +# (string value) +#conductor= + + +# +# Options defined in nova.console.rpcapi +# + +# Set a version cap for messages sent to console services +# (string value) +#console= + + +# +# Options defined in nova.consoleauth.rpcapi +# + +# Set a version cap for messages sent to consoleauth services +# (string value) +#consoleauth= + + +# +# Options defined in nova.network.rpcapi +# + +# Set a version cap for messages sent to network services +# (string value) +#network= + + +# +# Options defined in nova.scheduler.rpcapi +# + +# Set a version cap for messages sent to scheduler services +# (string value) +#scheduler= + + +[vmware] + +# +# Options defined in nova.virt.vmwareapi.driver +# + +# Hostname or IP address for connection to VMware ESX/VC host. +# (string value) +#host_ip= + +# Username for connection to VMware ESX/VC host. (string +# value) +#host_username= + +# Password for connection to VMware ESX/VC host. (string +# value) +#host_password= + +# Name of a VMware Cluster ComputeResource. Used only if +# compute_driver is vmwareapi.VMwareVCDriver. (multi valued) +#cluster_name= + +# Regex to match the name of a datastore. (string value) +#datastore_regex= + +# The interval used for polling of remote tasks. (floating +# point value) +#task_poll_interval=0.5 + +# The number of times we retry on failures, e.g., socket +# error, etc. (integer value) +#api_retry_count=10 + +# VNC starting port (integer value) +#vnc_port=5900 + +# Total number of VNC ports (integer value) +#vnc_port_total=10000 + +# Whether to use linked clone (boolean value) +#use_linked_clone=true + + +# +# Options defined in nova.virt.vmwareapi.vif +# + +# Physical ethernet adapter name for vlan networking (string +# value) +#vlan_interface=vmnic0 + + +# +# Options defined in nova.virt.vmwareapi.vim +# + +# Optional VIM Service WSDL Location e.g +# http:///vimService.wsdl. 
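+# (Illustrative aside: with a vCenter server at
+# vcenter.example.com, a typical value would be
+# http://vcenter.example.com/vimService.wsdl, where the host
+# name is a placeholder.)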
Optional over-ride to +# default location for bug work-arounds (string value) +#wsdl_location= + + +# +# Options defined in nova.virt.vmwareapi.vim_util +# + +# The maximum number of ObjectContent data objects that should +# be returned in a single result. A positive value will cause +# the operation to suspend the retrieval when the count of +# objects reaches the specified maximum. The server may still +# limit the count to something less than the configured value. +# Any remaining objects may be retrieved with additional +# requests. (integer value) +#maximum_objects=100 + + +# +# Options defined in nova.virt.vmwareapi.vmops +# + +# Name of Integration Bridge (string value) +#integration_bridge=br-int + + +[xenserver] + +# +# Options defined in nova.virt.xenapi.agent +# + +# Number of seconds to wait for agent reply (integer value) +# Deprecated group/name - [DEFAULT]/agent_timeout +#agent_timeout=30 + +# Number of seconds to wait for agent to be fully operational +# (integer value) +# Deprecated group/name - [DEFAULT]/agent_version_timeout +#agent_version_timeout=300 + +# Number of seconds to wait for agent reply to resetnetwork +# request (integer value) +# Deprecated group/name - [DEFAULT]/agent_resetnetwork_timeout +#agent_resetnetwork_timeout=60 + +# Specifies the path in which the XenAPI guest agent should be +# located. If the agent is present, network configuration is +# not injected into the image. Used if +# compute_driver=xenapi.XenAPIDriver and flat_injected=True +# (string value) +# Deprecated group/name - [DEFAULT]/xenapi_agent_path +#agent_path=usr/sbin/xe-update-networking + +# Disables the use of the XenAPI agent in any image regardless +# of what image properties are present. (boolean value) +# Deprecated group/name - [DEFAULT]/xenapi_disable_agent +#disable_agent=false + +# Determines if the XenAPI agent should be used when the image +# used does not contain a hint to declare if the agent is +# present or not. The hint is a glance property +# "xenapi_use_agent" that has the value "True" or "False". +# Note that waiting for the agent when it is not present will +# significantly increase server boot times. (boolean value) +# Deprecated group/name - [DEFAULT]/xenapi_use_agent_default +#use_agent_default=false + + +# +# Options defined in nova.virt.xenapi.client.session +# + +# Timeout in seconds for XenAPI login. (integer value) +# Deprecated group/name - [DEFAULT]/xenapi_login_timeout +#login_timeout=10 + +# Maximum number of concurrent XenAPI connections. Used only +# if compute_driver=xenapi.XenAPIDriver (integer value) +# Deprecated group/name - [DEFAULT]/xenapi_connection_concurrent +#connection_concurrent=5 + + +# +# Options defined in nova.virt.xenapi.driver +# + +# URL for connection to XenServer/Xen Cloud Platform. A +# special value of unix://local can be used to connect to the +# local unix socket. Required if +# compute_driver=xenapi.XenAPIDriver (string value) +# Deprecated group/name - [DEFAULT]/xenapi_connection_url +#connection_url= + +# Username for connection to XenServer/Xen Cloud Platform. +# Used only if compute_driver=xenapi.XenAPIDriver (string +# value) +# Deprecated group/name - [DEFAULT]/xenapi_connection_username +#connection_username=root + +# Password for connection to XenServer/Xen Cloud Platform. +# Used only if compute_driver=xenapi.XenAPIDriver (string +# value) +# Deprecated group/name - [DEFAULT]/xenapi_connection_password +#connection_password= + +# The interval used for polling of coalescing vhds. 
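+# (Illustrative aside, not part of the generated sample: a
+# minimal XenServer connection might use
+#   connection_url=http://xenserver.example.com
+#   connection_username=root
+#   connection_password=secret
+# with placeholder host and credentials.)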
Used only +# if compute_driver=xenapi.XenAPIDriver (floating point value) +# Deprecated group/name - [DEFAULT]/xenapi_vhd_coalesce_poll_interval +#vhd_coalesce_poll_interval=5.0 + +# Ensure compute service is running on host XenAPI connects +# to. (boolean value) +# Deprecated group/name - [DEFAULT]/xenapi_check_host +#check_host=true + +# Max number of times to poll for VHD to coalesce. Used only +# if compute_driver=xenapi.XenAPIDriver (integer value) +# Deprecated group/name - [DEFAULT]/xenapi_vhd_coalesce_max_attempts +#vhd_coalesce_max_attempts=20 + +# Base path to the storage repository (string value) +# Deprecated group/name - [DEFAULT]/xenapi_sr_base_path +#sr_base_path=/var/run/sr-mount + +# The iSCSI Target Host (string value) +# Deprecated group/name - [DEFAULT]/target_host +#target_host= + +# The iSCSI Target Port, default is port 3260 (string value) +# Deprecated group/name - [DEFAULT]/target_port +#target_port=3260 + +# IQN Prefix (string value) +# Deprecated group/name - [DEFAULT]/iqn_prefix +#iqn_prefix=iqn.2010-10.org.openstack + +# Used to enable the remapping of VBD dev (Works around an +# issue in Ubuntu Maverick) (boolean value) +# Deprecated group/name - [DEFAULT]/xenapi_remap_vbd_dev +#remap_vbd_dev=false + +# Specify prefix to remap VBD dev to (ex. /dev/xvdb -> +# /dev/sdb) (string value) +# Deprecated group/name - [DEFAULT]/xenapi_remap_vbd_dev_prefix +#remap_vbd_dev_prefix=sd + + +# +# Options defined in nova.virt.xenapi.image.bittorrent +# + +# Base URL for torrent files. (string value) +# Deprecated group/name - [DEFAULT]/xenapi_torrent_base_url +#torrent_base_url= + +# Probability that peer will become a seeder. (1.0 = 100%) +# (floating point value) +# Deprecated group/name - [DEFAULT]/xenapi_torrent_seed_chance +#torrent_seed_chance=1.0 + +# Number of seconds after downloading an image via BitTorrent +# that it should be seeded for other peers. (integer value) +# Deprecated group/name - [DEFAULT]/xenapi_torrent_seed_duration +#torrent_seed_duration=3600 + +# Cached torrent files not accessed within this number of +# seconds can be reaped (integer value) +# Deprecated group/name - [DEFAULT]/xenapi_torrent_max_last_accessed +#torrent_max_last_accessed=86400 + +# Beginning of port range to listen on (integer value) +# Deprecated group/name - [DEFAULT]/xenapi_torrent_listen_port_start +#torrent_listen_port_start=6881 + +# End of port range to listen on (integer value) +# Deprecated group/name - [DEFAULT]/xenapi_torrent_listen_port_end +#torrent_listen_port_end=6891 + +# Number of seconds a download can remain at the same progress +# percentage w/o being considered a stall (integer value) +# Deprecated group/name - [DEFAULT]/xenapi_torrent_download_stall_cutoff +#torrent_download_stall_cutoff=600 + +# Maximum number of seeder processes to run concurrently +# within a given dom0. (-1 = no limit) (integer value) +# Deprecated group/name - [DEFAULT]/xenapi_torrent_max_seeder_processes_per_host +#torrent_max_seeder_processes_per_host=1 + + +# +# Options defined in nova.virt.xenapi.pool +# + +# To use for hosts with different CPUs (boolean value) +# Deprecated group/name - [DEFAULT]/use_join_force +#use_join_force=true + + +# +# Options defined in nova.virt.xenapi.vif +# + +# Name of Integration Bridge used by Open vSwitch (string +# value) +# Deprecated group/name - [DEFAULT]/xenapi_ovs_integration_bridge +#ovs_integration_bridge=xapi1 + + +# +# Options defined in nova.virt.xenapi.vm_utils +# + +# Cache glance images locally. 
`all` will cache all images, +# `some` will only cache images that have the image_property +# `cache_in_nova=True`, and `none` turns off caching entirely +# (string value) +# Deprecated group/name - [DEFAULT]/cache_images +#cache_images=all + +# Compression level for images, e.g., 9 for gzip -9. Range is +# 1-9, 9 being most compressed but most CPU intensive on dom0. +# (integer value) +# Deprecated group/name - [DEFAULT]/xenapi_image_compression_level +#image_compression_level= + +# Default OS type (string value) +# Deprecated group/name - [DEFAULT]/default_os_type +#default_os_type=linux + +# Time to wait for a block device to be created (integer +# value) +# Deprecated group/name - [DEFAULT]/block_device_creation_timeout +#block_device_creation_timeout=10 + +# Maximum size in bytes of kernel or ramdisk images (integer +# value) +# Deprecated group/name - [DEFAULT]/max_kernel_ramdisk_size +#max_kernel_ramdisk_size=16777216 + +# Filter for finding the SR to be used to install guest +# instances on. To use the Local Storage in default +# XenServer/XCP installations set this flag to other-config +# :i18n-key=local-storage. To select an SR with a different +# matching criteria, you could set it to other- +# config:my_favorite_sr=true. On the other hand, to fall back +# on the Default SR, as displayed by XenCenter, set this flag +# to: default-sr:true (string value) +# Deprecated group/name - [DEFAULT]/sr_matching_filter +#sr_matching_filter=default-sr:true + +# Whether to use sparse_copy for copying data on a resize down +# (False will use standard dd). This speeds up resizes down +# considerably since large runs of zeros won't have to be +# rsynced (boolean value) +# Deprecated group/name - [DEFAULT]/xenapi_sparse_copy +#sparse_copy=true + +# Maximum number of retries to unplug VBD (integer value) +# Deprecated group/name - [DEFAULT]/xenapi_num_vbd_unplug_retries +#num_vbd_unplug_retries=10 + +# Whether or not to download images via Bit Torrent +# (all|some|none). (string value) +# Deprecated group/name - [DEFAULT]/xenapi_torrent_images +#torrent_images=none + +# Name of network to use for booting iPXE ISOs (string value) +# Deprecated group/name - [DEFAULT]/xenapi_ipxe_network_name +#ipxe_network_name= + +# URL to the iPXE boot menu (string value) +# Deprecated group/name - [DEFAULT]/xenapi_ipxe_boot_menu_url +#ipxe_boot_menu_url= + +# Name and optionally path of the tool used for ISO image +# creation (string value) +# Deprecated group/name - [DEFAULT]/xenapi_ipxe_mkisofs_cmd +#ipxe_mkisofs_cmd=mkisofs + + +# +# Options defined in nova.virt.xenapi.vmops +# + +# Number of seconds to wait for instance to go to running +# state (integer value) +# Deprecated group/name - [DEFAULT]/xenapi_running_timeout +#running_timeout=60 + +# The XenAPI VIF driver using XenServer Network APIs. (string +# value) +# Deprecated group/name - [DEFAULT]/xenapi_vif_driver +#vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver + +# Dom0 plugin driver used to handle image uploads. 
(string +# value) +# Deprecated group/name - [DEFAULT]/xenapi_image_upload_handler +#image_upload_handler=nova.virt.xenapi.image.glance.GlanceStore + + +# +# Options defined in nova.virt.xenapi.volume_utils +# + +# Number of seconds to wait for an SR to settle if the VDI +# does not exist when first introduced (integer value) +#introduce_vdi_retry_wait=20 + + +[zookeeper] + +# +# Options defined in nova.servicegroup.drivers.zk +# + +# The ZooKeeper addresses for servicegroup service in the +# format of host1:port,host2:port,host3:port (string value) +#address= + +# The recv_timeout parameter for the zk session (integer +# value) +#recv_timeout=4000 + +# The prefix used in ZooKeeper to store ephemeral nodes +# (string value) +#sg_prefix=/servicegroups + +# Number of seconds to wait until retrying to join the session +# (integer value) +#sg_retry_interval=5 + + + diff --git a/openstack/etc/nova/policy.json b/openstack/etc/nova/policy.json new file mode 100644 index 00000000..cc5b8ea4 --- /dev/null +++ b/openstack/etc/nova/policy.json @@ -0,0 +1,324 @@ +{ + "context_is_admin": "role:admin", + "admin_or_owner": "is_admin:True or project_id:%(project_id)s", + "default": "rule:admin_or_owner", + + "cells_scheduler_filter:TargetCellFilter": "is_admin:True", + + "compute:create": "", + "compute:create:attach_network": "", + "compute:create:attach_volume": "", + "compute:create:forced_host": "is_admin:True", + "compute:get_all": "", + "compute:get_all_tenants": "", + "compute:start": "rule:admin_or_owner", + "compute:stop": "rule:admin_or_owner", + "compute:unlock_override": "rule:admin_api", + + "compute:shelve": "", + "compute:shelve_offload": "", + "compute:unshelve": "", + + "compute:volume_snapshot_create": "", + "compute:volume_snapshot_delete": "", + + "admin_api": "is_admin:True", + "compute:v3:servers:start": "rule:admin_or_owner", + "compute:v3:servers:stop": "rule:admin_or_owner", + "compute_extension:v3:os-access-ips:discoverable": "", + "compute_extension:v3:os-access-ips": "", + "compute_extension:accounts": "rule:admin_api", + "compute_extension:admin_actions": "rule:admin_api", + "compute_extension:admin_actions:pause": "rule:admin_or_owner", + "compute_extension:admin_actions:unpause": "rule:admin_or_owner", + "compute_extension:admin_actions:suspend": "rule:admin_or_owner", + "compute_extension:admin_actions:resume": "rule:admin_or_owner", + "compute_extension:admin_actions:lock": "rule:admin_or_owner", + "compute_extension:admin_actions:unlock": "rule:admin_or_owner", + "compute_extension:admin_actions:resetNetwork": "rule:admin_api", + "compute_extension:admin_actions:injectNetworkInfo": "rule:admin_api", + "compute_extension:admin_actions:createBackup": "rule:admin_or_owner", + "compute_extension:admin_actions:migrateLive": "rule:admin_api", + "compute_extension:admin_actions:resetState": "rule:admin_api", + "compute_extension:admin_actions:migrate": "rule:admin_api", + "compute_extension:v3:os-admin-actions": "rule:admin_api", + "compute_extension:v3:os-admin-actions:discoverable": "", + "compute_extension:v3:os-admin-actions:reset_network": "rule:admin_api", + "compute_extension:v3:os-admin-actions:inject_network_info": "rule:admin_api", + "compute_extension:v3:os-admin-actions:reset_state": "rule:admin_api", + "compute_extension:v3:os-admin-password": "", + "compute_extension:v3:os-admin-password:discoverable": "", + "compute_extension:aggregates": "rule:admin_api", + "compute_extension:v3:os-aggregates:discoverable": "", + "compute_extension:v3:os-aggregates:index": 
"rule:admin_api", + "compute_extension:v3:os-aggregates:create": "rule:admin_api", + "compute_extension:v3:os-aggregates:show": "rule:admin_api", + "compute_extension:v3:os-aggregates:update": "rule:admin_api", + "compute_extension:v3:os-aggregates:delete": "rule:admin_api", + "compute_extension:v3:os-aggregates:add_host": "rule:admin_api", + "compute_extension:v3:os-aggregates:remove_host": "rule:admin_api", + "compute_extension:v3:os-aggregates:set_metadata": "rule:admin_api", + "compute_extension:agents": "rule:admin_api", + "compute_extension:v3:os-agents": "rule:admin_api", + "compute_extension:v3:os-agents:discoverable": "", + "compute_extension:attach_interfaces": "", + "compute_extension:v3:os-attach-interfaces": "", + "compute_extension:v3:os-attach-interfaces:discoverable": "", + "compute_extension:baremetal_nodes": "rule:admin_api", + "compute_extension:cells": "rule:admin_api", + "compute_extension:v3:os-cells": "rule:admin_api", + "compute_extension:v3:os-cells:discoverable": "", + "compute_extension:certificates": "", + "compute_extension:v3:os-certificates:create": "", + "compute_extension:v3:os-certificates:show": "", + "compute_extension:v3:os-certificates:discoverable": "", + "compute_extension:cloudpipe": "rule:admin_api", + "compute_extension:cloudpipe_update": "rule:admin_api", + "compute_extension:console_output": "", + "compute_extension:v3:consoles:discoverable": "", + "compute_extension:v3:os-console-output:discoverable": "", + "compute_extension:v3:os-console-output": "", + "compute_extension:consoles": "", + "compute_extension:v3:os-remote-consoles": "", + "compute_extension:v3:os-remote-consoles:discoverable": "", + "compute_extension:createserverext": "", + "compute_extension:v3:os-create-backup:discoverable": "", + "compute_extension:v3:os-create-backup": "rule:admin_or_owner", + "compute_extension:deferred_delete": "", + "compute_extension:v3:os-deferred-delete": "", + "compute_extension:v3:os-deferred-delete:discoverable": "", + "compute_extension:disk_config": "", + "compute_extension:evacuate": "rule:admin_api", + "compute_extension:v3:os-evacuate": "rule:admin_api", + "compute_extension:v3:os-evacuate:discoverable": "", + "compute_extension:extended_server_attributes": "rule:admin_api", + "compute_extension:v3:os-extended-server-attributes": "rule:admin_api", + "compute_extension:v3:os-extended-server-attributes:discoverable": "", + "compute_extension:extended_status": "", + "compute_extension:v3:os-extended-status": "", + "compute_extension:v3:os-extended-status:discoverable": "", + "compute_extension:extended_availability_zone": "", + "compute_extension:v3:os-extended-availability-zone": "", + "compute_extension:v3:os-extended-availability-zone:discoverable": "", + "compute_extension:extended_ips": "", + "compute_extension:extended_ips_mac": "", + "compute_extension:extended_vif_net": "", + "compute_extension:v3:extension_info:discoverable": "", + "compute_extension:extended_volumes": "", + "compute_extension:v3:os-extended-volumes": "", + "compute_extension:v3:os-extended-volumes:swap": "", + "compute_extension:v3:os-extended-volumes:discoverable": "", + "compute_extension:v3:os-extended-volumes:attach": "", + "compute_extension:v3:os-extended-volumes:detach": "", + "compute_extension:fixed_ips": "rule:admin_api", + "compute_extension:flavor_access": "", + "compute_extension:flavor_access:addTenantAccess": "rule:admin_api", + "compute_extension:flavor_access:removeTenantAccess": "rule:admin_api", + "compute_extension:v3:flavor-access": "", + 
"compute_extension:v3:flavor-access:discoverable": "", + "compute_extension:v3:flavor-access:remove_tenant_access": "rule:admin_api", + "compute_extension:v3:flavor-access:add_tenant_access": "rule:admin_api", + "compute_extension:flavor_disabled": "", + "compute_extension:flavor_rxtx": "", + "compute_extension:v3:os-flavor-rxtx": "", + "compute_extension:v3:os-flavor-rxtx:discoverable": "", + "compute_extension:flavor_swap": "", + "compute_extension:flavorextradata": "", + "compute_extension:flavorextraspecs:index": "", + "compute_extension:flavorextraspecs:show": "", + "compute_extension:flavorextraspecs:create": "rule:admin_api", + "compute_extension:flavorextraspecs:update": "rule:admin_api", + "compute_extension:flavorextraspecs:delete": "rule:admin_api", + "compute_extension:v3:flavors:discoverable": "", + "compute_extension:v3:flavor-extra-specs:discoverable": "", + "compute_extension:v3:flavor-extra-specs:index": "", + "compute_extension:v3:flavor-extra-specs:show": "", + "compute_extension:v3:flavor-extra-specs:create": "rule:admin_api", + "compute_extension:v3:flavor-extra-specs:update": "rule:admin_api", + "compute_extension:v3:flavor-extra-specs:delete": "rule:admin_api", + "compute_extension:flavormanage": "rule:admin_api", + "compute_extension:v3:flavor-manage": "rule:admin_api", + "compute_extension:floating_ip_dns": "", + "compute_extension:floating_ip_pools": "", + "compute_extension:floating_ips": "", + "compute_extension:floating_ips_bulk": "rule:admin_api", + "compute_extension:fping": "", + "compute_extension:fping:all_tenants": "rule:admin_api", + "compute_extension:hide_server_addresses": "is_admin:False", + "compute_extension:v3:os-hide-server-addresses": "is_admin:False", + "compute_extension:v3:os-hide-server-addresses:discoverable": "", + "compute_extension:hosts": "rule:admin_api", + "compute_extension:v3:os-hosts": "rule:admin_api", + "compute_extension:v3:os-hosts:discoverable": "", + "compute_extension:hypervisors": "rule:admin_api", + "compute_extension:v3:os-hypervisors": "rule:admin_api", + "compute_extension:v3:os-hypervisors:discoverable": "", + "compute_extension:image_size": "", + "compute_extension:instance_actions": "", + "compute_extension:v3:os-server-actions": "", + "compute_extension:v3:os-server-actions:discoverable": "", + "compute_extension:instance_actions:events": "rule:admin_api", + "compute_extension:v3:os-server-actions:events": "rule:admin_api", + "compute_extension:instance_usage_audit_log": "rule:admin_api", + "compute_extension:v3:ips:discoverable": "", + "compute_extension:keypairs": "", + "compute_extension:keypairs:index": "", + "compute_extension:keypairs:show": "", + "compute_extension:keypairs:create": "", + "compute_extension:keypairs:delete": "", + "compute_extension:v3:keypairs:discoverable": "", + "compute_extension:v3:keypairs": "", + "compute_extension:v3:keypairs:index": "", + "compute_extension:v3:keypairs:show": "", + "compute_extension:v3:keypairs:create": "", + "compute_extension:v3:keypairs:delete": "", + "compute_extension:v3:os-lock-server:discoverable": "", + "compute_extension:v3:os-lock-server:lock": "rule:admin_or_owner", + "compute_extension:v3:os-lock-server:unlock": "rule:admin_or_owner", + "compute_extension:v3:os-migrate-server:discoverable": "", + "compute_extension:v3:os-migrate-server:migrate": "rule:admin_api", + "compute_extension:v3:os-migrate-server:migrate_live": "rule:admin_api", + "compute_extension:multinic": "", + "compute_extension:v3:os-multinic": "", + 
"compute_extension:v3:os-multinic:discoverable": "", + "compute_extension:networks": "rule:admin_api", + "compute_extension:networks:view": "", + "compute_extension:networks_associate": "rule:admin_api", + "compute_extension:v3:os-pause-server:discoverable": "", + "compute_extension:v3:os-pause-server:pause": "rule:admin_or_owner", + "compute_extension:v3:os-pause-server:unpause": "rule:admin_or_owner", + "compute_extension:v3:os-pci:pci_servers": "", + "compute_extension:v3:os-pci:discoverable": "", + "compute_extension:v3:os-pci:index": "rule:admin_api", + "compute_extension:v3:os-pci:detail": "rule:admin_api", + "compute_extension:v3:os-pci:show": "rule:admin_api", + "compute_extension:quotas:show": "", + "compute_extension:quotas:update": "rule:admin_api", + "compute_extension:quotas:delete": "rule:admin_api", + "compute_extension:v3:os-quota-sets:discoverable": "", + "compute_extension:v3:os-quota-sets:show": "", + "compute_extension:v3:os-quota-sets:update": "rule:admin_api", + "compute_extension:v3:os-quota-sets:delete": "rule:admin_api", + "compute_extension:v3:os-quota-sets:detail": "rule:admin_api", + "compute_extension:quota_classes": "", + "compute_extension:rescue": "", + "compute_extension:v3:os-rescue": "", + "compute_extension:v3:os-rescue:discoverable": "", + "compute_extension:v3:os-scheduler-hints:discoverable": "", + "compute_extension:security_group_default_rules": "rule:admin_api", + "compute_extension:security_groups": "", + "compute_extension:v3:os-security-groups": "", + "compute_extension:v3:os-security-groups:discoverable": "", + "compute_extension:server_diagnostics": "rule:admin_api", + "compute_extension:v3:os-server-diagnostics": "rule:admin_api", + "compute_extension:v3:os-server-diagnostics:discoverable": "", + "compute_extension:server_groups": "", + "compute_extension:server_password": "", + "compute_extension:v3:os-server-password": "", + "compute_extension:v3:os-server-password:discoverable": "", + "compute_extension:server_usage": "", + "compute_extension:v3:os-server-usage": "", + "compute_extension:v3:os-server-usage:discoverable": "", + "compute_extension:services": "rule:admin_api", + "compute_extension:v3:os-services": "rule:admin_api", + "compute_extension:v3:os-services:discoverable": "", + "compute_extension:v3:server-metadata:discoverable": "", + "compute_extension:v3:servers:discoverable": "", + "compute_extension:shelve": "", + "compute_extension:shelveOffload": "rule:admin_api", + "compute_extension:v3:os-shelve:shelve": "", + "compute_extension:v3:os-shelve:shelve:discoverable": "", + "compute_extension:v3:os-shelve:shelve_offload": "rule:admin_api", + "compute_extension:simple_tenant_usage:show": "rule:admin_or_owner", + "compute_extension:v3:os-suspend-server:discoverable": "", + "compute_extension:v3:os-suspend-server:suspend": "rule:admin_or_owner", + "compute_extension:v3:os-suspend-server:resume": "rule:admin_or_owner", + "compute_extension:simple_tenant_usage:list": "rule:admin_api", + "compute_extension:unshelve": "", + "compute_extension:v3:os-shelve:unshelve": "", + "compute_extension:users": "rule:admin_api", + "compute_extension:v3:os-user-data:discoverable": "", + "compute_extension:virtual_interfaces": "", + "compute_extension:virtual_storage_arrays": "", + "compute_extension:volumes": "", + "compute_extension:volume_attachments:index": "", + "compute_extension:volume_attachments:show": "", + "compute_extension:volume_attachments:create": "", + "compute_extension:volume_attachments:update": "", + 
"compute_extension:volume_attachments:delete": "", + "compute_extension:volumetypes": "", + "compute_extension:availability_zone:list": "", + "compute_extension:v3:os-availability-zone:list": "", + "compute_extension:v3:os-availability-zone:discoverable": "", + "compute_extension:availability_zone:detail": "rule:admin_api", + "compute_extension:v3:os-availability-zone:detail": "rule:admin_api", + "compute_extension:used_limits_for_admin": "rule:admin_api", + "compute_extension:migrations:index": "rule:admin_api", + "compute_extension:v3:os-migrations:index": "rule:admin_api", + "compute_extension:v3:os-migrations:discoverable": "", + "compute_extension:os-assisted-volume-snapshots:create": "rule:admin_api", + "compute_extension:os-assisted-volume-snapshots:delete": "rule:admin_api", + "compute_extension:console_auth_tokens": "rule:admin_api", + "compute_extension:v3:os-console-auth-tokens": "rule:admin_api", + "compute_extension:os-server-external-events:create": "rule:admin_api", + "compute_extension:v3:os-server-external-events:create": "rule:admin_api", + + "volume:create": "", + "volume:get_all": "", + "volume:get_volume_metadata": "", + "volume:get_snapshot": "", + "volume:get_all_snapshots": "", + + + "volume_extension:types_manage": "rule:admin_api", + "volume_extension:types_extra_specs": "rule:admin_api", + "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", + "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", + "volume_extension:volume_admin_actions:force_delete": "rule:admin_api", + + + "network:get_all": "", + "network:get": "", + "network:create": "", + "network:delete": "", + "network:associate": "", + "network:disassociate": "", + "network:get_vifs_by_instance": "", + "network:allocate_for_instance": "", + "network:deallocate_for_instance": "", + "network:validate_networks": "", + "network:get_instance_uuids_by_ip_filter": "", + "network:get_instance_id_by_floating_address": "", + "network:setup_networks_on_host": "", + "network:get_backdoor_port": "", + + "network:get_floating_ip": "", + "network:get_floating_ip_pools": "", + "network:get_floating_ip_by_address": "", + "network:get_floating_ips_by_project": "", + "network:get_floating_ips_by_fixed_address": "", + "network:allocate_floating_ip": "", + "network:deallocate_floating_ip": "", + "network:associate_floating_ip": "", + "network:disassociate_floating_ip": "", + "network:release_floating_ip": "", + "network:migrate_instance_start": "", + "network:migrate_instance_finish": "", + + "network:get_fixed_ip": "", + "network:get_fixed_ip_by_address": "", + "network:add_fixed_ip_to_instance": "", + "network:remove_fixed_ip_from_instance": "", + "network:add_network_to_project": "", + "network:get_instance_nw_info": "", + + "network:get_dns_domains": "", + "network:add_dns_entry": "", + "network:modify_dns_entry": "", + "network:delete_dns_entry": "", + "network:get_dns_entries_by_address": "", + "network:get_dns_entries_by_name": "", + "network:create_private_dns_domain": "", + "network:create_public_dns_domain": "", + "network:delete_dns_domain": "" +} diff --git a/openstack/etc/nova/release.sample b/openstack/etc/nova/release.sample new file mode 100644 index 00000000..4c0d8e48 --- /dev/null +++ b/openstack/etc/nova/release.sample @@ -0,0 +1,4 @@ +[Nova] +vendor = Fedora Project +product = OpenStack Nova +package = 1.fc18 diff --git a/openstack/etc/nova/rootwrap.conf b/openstack/etc/nova/rootwrap.conf new file mode 100644 index 00000000..aa466c5d --- /dev/null +++ 
b/openstack/etc/nova/rootwrap.conf @@ -0,0 +1,27 @@ +# Configuration for nova-rootwrap +# This file should be owned by (and only-writeable by) the root user + +[DEFAULT] +# List of directories to load filter definitions from (separated by ','). +# These directories MUST all be only writeable by root ! +filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap + +# List of directories to search executables in, in case filters do not +# explicitly specify a full path (separated by ',') +# If not specified, defaults to system PATH environment variable. +# These directories MUST all be only writeable by root ! +exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin + +# Enable logging to syslog +# Default value is False +use_syslog=False + +# Which syslog facility to use. +# Valid values include auth, authpriv, syslog, local0, local1... +# Default value is 'syslog' +syslog_log_facility=syslog + +# Which messages to log. +# INFO means log all usage +# ERROR means only log unsuccessful attempts +syslog_log_level=ERROR diff --git a/openstack/etc/nova/rootwrap.d/api-metadata.filters b/openstack/etc/nova/rootwrap.d/api-metadata.filters new file mode 100644 index 00000000..1aa6f83e --- /dev/null +++ b/openstack/etc/nova/rootwrap.d/api-metadata.filters @@ -0,0 +1,13 @@ +# nova-rootwrap command filters for api-metadata nodes +# This is needed on nova-api hosts running with "metadata" in enabled_apis +# or when running nova-api-metadata +# This file should be owned by (and only-writeable by) the root user + +[Filters] +# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... +iptables-save: CommandFilter, iptables-save, root +ip6tables-save: CommandFilter, ip6tables-save, root + +# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-restore: CommandFilter, ip6tables-restore, root diff --git a/openstack/etc/nova/rootwrap.d/baremetal-compute-ipmi.filters b/openstack/etc/nova/rootwrap.d/baremetal-compute-ipmi.filters new file mode 100644 index 00000000..4132a999 --- /dev/null +++ b/openstack/etc/nova/rootwrap.d/baremetal-compute-ipmi.filters @@ -0,0 +1,9 @@ +# nova-rootwrap command filters for compute nodes +# This file should be owned by (and only-writeable by) the root user + +[Filters] +# nova/virt/baremetal/ipmi.py: 'ipmitool', ..
+ipmitool: CommandFilter, ipmitool, root + +# nova/virt/baremetal/ipmi.py: 'kill', '-TERM', str(console_pid) +kill_shellinaboxd: KillFilter, root, /usr/local/bin/shellinaboxd, -15, -TERM diff --git a/openstack/etc/nova/rootwrap.d/baremetal-deploy-helper.filters b/openstack/etc/nova/rootwrap.d/baremetal-deploy-helper.filters new file mode 100644 index 00000000..6d14b5d9 --- /dev/null +++ b/openstack/etc/nova/rootwrap.d/baremetal-deploy-helper.filters @@ -0,0 +1,11 @@ +# nova-rootwrap command filters for nova-baremetal-deploy-helper +# This file should be owned by (and only-writeable by) the root user + +[Filters] +# nova-baremetal-deploy-helper +iscsiadm: CommandFilter, iscsiadm, root +sfdisk: CommandFilter, sfdisk, root +dd: CommandFilter, dd, root +mkswap: CommandFilter, mkswap, root +blkid: CommandFilter, blkid, root +mkfs: CommandFilter, mkfs, root diff --git a/openstack/etc/nova/rootwrap.d/compute.filters b/openstack/etc/nova/rootwrap.d/compute.filters new file mode 100644 index 00000000..b79851b4 --- /dev/null +++ b/openstack/etc/nova/rootwrap.d/compute.filters @@ -0,0 +1,228 @@ +# nova-rootwrap command filters for compute nodes +# This file should be owned by (and only-writeable by) the root user + +[Filters] +# nova/virt/disk/mount/api.py: 'kpartx', '-a', device +# nova/virt/disk/mount/api.py: 'kpartx', '-d', device +kpartx: CommandFilter, kpartx, root + +# nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path +# nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path +tune2fs: CommandFilter, tune2fs, root + +# nova/virt/disk/mount/api.py: 'mount', mapped_device +# nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target +# nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'.. +# nova/virt/configdrive.py: 'mount', device, mountdir +# nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ... +mount: CommandFilter, mount, root + +# nova/virt/disk/mount/api.py: 'umount', mapped_device +# nova/virt/disk/api.py: 'umount' target +# nova/virt/xenapi/vm_utils.py: 'umount', dev_path +# nova/virt/configdrive.py: 'umount', mountdir +umount: CommandFilter, umount, root + +# nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image +# nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device +qemu-nbd: CommandFilter, qemu-nbd, root + +# nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image +# nova/virt/disk/mount/loop.py: 'losetup', '--detach', device +losetup: CommandFilter, losetup, root + +# nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path +# nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device +blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.* + +# nova/virt/disk/vfs/localfs.py: 'tee', canonpath +tee: CommandFilter, tee, root + +# nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath +mkdir: CommandFilter, mkdir, root + +# nova/virt/disk/vfs/localfs.py: 'chown' +# nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log +# nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log +# nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk') +chown: CommandFilter, chown, root + +# nova/virt/disk/vfs/localfs.py: 'chmod' +chmod: CommandFilter, chmod, root + +# nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap' +# nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up' +# nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev +# nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i.. 
+# nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'.. +# nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',.. +# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',.. +# nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev) +# nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1] +# nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge +# nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', .. +# nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',.. +# nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ... +# nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,.. +# nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up' +# nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up' +# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, .. +# nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, .. +# nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up' +# nova/network/linux_net.py: 'ip', 'route', 'add', .. +# nova/network/linux_net.py: 'ip', 'route', 'del', . +# nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev +ip: CommandFilter, ip, root + +# nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev +# nova/network/linux_net.py: 'tunctl', '-b', '-t', dev +tunctl: CommandFilter, tunctl, root + +# nova/virt/libvirt/vif.py: 'ovs-vsctl', ... +# nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ... +# nova/network/linux_net.py: 'ovs-vsctl', .... +ovs-vsctl: CommandFilter, ovs-vsctl, root + +# nova/network/linux_net.py: 'ovs-ofctl', .... +ovs-ofctl: CommandFilter, ovs-ofctl, root + +# nova/virt/libvirt/connection.py: 'dd', if=%s % virsh_output, ... +dd: CommandFilter, dd, root + +# nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ... +iscsiadm: CommandFilter, iscsiadm, root + +# nova/virt/libvirt/volume.py: 'aoe-revalidate', aoedev +# nova/virt/libvirt/volume.py: 'aoe-discover' +aoe-revalidate: CommandFilter, aoe-revalidate, root +aoe-discover: CommandFilter, aoe-discover, root + +# nova/virt/xenapi/vm_utils.py: parted, --script, ... +# nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*. +parted: CommandFilter, parted, root + +# nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path +pygrub: CommandFilter, pygrub, root + +# nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s +fdisk: CommandFilter, fdisk, root + +# nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path +# nova/virt/disk/api.py: e2fsck, -f, -p, image +e2fsck: CommandFilter, e2fsck, root + +# nova/virt/xenapi/vm_utils.py: resize2fs, partition_path +# nova/virt/disk/api.py: resize2fs, image +resize2fs: CommandFilter, resize2fs, root + +# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... +iptables-save: CommandFilter, iptables-save, root +ip6tables-save: CommandFilter, ip6tables-save, root + +# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-restore: CommandFilter, ip6tables-restore, root + +# nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ... +# nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],.. 
+arping: CommandFilter, arping, root + +# nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address +dhcp_release: CommandFilter, dhcp_release, root + +# nova/network/linux_net.py: 'kill', '-9', pid +# nova/network/linux_net.py: 'kill', '-HUP', pid +kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP + +# nova/network/linux_net.py: 'kill', pid +kill_radvd: KillFilter, root, /usr/sbin/radvd + +# nova/network/linux_net.py: dnsmasq call +dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq + +# nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'.. +radvd: CommandFilter, radvd, root + +# nova/network/linux_net.py: 'brctl', 'addbr', bridge +# nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0 +# nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off' +# nova/network/linux_net.py: 'brctl', 'addif', bridge, interface +brctl: CommandFilter, brctl, root + +# nova/virt/libvirt/utils.py: 'mkswap' +# nova/virt/xenapi/vm_utils.py: 'mkswap' +mkswap: CommandFilter, mkswap, root + +# nova/virt/xenapi/vm_utils.py: 'mkfs' +# nova/utils.py: 'mkfs', fs, path, label +mkfs: CommandFilter, mkfs, root + +# nova/virt/libvirt/utils.py: 'qemu-img' +qemu-img: CommandFilter, qemu-img, root + +# nova/virt/disk/vfs/localfs.py: 'readlink', '-e' +readlink: CommandFilter, readlink, root + +# nova/virt/disk/api.py: 'touch', target +touch: CommandFilter, touch, root + +# nova/virt/disk/api.py: +mkfs.ext3: CommandFilter, mkfs.ext3, root +mkfs.ntfs: CommandFilter, mkfs.ntfs, root + +# nova/virt/libvirt/connection.py: +read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi + +# nova/virt/libvirt/connection.py: +lvremove: CommandFilter, lvremove, root + +# nova/virt/libvirt/utils.py: +lvcreate: CommandFilter, lvcreate, root + +# nova/virt/libvirt/utils.py: +lvs: CommandFilter, lvs, root + +# nova/virt/libvirt/utils.py: +vgs: CommandFilter, vgs, root + +# nova/virt/baremetal/volume_driver.py: 'tgtadm', '--lld', 'iscsi', ... +tgtadm: CommandFilter, tgtadm, root + +# nova/utils.py:read_file_as_root: 'cat', file_path +# (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file) +read_passwd: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd +read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow + +# nova/virt/libvirt/volume.py: 'multipath' '-R' +multipath: CommandFilter, multipath, root + +# nova/virt/libvirt/utils.py: +systool: CommandFilter, systool, root + +# nova/virt/libvirt/volume.py: +sginfo: CommandFilter, sginfo, root +sg_scan: CommandFilter, sg_scan, root +ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/ip-.*-iscsi-iqn.2010-10.org.openstack:volume-.*, /dev/disk/by-path/ip-.*-iscsi-iqn.2010-10.org.openstack:volume-.* + +# nova/volume/encryptors.py: +# nova/virt/libvirt/dmcrypt.py: +cryptsetup: CommandFilter, cryptsetup, root + +# nova/virt/xenapi/vm_utils.py: +xenstore-read: CommandFilter, xenstore-read, root + +# nova/virt/baremetal/tilera.py: 'rpc.mountd' +rpc.mountd: CommandFilter, rpc.mountd, root + +# nova/virt/libvirt/utils.py: +rbd: CommandFilter, rbd, root + +# nova/virt/libvirt/utils.py: 'shred', '-n3', '-s%d' % volume_size, path +shred: CommandFilter, shred, root + +# nova/virt/libvirt/volume.py: 'cp', '/dev/stdin', delete_control.. 
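+# Note: a CommandFilter pins only the executable and the user it runs as; it +# does not restrict arguments, which is why RegExpFilter, KillFilter and +# EnvFilter entries are used above where argument checking matters.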
+cp: CommandFilter, cp, root + +# nova/virt/xenapi/vm_utils.py: +sync: CommandFilter, sync, root + diff --git a/openstack/etc/nova/rootwrap.d/network.filters b/openstack/etc/nova/rootwrap.d/network.filters new file mode 100644 index 00000000..568e8d49 --- /dev/null +++ b/openstack/etc/nova/rootwrap.d/network.filters @@ -0,0 +1,94 @@ +# nova-rootwrap command filters for network nodes +# This file should be owned by (and only-writeable by) the root user + +[Filters] +# nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap' +# nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up' +# nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev +# nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i.. +# nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'.. +# nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',.. +# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',.. +# nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev) +# nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1] +# nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge +# nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', .. +# nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',.. +# nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ... +# nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,.. +# nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up' +# nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up' +# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, .. +# nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, .. +# nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up' +# nova/network/linux_net.py: 'ip', 'route', 'add', .. +# nova/network/linux_net.py: 'ip', 'route', 'del', . +# nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev +ip: CommandFilter, ip, root + +# nova/virt/libvirt/vif.py: 'ovs-vsctl', ... +# nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ... +# nova/network/linux_net.py: 'ovs-vsctl', .... +ovs-vsctl: CommandFilter, ovs-vsctl, root + +# nova/network/linux_net.py: 'ovs-ofctl', .... +ovs-ofctl: CommandFilter, ovs-ofctl, root + +# nova/virt/libvirt/vif.py: 'ivs-ctl', ... +# nova/virt/libvirt/vif.py: 'ivs-ctl', 'del-port', ... +# nova/network/linux_net.py: 'ivs-ctl', .... +ivs-ctl: CommandFilter, ivs-ctl, root + +# nova/virt/libvirt/vif.py: 'ifc_ctl', ... +ifc_ctl: CommandFilter, /opt/pg/bin/ifc_ctl, root + +# nova/virt/libvirt/vif.py: 'ebrctl', ... +ebrctl: CommandFilter, ebrctl, root + +# nova/virt/libvirt/vif.py: 'mm-ctl', ... +mm-ctl: CommandFilter, mm-ctl, root + +# nova/network/linux_net.py: 'ebtables', '-D' ... +# nova/network/linux_net.py: 'ebtables', '-I' ... +ebtables: CommandFilter, ebtables, root +ebtables_usr: CommandFilter, ebtables, root + +# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... +iptables-save: CommandFilter, iptables-save, root +ip6tables-save: CommandFilter, ip6tables-save, root + +# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-restore: CommandFilter, ip6tables-restore, root + +# nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ... +# nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],.. 
+arping: CommandFilter, arping, root + +# nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address +dhcp_release: CommandFilter, dhcp_release, root + +# nova/network/linux_net.py: 'kill', '-9', pid +# nova/network/linux_net.py: 'kill', '-HUP', pid +kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP + +# nova/network/linux_net.py: 'kill', pid +kill_radvd: KillFilter, root, /usr/sbin/radvd + +# nova/network/linux_net.py: dnsmasq call +dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq + +# nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'.. +radvd: CommandFilter, radvd, root + +# nova/network/linux_net.py: 'brctl', 'addbr', bridge +# nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0 +# nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off' +# nova/network/linux_net.py: 'brctl', 'addif', bridge, interface +brctl: CommandFilter, brctl, root + +# nova/network/linux_net.py: 'sysctl', .... +sysctl: CommandFilter, sysctl, root + +# nova/network/linux_net.py: 'conntrack' +conntrack: CommandFilter, conntrack, root diff --git a/openstack/etc/sysctl.conf b/openstack/etc/sysctl.conf new file mode 100644 index 00000000..a4417acc --- /dev/null +++ b/openstack/etc/sysctl.conf @@ -0,0 +1,3 @@ +net.ipv4.ip_forward=1 +net.ipv4.conf.all.rp_filter=0 +net.ipv4.conf.default.rp_filter=0 diff --git a/openstack/etc/systemd/system/apache-httpd-server-setup.service b/openstack/etc/systemd/system/apache-httpd-server-setup.service new file mode 100644 index 00000000..17fa301b --- /dev/null +++ b/openstack/etc/systemd/system/apache-httpd-server-setup.service @@ -0,0 +1,11 @@ +[Unit] +Description=Run apache-httpd-server-setup (once) +After=local-fs.target + +[Service] +Type=oneshot +ExecStart=/usr/share/openstack/apache-httpd-server-setup +Restart=no + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/apache-httpd.service b/openstack/etc/systemd/system/apache-httpd.service new file mode 100644 index 00000000..01627e02 --- /dev/null +++ b/openstack/etc/systemd/system/apache-httpd.service @@ -0,0 +1,16 @@ +[Unit] +Description=Apache Web Server +Requires=apache-httpd-server-setup.service openstack-horizon-setup.service +After=network.target remote-fs.target nss-lookup.target apache-httpd-server-setup.service openstack-horizon-setup.service + +[Service] +Type=forking +PIDFile=/var/run/httpd.pid +ExecStart=/usr/sbin/apachectl start +ExecStop=/usr/sbin/apachectl graceful-stop +ExecReload=/usr/sbin/apachectl graceful +PrivateTmp=true +LimitNOFILE=infinity + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-cinder-api.service b/openstack/etc/systemd/system/openstack-cinder-api.service new file mode 100644 index 00000000..a4a100f8 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-cinder-api.service @@ -0,0 +1,11 @@ +[Unit] +Description=OpenStack Volume Service (code-named Cinder) API server +After=syslog.target network.target + +[Service] +Type=simple +User=cinder +ExecStart=/usr/bin/cinder-api --config-file /etc/cinder/cinder.conf --log-file=/var/log/cinder/cinder-api.log + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-cinder-backup.service b/openstack/etc/systemd/system/openstack-cinder-backup.service new file mode 100644 index 00000000..a8f09ae0 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-cinder-backup.service @@ -0,0 +1,11 @@ +[Unit] +Description=OpenStack Cinder backup server +After=syslog.target network.target + +[Service] 
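+# The backup daemon runs as the unprivileged cinder user created by openstack-cinder-setup.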
+Type=simple +User=cinder +ExecStart=/usr/bin/cinder-backup --config-file /etc/cinder/cinder.conf --log-file=/var/log/cinder/cinder-backup.log + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-cinder-scheduler.service b/openstack/etc/systemd/system/openstack-cinder-scheduler.service new file mode 100644 index 00000000..feff70f4 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-cinder-scheduler.service @@ -0,0 +1,11 @@ +[Unit] +Description=OpenStack Cinder scheduler server +After=syslog.target network.target + +[Service] +Type=simple +User=cinder +ExecStart=/usr/bin/cinder-scheduler --config-file /etc/cinder/cinder.conf --log-file=/var/log/cinder/cinder-scheduler.log + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-cinder-setup.service b/openstack/etc/systemd/system/openstack-cinder-setup.service new file mode 100644 index 00000000..2bd0a6bb --- /dev/null +++ b/openstack/etc/systemd/system/openstack-cinder-setup.service @@ -0,0 +1,11 @@ +[Unit] +Description=Run openstack-cinder-setup (once) +After=local-fs.target openstack-keystone-setup.service postgres-server.service + +[Service] +Type=oneshot +ExecStart=/usr/share/openstack/openstack-cinder-setup +Restart=no + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-cinder-volume.service b/openstack/etc/systemd/system/openstack-cinder-volume.service new file mode 100644 index 00000000..8852c90f --- /dev/null +++ b/openstack/etc/systemd/system/openstack-cinder-volume.service @@ -0,0 +1,11 @@ +[Unit] +Description=OpenStack Cinder volume server +After=syslog.target network.target + +[Service] +Type=simple +User=cinder +ExecStart=/usr/bin/cinder-volume --config-file /etc/cinder/cinder.conf --log-file=/var/log/cinder/cinder-volume.log + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-glance-api.service b/openstack/etc/systemd/system/openstack-glance-api.service new file mode 100644 index 00000000..7958f84c --- /dev/null +++ b/openstack/etc/systemd/system/openstack-glance-api.service @@ -0,0 +1,12 @@ +[Unit] +Description=OpenStack Image Service (code-named Glance) API server +After=syslog.target network.target + +[Service] +Type=simple +User=glance +ExecStart=/usr/bin/glance-api --config-file /etc/glance/glance-api.conf + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/etc/systemd/system/openstack-glance-registry.service b/openstack/etc/systemd/system/openstack-glance-registry.service new file mode 100644 index 00000000..18a60ae1 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-glance-registry.service @@ -0,0 +1,12 @@ +[Unit] +Description=OpenStack Image Service (code-named Glance) Registry server +After=syslog.target network.target + +[Service] +Type=simple +User=glance +ExecStart=/usr/bin/glance-registry --config-file /etc/glance/glance-registry.conf + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/etc/systemd/system/openstack-glance-setup.service b/openstack/etc/systemd/system/openstack-glance-setup.service new file mode 100644 index 00000000..ee03c167 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-glance-setup.service @@ -0,0 +1,11 @@ +[Unit] +Description=Run openstack-glance-setup (once) +After=local-fs.target openstack-keystone-setup.service postgres-server.service + +[Service] +Type=oneshot +ExecStart=/usr/share/openstack/openstack-glance-setup +Restart=no + +[Install] +WantedBy=multi-user.target diff --git 
a/openstack/etc/systemd/system/openstack-horizon-setup.service b/openstack/etc/systemd/system/openstack-horizon-setup.service new file mode 100644 index 00000000..082ec372 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-horizon-setup.service @@ -0,0 +1,11 @@ +[Unit] +Description=Run openstack-horizon-setup (once) +After=local-fs.target + +[Service] +Type=oneshot +ExecStart=/usr/share/openstack/openstack-horizon-setup +Restart=no + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-keystone-setup.service b/openstack/etc/systemd/system/openstack-keystone-setup.service new file mode 100644 index 00000000..fb2793bb --- /dev/null +++ b/openstack/etc/systemd/system/openstack-keystone-setup.service @@ -0,0 +1,11 @@ +[Unit] +Description=Run openstack-keystone-setup (once) +After=local-fs.target postgres-server.service + +[Service] +Type=oneshot +ExecStart=/usr/share/openstack/openstack-keystone-setup +Restart=no + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-keystone.service b/openstack/etc/systemd/system/openstack-keystone.service new file mode 100644 index 00000000..82b2d078 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-keystone.service @@ -0,0 +1,12 @@ +[Unit] +Description=OpenStack Identity Service (code-named Keystone) +After=syslog.target network.target + +[Service] +Type=notify +Restart=always +User=keystone +ExecStart=/usr/bin/keystone-all --config-file /etc/keystone/keystone.conf + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-neutron-dhcp-agent.service b/openstack/etc/systemd/system/openstack-neutron-dhcp-agent.service new file mode 100644 index 00000000..a86b7a0a --- /dev/null +++ b/openstack/etc/systemd/system/openstack-neutron-dhcp-agent.service @@ -0,0 +1,14 @@ +[Unit] +Description=Neutron DHCP Agent +After=network-online.target openstack-neutron-ovs-cleanup.service + +[Service] +Type=simple +User=neutron +ExecStart=/usr/bin/neutron-dhcp-agent \ + --config-file=/etc/neutron/neutron.conf \ + --config-file=/etc/neutron/dhcp_agent.ini \ + --log-file=/var/log/neutron/dhcp-agent.log + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-neutron-l3-agent.service b/openstack/etc/systemd/system/openstack-neutron-l3-agent.service new file mode 100644 index 00000000..7fe12f46 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-neutron-l3-agent.service @@ -0,0 +1,15 @@ +[Unit] +Description=Neutron Layer 3 Agent +After=network-online.target openstack-neutron-ovs-cleanup.service + +[Service] +Type=simple +User=neutron +ExecStart=/usr/bin/neutron-l3-agent \ + --config-file=/etc/neutron/neutron.conf \ + --config-file=/etc/neutron/l3_agent.ini \ + --config-file=/etc/neutron/fwaas_driver.ini \ + --log-file=/var/log/neutron/l3-agent.log + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-neutron-metadata-agent.service b/openstack/etc/systemd/system/openstack-neutron-metadata-agent.service new file mode 100644 index 00000000..15fd406f --- /dev/null +++ b/openstack/etc/systemd/system/openstack-neutron-metadata-agent.service @@ -0,0 +1,14 @@ +[Unit] +Description=Neutron Metadata Plugin Agent +After=network-online.target openstack-neutron-setup.service + +[Service] +Type=simple +User=neutron +ExecStart=/usr/bin/neutron-metadata-agent \ + --config-file=/etc/neutron/neutron.conf \ + --config-file=/etc/neutron/metadata_agent.ini \ + 
--log-file=/var/log/neutron/metadata-agent.log + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-neutron-network-configuration-one-node.service b/openstack/etc/systemd/system/openstack-neutron-network-configuration-one-node.service new file mode 100644 index 00000000..2b1d168b --- /dev/null +++ b/openstack/etc/systemd/system/openstack-neutron-network-configuration-one-node.service @@ -0,0 +1,13 @@ +[Unit] +Description=Configuration script to set up OpenStack one-node networking +Wants=network-online.target +After=network-online.target openvswitch.service + +[Service] +Type=oneshot +ExecStart=/usr/share/openstack/openstack-neutron-network-configuration-for-one-node +Restart=no +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-neutron-ovs-cleanup.service b/openstack/etc/systemd/system/openstack-neutron-ovs-cleanup.service new file mode 100644 index 00000000..5eaf8a30 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-neutron-ovs-cleanup.service @@ -0,0 +1,14 @@ +[Unit] +Description=Neutron OVS cleanup +After=network-online.target openstack-neutron-setup.service +ConditionFileIsExecutable=/usr/bin/neutron-ovs-cleanup + +[Service] +Type=simple +User=neutron +ExecStart=/usr/bin/neutron-ovs-cleanup \ + --log-file /var/log/neutron/ovs-cleanup.log \ + --config-file /etc/neutron/neutron.conf --verbose + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-neutron-plugin-openvswitch-agent.service b/openstack/etc/systemd/system/openstack-neutron-plugin-openvswitch-agent.service new file mode 100644 index 00000000..12498cc9 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-neutron-plugin-openvswitch-agent.service @@ -0,0 +1,14 @@ +[Unit] +Description=Neutron Open vSwitch Plugin Agent +After=network-online.target openstack-neutron-setup.service + +[Service] +Type=simple +User=neutron +ExecStart=/usr/bin/neutron-openvswitch-agent \ + --config-file=/etc/neutron/neutron.conf \ + --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini \ + --log-file=/var/log/neutron/openvswitch-agent.log + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-neutron-server.service b/openstack/etc/systemd/system/openstack-neutron-server.service new file mode 100644 index 00000000..f48c0a88 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-neutron-server.service @@ -0,0 +1,15 @@ +[Unit] +Description=Neutron API Server +Wants=openvswitch-create-links-one-node.service +After=network-online.target openstack-neutron-setup.service + +[Service] +Type=simple +User=neutron +ExecStart=/usr/bin/neutron-server \ + --config-file=/etc/neutron/neutron.conf \ + --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini \ + --log-file=/var/log/neutron/neutron-server.log + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-neutron-setup.service b/openstack/etc/systemd/system/openstack-neutron-setup.service new file mode 100644 index 00000000..38882e80 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-neutron-setup.service @@ -0,0 +1,12 @@ +[Unit] +Description=Run openstack-neutron-setup (once) +After=network-online.target openstack-keystone-setup.service openvswitch-create-links-one-node.service postgres-server.service + +[Service] +Type=oneshot +ExecStart=/usr/share/openstack/openstack-neutron-setup +Restart=no +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target diff --git
a/openstack/etc/systemd/system/openstack-nova-api.service b/openstack/etc/systemd/system/openstack-nova-api.service new file mode 100644 index 00000000..de7412ee --- /dev/null +++ b/openstack/etc/systemd/system/openstack-nova-api.service @@ -0,0 +1,11 @@ +[Unit] +Description=OpenStack Compute Service (code-named Nova) API server +After=syslog.target network.target + +[Service] +Type=simple +User=nova +ExecStart=/usr/bin/nova-api --config-file /etc/nova/nova.conf + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-nova-cert.service b/openstack/etc/systemd/system/openstack-nova-cert.service new file mode 100644 index 00000000..c21246bf --- /dev/null +++ b/openstack/etc/systemd/system/openstack-nova-cert.service @@ -0,0 +1,11 @@ +[Unit] +Description=OpenStack Nova Cert +After=syslog.target network.target + +[Service] +Type=simple +User=nova +ExecStart=/usr/bin/nova-cert --config-file /etc/nova/nova.conf + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-nova-compute.service b/openstack/etc/systemd/system/openstack-nova-compute.service new file mode 100644 index 00000000..dc18f225 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-nova-compute.service @@ -0,0 +1,12 @@ +[Unit] +Description=OpenStack Compute Service (code-named Nova) compute server +After=syslog.target network.target libvirtd.service +Requires=libvirtd.service + +[Service] +Type=simple +User=nova +ExecStart=/usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-nova-conductor.service b/openstack/etc/systemd/system/openstack-nova-conductor.service new file mode 100644 index 00000000..68e0a6ed --- /dev/null +++ b/openstack/etc/systemd/system/openstack-nova-conductor.service @@ -0,0 +1,12 @@ +[Unit] +Description=Database-access support for Compute nodes (nova-conductor) +After=syslog.target network.target libvirtd.service +Requires=libvirtd.service + +[Service] +Type=simple +User=nova +ExecStart=/usr/bin/nova-conductor --config-file /etc/nova/nova.conf --logfile /var/log/nova/conductor.log + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-nova-consoleauth.service b/openstack/etc/systemd/system/openstack-nova-consoleauth.service new file mode 100644 index 00000000..2c582518 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-nova-consoleauth.service @@ -0,0 +1,11 @@ +[Unit] +Description=OpenStack Console Auth (nova-consoleauth) +After=syslog.target network.target + +[Service] +Type=simple +User=nova +ExecStart=/usr/bin/nova-consoleauth --config-file /etc/nova/nova.conf + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-nova-novncproxy.service b/openstack/etc/systemd/system/openstack-nova-novncproxy.service new file mode 100644 index 00000000..21013244 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-nova-novncproxy.service @@ -0,0 +1,11 @@ +[Unit] +Description=OpenStack Nova NoVNC proxy +After=syslog.target network.target + +[Service] +Type=simple +User=nova +ExecStart=/usr/bin/nova-novncproxy --config-file /etc/nova/nova.conf + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-nova-scheduler.service b/openstack/etc/systemd/system/openstack-nova-scheduler.service new file mode 100644 index 00000000..519ef5eb --- /dev/null +++
b/openstack/etc/systemd/system/openstack-nova-scheduler.service @@ -0,0 +1,11 @@ +[Unit] +Description=OpenStack Nova Scheduler +After=syslog.target network.target + +[Service] +Type=simple +User=nova +ExecStart=/usr/bin/nova-scheduler --config-file /etc/nova/nova.conf + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-nova-setup.service b/openstack/etc/systemd/system/openstack-nova-setup.service new file mode 100644 index 00000000..e7a9136f --- /dev/null +++ b/openstack/etc/systemd/system/openstack-nova-setup.service @@ -0,0 +1,11 @@ +[Unit] +Description=Run openstack-nova-setup (once) +After=local-fs.target libvirtd.service openstack-keystone-setup.service postgres-server.service + +[Service] +Type=oneshot +ExecStart=/usr/share/openstack/openstack-nova-setup +Restart=no + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-rabbitmq-server.service b/openstack/etc/systemd/system/openstack-rabbitmq-server.service new file mode 100644 index 00000000..91bd6fa7 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-rabbitmq-server.service @@ -0,0 +1,16 @@ +[Unit] +Description=RabbitMQ broker +After=syslog.target network.target openstack-rabbitmq-setup.service +Requires=openstack-rabbitmq-setup.service + +[Service] +Type=notify +User=rabbitmq +Group=rabbitmq +Environment="HOME=/var/lib/rabbitmq" +WorkingDirectory=/var/lib/rabbitmq +ExecStart=/usr/sbin/rabbitmq-server +ExecStop=/usr/sbin/rabbitmqctl stop + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openstack-rabbitmq-setup.service b/openstack/etc/systemd/system/openstack-rabbitmq-setup.service new file mode 100644 index 00000000..85257fd1 --- /dev/null +++ b/openstack/etc/systemd/system/openstack-rabbitmq-setup.service @@ -0,0 +1,12 @@ +[Unit] +Description=Run openstack-rabbitmq-setup (once) +Requires=local-fs.target +After=local-fs.target network.target + +[Service] +Type=oneshot +ExecStart=/usr/share/openstack/openstack-rabbitmq-setup +Restart=no + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openvswitch-create-links-one-node.service b/openstack/etc/systemd/system/openvswitch-create-links-one-node.service new file mode 100644 index 00000000..bacfc433 --- /dev/null +++ b/openstack/etc/systemd/system/openvswitch-create-links-one-node.service @@ -0,0 +1,14 @@ +[Unit] +Description=Create Veth pairs +Wants=openstack-neutron-network-configuration-one-node.service +After=network-online.target openstack-neutron-network-configuration-one-node.service + +[Service] +Type=oneshot +ExecStart=/usr/share/openstack/create_openvswitch_veth_pairs +RemainAfterExit=yes +Restart=no + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/etc/systemd/system/openvswitch-db-server.service b/openstack/etc/systemd/system/openvswitch-db-server.service new file mode 100644 index 00000000..e1cd2042 --- /dev/null +++ b/openstack/etc/systemd/system/openvswitch-db-server.service @@ -0,0 +1,11 @@ +[Unit] +Description=Open vSwitch Database Server Daemon +After=local-fs.target openvswitch-setup.service + +[Service] +Type=forking +ExecStart=/usr/sbin/ovsdb-server --remote=punix:/usr/local/var/run/openvswitch/db.sock --remote=db:Open_vSwitch,Open_vSwitch,manager_options --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --pidfile --detach + +[Install] +WantedBy=multi-user.target + diff --git 
a/openstack/etc/systemd/system/openvswitch-initialize-db.service b/openstack/etc/systemd/system/openvswitch-initialize-db.service new file mode 100644 index 00000000..3c564c5a --- /dev/null +++ b/openstack/etc/systemd/system/openvswitch-initialize-db.service @@ -0,0 +1,10 @@ +[Unit] +Description=Run openvswitch-initialize-db (once) +After=local-fs.target openvswitch-db-server.service + +ConditionPathExists=!/usr/local/var/run/openvswitch/openvswitch-initialize-db-flag + +[Service] +Type=oneshot +ExecStart=/usr/bin/ovs-vsctl --no-wait init +ExecStart=/bin/touch /usr/local/var/run/openvswitch/openvswitch-initialize-db-flag diff --git a/openstack/etc/systemd/system/openvswitch-setup.service b/openstack/etc/systemd/system/openvswitch-setup.service new file mode 100644 index 00000000..bfe67002 --- /dev/null +++ b/openstack/etc/systemd/system/openvswitch-setup.service @@ -0,0 +1,11 @@ +[Unit] +Description=Run openvswitch-setup (once) +After=local-fs.target + +[Service] +Type=oneshot +ExecStart=/usr/share/openstack/openvswitch-setup +Restart=no + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/openvswitch.service b/openstack/etc/systemd/system/openvswitch.service new file mode 100644 index 00000000..446c0f95 --- /dev/null +++ b/openstack/etc/systemd/system/openvswitch.service @@ -0,0 +1,10 @@ +[Unit] +Description=Open vSwitch Daemon +After=openvswitch-initialize-db.service + +[Service] +Type=forking +ExecStart=/usr/sbin/ovs-vswitchd --pidfile --detach + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/postgres-server.service b/openstack/etc/systemd/system/postgres-server.service new file mode 100644 index 00000000..6ee25e98 --- /dev/null +++ b/openstack/etc/systemd/system/postgres-server.service @@ -0,0 +1,26 @@ +[Unit] +Description=PostgreSQL database server +Requires=postgres-setup.service +After=postgres-setup.service + +[Service] +Type=forking +TimeoutSec=120 +User=postgres +Group=postgres + +Environment=PGROOT=/var/lib/pgsql + +SyslogIdentifier=postgres +PIDFile=/var/lib/pgsql/data/postmaster.pid + +ExecStart= /usr/bin/pg_ctl -s -D ${PGROOT}/data start -w -t 120 +ExecReload=/usr/bin/pg_ctl -s -D ${PGROOT}/data reload +ExecStop= /usr/bin/pg_ctl -s -D ${PGROOT}/data stop -m fast + +# Due to PostgreSQL's use of shared memory, the OOM killer is often overzealous +# in killing Postgres, so adjust its score downward +OOMScoreAdjust=-200 + +[Install] +WantedBy=multi-user.target diff --git a/openstack/etc/systemd/system/postgres-setup.service b/openstack/etc/systemd/system/postgres-setup.service new file mode 100644 index 00000000..281d0d77 --- /dev/null +++ b/openstack/etc/systemd/system/postgres-setup.service @@ -0,0 +1,11 @@ +[Unit] +Description=Run postgres-setup (once) +After=network.target + +[Service] +Type=oneshot +ExecStart=/usr/share/openstack/postgres-setup +Restart=no + +[Install] +WantedBy=multi-user.target diff --git a/openstack/manifest b/openstack/manifest new file mode 100644 index 00000000..a1badde4 --- /dev/null +++ b/openstack/manifest @@ -0,0 +1,187 @@ +0040755 0 0 /etc/keystone +0040755 0 0 /var/lib/keystone +0040755 0 0 /usr/share/openstack +0100755 0 0 /usr/share/openstack/openstack-keystone-setup +0100644 0 0 /etc/keystone/logging.conf +0100644 0 0 /etc/keystone/keystone.conf +0100644 0 0 /etc/keystone/policy.json +0100644 0 0 /etc/keystone/keystone-paste.ini +0100644 0 0 /etc/logrotate.d/openstack-keystone +0100644 0 0 /etc/systemd/system/openstack-keystone.service +0100644 0 0
/etc/systemd/system/openstack-keystone-setup.service +0040755 0 0 /etc/glance +0040755 0 0 /var/lib/glance +0040755 0 0 /var/lib/glance/image-cache +0040755 0 0 /var/lib/glance/image-cache/incomplete +0040755 0 0 /var/lib/glance/image-cache/invalid +0040755 0 0 /var/lib/glance/image-cache/queue +0040755 0 0 /var/lib/glance/images +0100644 0 0 /etc/glance/logging.conf +0100644 0 0 /etc/glance/glance-api.conf +0100644 0 0 /etc/glance/glance-registry.conf +0100644 0 0 /etc/glance/glance-scrubber.conf +0100644 0 0 /etc/glance/glance-cache.conf +0100644 0 0 /etc/glance/schema-image.json +0100644 0 0 /etc/glance/policy.json +0100644 0 0 /etc/glance/glance-api-paste.ini +0100644 0 0 /etc/glance/glance-registry-paste.ini +0100644 0 0 /etc/logrotate.d/openstack-glance-api +0100644 0 0 /etc/logrotate.d/openstack-glance-registry +0100644 0 0 /etc/systemd/system/openstack-glance-setup.service +0100644 0 0 /etc/systemd/system/openstack-glance-api.service +0100644 0 0 /etc/systemd/system/openstack-glance-registry.service +0100755 0 0 /usr/share/openstack/openstack-glance-setup +0040755 0 0 /var/lib/nova +0040755 0 0 /etc/nova +0100644 0 0 /etc/nova/logging.conf +0100644 0 0 /etc/nova/rootwrap.conf +0100644 0 0 /etc/nova/nova.conf +0100644 0 0 /etc/nova/nova-compute.conf +0100644 0 0 /etc/nova/policy.json +0100644 0 0 /etc/nova/cells.json +0100644 0 0 /etc/nova/api-paste.ini +0040755 0 0 /etc/nova/rootwrap.d/ +0100644 0 0 /etc/nova/rootwrap.d/api-metadata.filters +0100644 0 0 /etc/nova/rootwrap.d/baremetal-compute-ipmi.filters +0100644 0 0 /etc/nova/rootwrap.d/baremetal-deploy-helper.filters +0100644 0 0 /etc/nova/rootwrap.d/compute.filters +0100644 0 0 /etc/nova/rootwrap.d/network.filters +0100755 0 0 /usr/share/openstack/openstack-nova-setup +0100644 0 0 /etc/systemd/system/openstack-nova-setup.service +0100644 0 0 /etc/systemd/system/openstack-nova-compute.service +0100644 0 0 /etc/systemd/system/openstack-nova-conductor.service +0100644 0 0 /etc/systemd/system/openstack-nova-api.service +0100644 0 0 /etc/systemd/system/openstack-nova-scheduler.service +0100644 0 0 /etc/systemd/system/openstack-nova-consoleauth.service +0100644 0 0 /etc/systemd/system/openstack-nova-novncproxy.service +0100644 0 0 /etc/systemd/system/openstack-nova-cert.service +0100644 0 0 /etc/systemd/system/openstack-rabbitmq-setup.service +0100644 0 0 /etc/systemd/system/openstack-rabbitmq-server.service +0100755 0 0 /usr/share/openstack/openstack-rabbitmq-setup +0040755 0 0 /var/lib/rabbitmq +0040755 0 0 /var/lib/cinder +0040755 0 0 /etc/cinder +0100644 0 0 /etc/cinder/rootwrap.conf +0100644 0 0 /etc/cinder/cinder.conf +0100644 0 0 /etc/cinder/api-paste.ini +0100644 0 0 /etc/cinder/policy.json +0100755 0 0 /usr/share/openstack/openstack-cinder-setup +0100644 0 0 /etc/systemd/system/openstack-cinder-setup.service +0100644 0 0 /etc/systemd/system/openstack-cinder-api.service +0100644 0 0 /etc/systemd/system/openstack-cinder-scheduler.service +0100644 0 0 /etc/systemd/system/openstack-cinder-volume.service +0100644 0 0 /etc/systemd/system/openstack-cinder-backup.service +0040755 0 0 /etc/cinder/rootwrap.d/ +0100644 0 0 /etc/cinder/rootwrap.d/volume.filters +0040755 0 0 /etc/neutron +0040755 0 0 /var/lib/neutron +0100644 0 0 /etc/neutron/rootwrap.conf +0100644 0 0 /etc/neutron/neutron.conf +0100644 0 0 /etc/neutron/api-paste.ini +0100644 0 0 /etc/neutron/policy.json +0100644 0 0 /etc/neutron/l3_agent.ini +0100644 0 0 /etc/neutron/dhcp_agent.ini +0100644 0 0 /etc/neutron/lbaas_agent.ini +0100644 0 0 /etc/neutron/metadata_agent.ini 
+0100644 0 0 /etc/neutron/fwaas_driver.ini +0100644 0 0 /etc/neutron/metering_agent.ini +0100644 0 0 /etc/neutron/vpn_agent.ini +0040755 0 0 /etc/neutron/rootwrap.d/ +0100644 0 0 /etc/neutron/rootwrap.d/cisco-apic.filters +0100644 0 0 /etc/neutron/rootwrap.d/debug.filters +0100644 0 0 /etc/neutron/rootwrap.d/dhcp.filters +0100644 0 0 /etc/neutron/rootwrap.d/ipset-firewall.filters +0100644 0 0 /etc/neutron/rootwrap.d/iptables-firewall.filters +0100644 0 0 /etc/neutron/rootwrap.d/l3.filters +0100644 0 0 /etc/neutron/rootwrap.d/lbaas-haproxy.filters +0100644 0 0 /etc/neutron/rootwrap.d/linuxbridge-plugin.filters +0100644 0 0 /etc/neutron/rootwrap.d/nec-plugin.filters +0100644 0 0 /etc/neutron/rootwrap.d/ofagent.filters +0100644 0 0 /etc/neutron/rootwrap.d/openvswitch-plugin.filters +0100644 0 0 /etc/neutron/rootwrap.d/vpnaas.filters +0040755 0 0 /etc/neutron/plugins/ +0040755 0 0 /etc/neutron/plugins/bigswitch +0100644 0 0 /etc/neutron/plugins/bigswitch/restproxy.ini +0040755 0 0 /etc/neutron/plugins/bigswitch/ssl +0040755 0 0 /etc/neutron/plugins/bigswitch/ssl/ca_certs +0040755 0 0 /etc/neutron/plugins/bigswitch/ssl/host_certs +0100644 0 0 /etc/neutron/plugins/bigswitch/ssl/ca_certs/README +0100644 0 0 /etc/neutron/plugins/bigswitch/ssl/host_certs/README +0040755 0 0 /etc/neutron/plugins/brocade +0100644 0 0 /etc/neutron/plugins/brocade/brocade.ini +0040755 0 0 /etc/neutron/plugins/cisco +0100644 0 0 /etc/neutron/plugins/cisco/cisco_cfg_agent.ini +0100644 0 0 /etc/neutron/plugins/cisco/cisco_plugins.ini +0100644 0 0 /etc/neutron/plugins/cisco/cisco_router_plugin.ini +0100644 0 0 /etc/neutron/plugins/cisco/cisco_vpn_agent.ini +0040755 0 0 /etc/neutron/plugins/embrane +0100644 0 0 /etc/neutron/plugins/embrane/heleos_conf.ini +0040755 0 0 /etc/neutron/plugins/hyperv +0100644 0 0 /etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini +0040755 0 0 /etc/neutron/plugins/ibm +0100644 0 0 /etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini +0040755 0 0 /etc/neutron/plugins/linuxbridge +0100644 0 0 /etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini +0040755 0 0 /etc/neutron/plugins/metaplugin +0100644 0 0 /etc/neutron/plugins/metaplugin/metaplugin.ini +0040755 0 0 /etc/neutron/plugins/midonet +0100644 0 0 /etc/neutron/plugins/midonet/midonet.ini +0040755 0 0 /etc/neutron/plugins/ml2 +0100644 0 0 /etc/neutron/plugins/ml2/ml2_conf.ini +0100644 0 0 /etc/neutron/plugins/ml2/ml2_conf_arista.ini +0100644 0 0 /etc/neutron/plugins/ml2/ml2_conf_brocade.ini +0100644 0 0 /etc/neutron/plugins/ml2/ml2_conf_cisco.ini +0100644 0 0 /etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini +0100644 0 0 /etc/neutron/plugins/ml2/ml2_conf_mlnx.ini +0100644 0 0 /etc/neutron/plugins/ml2/ml2_conf_ncs.ini +0100644 0 0 /etc/neutron/plugins/ml2/ml2_conf_odl.ini +0100644 0 0 /etc/neutron/plugins/ml2/ml2_conf_ofa.ini +0100644 0 0 /etc/neutron/plugins/ml2/ml2_conf_sriov.ini +0040755 0 0 /etc/neutron/plugins/mlnx +0100644 0 0 /etc/neutron/plugins/mlnx/mlnx_conf.ini +0040755 0 0 /etc/neutron/plugins/nec +0100644 0 0 /etc/neutron/plugins/nec/nec.ini +0040755 0 0 /etc/neutron/plugins/nuage +0100644 0 0 /etc/neutron/plugins/nuage/nuage_plugin.ini +0040755 0 0 /etc/neutron/plugins/oneconvergence +0100644 0 0 /etc/neutron/plugins/oneconvergence/nvsdplugin.ini +0040755 0 0 /etc/neutron/plugins/opencontrail +0100644 0 0 /etc/neutron/plugins/opencontrail/contrailplugin.ini +0040755 0 0 /etc/neutron/plugins/openvswitch +0100644 0 0 /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini +0040755 0 0 /etc/neutron/plugins/plumgrid +0100644 0 0 
/etc/neutron/plugins/plumgrid/plumgrid.ini +0040755 0 0 /etc/neutron/plugins/vmware +0100644 0 0 /etc/neutron/plugins/vmware/nsx.ini +0100755 0 0 /usr/share/openstack/openstack-neutron-setup +0100644 0 0 /etc/systemd/system/openstack-neutron-setup.service +0100644 0 0 /etc/systemd/system/openstack-neutron-server.service +0100644 0 0 /etc/systemd/system/openstack-neutron-metadata-agent.service +0100644 0 0 /etc/systemd/system/openstack-neutron-plugin-openvswitch-agent.service +0100644 0 0 /etc/systemd/system/openstack-neutron-ovs-cleanup.service +0100644 0 0 /etc/systemd/system/openstack-neutron-dhcp-agent.service +0100644 0 0 /etc/systemd/system/openstack-neutron-l3-agent.service +0100755 0 0 /usr/share/openstack/openvswitch-setup +0100644 0 0 /etc/systemd/system/openvswitch-setup.service +0100644 0 0 /etc/systemd/system/openvswitch-db-server.service +0100644 0 0 /etc/systemd/system/openvswitch.service +0100755 0 0 /usr/share/openstack/openstack-neutron-network-configuration-for-one-node +0100644 0 0 /etc/systemd/system/openstack-neutron-network-configuration-one-node.service +0100644 0 0 /etc/systemd/system/openvswitch-initialize-db.service +0100755 0 0 /usr/share/openstack/postgres-setup +0100644 0 0 /etc/systemd/system/postgres-setup.service +0100644 0 0 /etc/systemd/system/postgres-server.service +0100755 0 0 /usr/share/openstack/apache-httpd-server-setup +0100644 0 0 /etc/systemd/system/apache-httpd-server-setup.service +0100644 0 0 /etc/systemd/system/apache-httpd.service +0040755 0 0 /etc/horizon +0040755 0 0 /etc/horizon/openstack_dashboard +0040755 0 0 /var/lib/horizon +0100644 0 0 /etc/systemd/system/openstack-horizon-setup.service +0100755 0 0 /usr/share/openstack/openstack-horizon-setup +0100644 0 0 /etc/horizon/apache-horizon.conf +0100644 0 0 /etc/horizon/openstack_dashboard/local_settings.py +0100644 0 0 /etc/sysctl.conf +0100644 0 0 /etc/systemd/system/openvswitch-create-links-one-node.service +0100755 0 0 /usr/share/openstack/create_openvswitch_veth_pairs +0100644 0 0 /etc/hosts diff --git a/openstack/usr/share/openstack/apache-httpd-server-setup b/openstack/usr/share/openstack/apache-httpd-server-setup new file mode 100755 index 00000000..4c0b3cee --- /dev/null +++ b/openstack/usr/share/openstack/apache-httpd-server-setup @@ -0,0 +1,47 @@ +#!/bin/sh +# +# Copyright (C) 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
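+ +# This script runs once at first boot: the flag file installed at the end +# (/var/openstack/apache-http-setup) makes any later run exit immediately.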
+ +set -e + +if [ -f /var/openstack/apache-http-setup ]; then + exit 0 +fi + +############################################################################### +# Create the apache user and group, and the web root at /srv/www # +############################################################################### +mkdir -p /srv/www +groupadd -r apache +useradd -c "Apache Server" -d /srv/www -g apache \ + -s /bin/false apache + +############################################################################### +# Move suexec to the directory where apache is configured to have it # +# Check configure in strata/apache-httpd-server/httpd-server.morph # +############################################################################### +mkdir -p /usr/lib/httpd +mv -v /usr/sbin/suexec /usr/lib/httpd/suexec +chgrp apache /usr/lib/httpd/suexec +chmod 4754 /usr/lib/httpd/suexec +chown -R apache:apache /srv/www + +install -D -m 644 /proc/self/fd/0 <<'EOF' /var/openstack/apache-http-setup +Apache http setup: success +EOF + +exit 0 diff --git a/openstack/usr/share/openstack/create_openvswitch_veth_pairs b/openstack/usr/share/openstack/create_openvswitch_veth_pairs new file mode 100644 index 00000000..a239ac73 --- /dev/null +++ b/openstack/usr/share/openstack/create_openvswitch_veth_pairs @@ -0,0 +1,32 @@ +#!/bin/sh +# +# Copyright (C) 2015 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +set -xe + +# Get the first ethernet interface and its IPv4 address +eth_dev="$(ip addr | perl -pe 'if (/^\d+: ([^:]+)/) { $iface=$1; } if (m@^\s*inet ([^/]+)/@) { print "$iface $1\n"; } $_=undef;' | grep "^e" | head -1 | awk '{ print $1 } ')" +eth_ip="$(ip addr | perl -pe 'if (/^\d+: ([^:]+)/) { $iface=$1; } if (m@^\s*inet ([^/]+)/@) { print "$iface $1\n"; } $_=undef;' | grep "^e" | head -1 | awk '{ print $2 } ')" + +# Assign the host IP to br-eth0 and bring up the veth pairs between bridges (one-node configuration) +ifconfig br-eth0 $eth_ip up +ip link set br-eth0 promisc on +ip link set eth1-br-proxy up promisc on +ip link set ex-br-proxy up promisc on +ip link set proxy-br-eth1 up promisc on +ip link set proxy-br-ex up promisc on + +exit 0 diff --git a/openstack/usr/share/openstack/openstack-cinder-setup b/openstack/usr/share/openstack/openstack-cinder-setup new file mode 100644 index 00000000..eb97d55a --- /dev/null +++ b/openstack/usr/share/openstack/openstack-cinder-setup @@ -0,0 +1,132 @@ +#!/bin/sh +# +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +set -e + +# Create required system users and groups + +getent group cinder >/dev/null || groupadd -r --gid 165 cinder +getent passwd cinder >/dev/null || \ + useradd --uid 165 -r -g cinder -d /var/lib/cinder -s /sbin/nologin \ + -c "OpenStack Cinder Daemons" cinder + +# Create the keystone user and services + +export OS_SERVICE_TOKEN=##KEYSTONE_TEMPORARY_ADMIN_TOKEN## +export OS_SERVICE_ENDPOINT='http://onenode:35357/v2.0' + +keystone user-create --name ##CINDER_USER## --pass ##CINDER_PASSWORD## +keystone user-role-add --tenant service --user ##CINDER_USER## --role admin + +# Register the Block Storage service with the Identity service so other OpenStack services +# can locate it +keystone service-create --name ##CINDER_USER## --type volume --description "OpenStack Block Storage" +keystone endpoint-create --service-id $(keystone service-list | awk '/ volume / {print $2}') \ + --publicurl ##CINDER_PUBLIC_URL## \ + --internalurl ##CINDER_INTERNAL_URL## \ + --adminurl ##CINDER_ADMIN_URL## + +# Register a service and endpoint for version 2 of the Block Storage service API +keystone service-create --name ##CINDER_USER_V2## \ + --type volumev2 --description "OpenStack Block Storage" +keystone endpoint-create --service-id $(keystone service-list | awk '/ volumev2 / {print $2}') \ + --publicurl ##CINDER_PUBLIC_URL_V2## \ + --internalurl ##CINDER_INTERNAL_URL_V2## \ + --adminurl ##CINDER_ADMIN_URL_V2## + +# Create run directory for cinder +if [ ! -d /var/run/cinder ]; then + mkdir -p /var/run/cinder + chown -R cinder:cinder /var/run/cinder +fi + +# Create the lock directory for cinder +if [ ! -d /var/lock/cinder ]; then + mkdir -p /var/lock/cinder + chown -R cinder:cinder /var/lock/cinder +fi + +# Create the log directory for cinder +if [ ! -d /var/log/cinder ]; then + mkdir -p /var/log/cinder + chown -R cinder:cinder /var/log/cinder +fi + +# Create the volumes directory for cinder +if [ ! -d /var/lib/cinder/volumes ]; then + mkdir -p /var/lib/cinder/volumes + chown -R cinder:cinder /var/lib/cinder/volumes +fi + +# Set up the cinder database +if ! sudo -u postgres psql -lqt | grep -q cinder; then + # Create the PostgreSQL user + sudo -u postgres createuser \ + --pwprompt --encrypted \ + --no-adduser --no-createdb \ + --no-password \ + ##CINDER_DB_USER## + + sudo -u postgres createdb \ + --owner=##CINDER_DB_USER## \ + cinder + + sudo -u cinder cinder-manage db sync +fi + +chown -R cinder:cinder /var/lib/cinder + +# This is only for testing purposes and needs to be replaced with +# something more robust before deploying in production. +# It also assumes that the user has added a second disk to the VM; +# if it does not find exactly one spare disk (e.g. sdb or vda), it will fail. +if [ $(ls /sys/block | grep -v sda | grep [vs]d | wc -l) -ne 1 ]; then + echo "Error: found none or more than one spare block device; cinder will not be able to create a VG."
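+ # Bail out rather than guess: running pvcreate on the wrong disk would destroy data.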
+    exit 1
+else
+    device=/dev/$(ls /sys/block | grep -v sda | grep '[vs]d')
+fi
+
+# Create a physical volume
+pvcreate -ff -y $device
+
+# Create a volume group named "cinder-volumes"
+vgcreate -y cinder-volumes $device
+
+# Remove the one-shot setup service
+rm /etc/systemd/system/multi-user.target.wants/openstack-cinder-setup.service
+
+# Start cinder services
+systemctl start openstack-cinder-api
+systemctl start openstack-cinder-scheduler
+systemctl start openstack-cinder-volume
+systemctl start openstack-cinder-backup
+
+# Create the links so that the cinder services start on subsequent boots.
+ln -s "/etc/systemd/system/openstack-cinder-api.service" \
+    "/etc/systemd/system/multi-user.target.wants/openstack-cinder-api.service"
+
+ln -s "/etc/systemd/system/openstack-cinder-scheduler.service" \
+    "/etc/systemd/system/multi-user.target.wants/openstack-cinder-scheduler.service"
+
+ln -s "/etc/systemd/system/openstack-cinder-volume.service" \
+    "/etc/systemd/system/multi-user.target.wants/openstack-cinder-volume.service"
+
+ln -s "/etc/systemd/system/openstack-cinder-backup.service" \
+    "/etc/systemd/system/multi-user.target.wants/openstack-cinder-backup.service"
+
+exit 0
diff --git a/openstack/usr/share/openstack/openstack-glance-setup b/openstack/usr/share/openstack/openstack-glance-setup
new file mode 100644
index 00000000..1363a7b7
--- /dev/null
+++ b/openstack/usr/share/openstack/openstack-glance-setup
@@ -0,0 +1,89 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+# Create required system users and groups
+getent group glance >/dev/null || groupadd -r --gid 164 glance
+getent passwd glance >/dev/null || \
+    useradd --uid 164 -r -g glance -d /var/lib/glance -s /sbin/nologin \
+    -c "OpenStack Glance Daemons" glance
+
+# Create required keystone tenants, users and roles
+export OS_SERVICE_TOKEN=##KEYSTONE_TEMPORARY_ADMIN_TOKEN##
+export OS_SERVICE_ENDPOINT='http://onenode:35357/v2.0'
+
+keystone user-create --name ##GLANCE_SERVICE_USER## --pass ##GLANCE_SERVICE_PASSWORD##
+keystone user-role-add --tenant service --user ##GLANCE_SERVICE_USER## --role admin
+
+keystone service-create --name glance --type image --description "OpenStack Image Service"
+keystone endpoint-create --service-id $(keystone service-list | awk '/ image / {print $2}') \
+    --publicurl ##GLANCE_PUBLIC_URL## \
+    --internalurl ##GLANCE_INTERNAL_URL## \
+    --adminurl ##GLANCE_ADMIN_URL##
+
+# Create the run directory for glance
+if [ ! -d /var/run/glance ]; then
+    mkdir -p /var/run/glance
+    chown -R glance:glance /var/run/glance
+fi
+
+# Create the lock directory for glance
+if [ ! -d /var/lock/glance ]; then
+    mkdir -p /var/lock/glance
+    chown -R glance:glance /var/lock/glance
+fi
+
+# Create the log directory for glance
+if [ ! -d /var/log/glance ]; then
+    mkdir -p /var/log/glance
+    chown -R glance:glance /var/log/glance
+fi
+
+# Set up the glance database
+if ! sudo -u postgres psql -lqt | grep -q glance; then
+    # Create the PostgreSQL user
+    sudo -u postgres createuser \
+        --pwprompt --encrypted \
+        --no-adduser --no-createdb \
+        --no-password \
+        ##GLANCE_DB_USER##
+
+    sudo -u postgres createdb \
+        --owner=##GLANCE_DB_USER## \
+        glance
+
+    sudo -u glance glance-manage db_sync
+fi
+
+chown -R glance:glance /var/lib/glance
+
+# Remove the one-shot setup service
+rm /etc/systemd/system/multi-user.target.wants/openstack-glance-setup.service
+
+# Start glance services
+systemctl start openstack-glance-api
+systemctl start openstack-glance-registry
+
+# Create the links so that the glance services start on subsequent boots.
+ln -s "/etc/systemd/system/openstack-glance-api.service" \
+    "/etc/systemd/system/multi-user.target.wants/openstack-glance-api.service"
+
+ln -s "/etc/systemd/system/openstack-glance-registry.service" \
+    "/etc/systemd/system/multi-user.target.wants/openstack-glance-registry.service"
+
+exit 0
diff --git a/openstack/usr/share/openstack/openstack-horizon-setup b/openstack/usr/share/openstack/openstack-horizon-setup
new file mode 100644
index 00000000..d10007d1
--- /dev/null
+++ b/openstack/usr/share/openstack/openstack-horizon-setup
@@ -0,0 +1,63 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+if [ -f /var/openstack/horizon-setup ]; then
+    exit 0
+fi
+
+# Create required system users and groups
+mkdir -p /var/lib/horizon
+getent group horizon >/dev/null || groupadd -r --gid 167 horizon
+getent passwd horizon >/dev/null || \
+    useradd --uid 167 -r -g horizon -d /var/lib/horizon -s /sbin/nologin \
+    -c "Horizon user" horizon
+
+# Workaround to make django.wsgi work with horizon
+# See: https://bugs.launchpad.net/osprofiler/+bug/1361235
+# and: https://git.openstack.org/cgit/openstack/tripleo-image-elements/commit/?id=41c9a1dfad23f8aee366afb6a0b20a6c57ec8f79
+sed -i "s|'../..'|os.path.realpath('../..')|" \
+    /usr/lib/python2.7/site-packages/openstack_dashboard/wsgi/django.wsgi
+
+# And link this django.wsgi file to the horizon home directory
+ln -sf /usr/lib/python2.7/site-packages/openstack_dashboard/wsgi/django.wsgi \
+    /var/lib/horizon/django.wsgi
+
+# Link the OpenStack local_settings.py to where openstack_dashboard is installed.
+ln -sf /etc/horizon/openstack_dashboard/local_settings.py \
+    /usr/lib/python2.7/site-packages/openstack_dashboard/local/local_settings.py
+
+# Create the static directory (STATIC_ROOT) used in local_settings.py to keep
+# static objects such as css files.
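+# (If the static files were never collected at build time, the stock Django
+# way to populate STATIC_ROOT would be roughly:
+#   cd /usr/lib/python2.7/site-packages/openstack_dashboard && \
+#       python manage.py collectstatic --noinput
+# This is left as a note only; whether it is needed depends on how the image
+# was built.)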
+mkdir -p /var/lib/horizon/openstack_dashboard/static
+
+# Create the horizon document root for the apache configuration
+mkdir -p /var/lib/horizon/openstack_dashboard/.blackhole
+
+# Link the horizon configuration file for apache into the directory from
+# which our apache loads its configuration files
+ln -sf /etc/horizon/apache-horizon.conf \
+    /usr/httpd/conf.d/apache-horizon.conf
+
+chown -R horizon:horizon /var/lib/horizon
+
+install -D -m 644 /proc/self/fd/0 <<'EOF' /var/openstack/horizon-setup
+Horizon setup: success
+EOF
+
+exit 0
diff --git a/openstack/usr/share/openstack/openstack-keystone-setup b/openstack/usr/share/openstack/openstack-keystone-setup
new file mode 100644
index 00000000..9c034c5b
--- /dev/null
+++ b/openstack/usr/share/openstack/openstack-keystone-setup
@@ -0,0 +1,92 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+# Create required system users and groups
+
+getent group keystone >/dev/null || groupadd -r --gid 163 keystone
+getent passwd keystone >/dev/null || \
+    useradd --uid 163 -r -g keystone -d /var/lib/keystone -s /sbin/nologin \
+    -c "OpenStack Keystone Daemons" keystone
+
+# Create the keystone runtime directories
+if [ ! -d /var/run/keystone ]; then
+    mkdir -p /var/run/keystone
+    chown -R keystone:keystone /var/run/keystone
+fi
+
+if [ ! -d /var/lock/keystone ]; then
+    mkdir -p /var/lock/keystone
+    chown -R keystone:keystone /var/lock/keystone
+fi
+
+if [ ! -d /var/log/keystone ]; then
+    mkdir -p /var/log/keystone
+    chown -R keystone:keystone /var/log/keystone
+fi
+
+# Set up the keystone database
+if ! sudo -u postgres psql -lqt | grep -q keystone; then
+    # Create the PostgreSQL user
+    sudo -u postgres createuser \
+        --pwprompt --encrypted \
+        --no-adduser --no-createdb \
+        --no-password \
+        ##KEYSTONE_DB_USER##
+
+    sudo -u postgres createdb \
+        --owner=##KEYSTONE_DB_USER## \
+        keystone
+
+    sudo -u keystone keystone-manage db_sync
+fi
+
+chown -R keystone:keystone /var/lib/keystone
+
+systemctl start openstack-keystone
+
+export OS_SERVICE_TOKEN=##KEYSTONE_TEMPORARY_ADMIN_TOKEN##
+export OS_SERVICE_ENDPOINT='http://onenode:35357/v2.0'
+
+# This script creates a TEMPORARY admin user, with a password that may
+# float around on the system.  Please delete this user once you have set
+# up the real admin user with a real, secure password.
+
+keystone tenant-create --name admin --description "Admin Tenant"
+keystone role-create --name admin
+
+keystone user-create --name temporary_admin --pass ##KEYSTONE_TEMPORARY_ADMIN_PASSWORD##
+keystone user-role-add --tenant admin --user temporary_admin --role admin
+
+keystone tenant-create --name service --description "Service Tenant"
+
+# Define a service for the Identity Service
+keystone service-create --name keystone --type identity --description "OpenStack Identity"
+
+# Specify an API endpoint for the Identity Service by using the returned service ID.
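+# For reference, `keystone service-list` prints a table along these lines
+# (illustrative output only; IDs and column widths vary):
+#   |               id               |   name   |   type   |    description     |
+#   | 1a2b3c4d5e6f...                | keystone | identity | OpenStack Identity |
+# The awk expression below picks the id column ($2) of the "identity" row.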
+keystone endpoint-create --service-id $(keystone service-list | awk '/ identity / {print $2}') \
+    --publicurl ##KEYSTONE_PUBLIC_URL## \
+    --internalurl ##KEYSTONE_INTERNAL_URL## \
+    --adminurl ##KEYSTONE_ADMIN_URL##
+
+# Remove the one-shot setup service and enable keystone for subsequent boots
+rm /etc/systemd/system/multi-user.target.wants/openstack-keystone-setup.service
+
+ln -s "/etc/systemd/system/openstack-keystone.service" \
+    "/etc/systemd/system/multi-user.target.wants/openstack-keystone.service"
+
+exit 0
diff --git a/openstack/usr/share/openstack/openstack-neutron-network-configuration-for-one-node b/openstack/usr/share/openstack/openstack-neutron-network-configuration-for-one-node
new file mode 100644
index 00000000..c2ccbd81
--- /dev/null
+++ b/openstack/usr/share/openstack/openstack-neutron-network-configuration-for-one-node
@@ -0,0 +1,56 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -xe
+
+# Exit early if this one-shot configuration has already run; with set -e
+# the "ip link add" calls below would otherwise abort a second run.
+if [ -f /var/openstack/openvswitch-one-node-setup ]; then
+    exit 0
+fi
+
+# Get the name and IP address of the first ethernet interface
+eth_dev="$(ip addr | perl -pe 'if (/^\d+: ([^:]+)/) { $iface=$1; } if (m@^\s*inet ([^/]+)/@) { print "$iface $1\n"; } $_=undef;' | grep "^e" | head -1 | awk '{ print $1 }')"
+eth_ip="$(ip addr | perl -pe 'if (/^\d+: ([^:]+)/) { $iface=$1; } if (m@^\s*inet ([^/]+)/@) { print "$iface $1\n"; } $_=undef;' | grep "^e" | head -1 | awk '{ print $2 }')"
+
+ip link add proxy-br-eth1 type veth peer name eth1-br-proxy
+ip link add proxy-br-ex type veth peer name ex-br-proxy
+
+# Create the bridges used to map the external network.
+# This configuration is for one node and was taken from:
+# https://fosskb.wordpress.com/2014/10/18/openstack-juno-on-ubuntu-14-10/
+# and https://fosskb.wordpress.com/2014/06/10/managing-openstack-internaldataexternal-network-in-one-interface/
+
+ovs-vsctl add-br br-eth0
+ovs-vsctl add-port br-eth0 $eth_dev
+ifconfig br-eth0 $eth_ip up
+ip link set br-eth0 promisc on
+ovs-vsctl add-br br-eth1
+ovs-vsctl add-br br-ex
+ovs-vsctl add-port br-eth1 eth1-br-proxy
+ovs-vsctl add-port br-ex ex-br-proxy
+ovs-vsctl add-port br-eth0 proxy-br-eth1
+ovs-vsctl add-port br-eth0 proxy-br-ex
+ip link set eth1-br-proxy up promisc on
+ip link set ex-br-proxy up promisc on
+ip link set proxy-br-eth1 up promisc on
+ip link set proxy-br-ex up promisc on
+
+install -D -m 644 /proc/self/fd/0 <<'EOF' /var/openstack/openvswitch-one-node-setup
+Openvswitch one node setup: success
+EOF
+
+exit 0
diff --git a/openstack/usr/share/openstack/openstack-neutron-setup b/openstack/usr/share/openstack/openstack-neutron-setup
new file mode 100644
index 00000000..ff6496a0
--- /dev/null
+++ b/openstack/usr/share/openstack/openstack-neutron-setup
@@ -0,0 +1,95 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -xe
+
+if [ -f /var/openstack/openstack-neutron-setup ]; then
+    exit 0
+fi
+
+# Create required system users and groups
+
+getent group neutron >/dev/null || groupadd -r --gid 166 neutron
+getent passwd neutron >/dev/null || \
+    useradd --uid 166 -r -g neutron -d /var/lib/neutron -s /sbin/nologin \
+    -c "OpenStack Neutron Daemons" neutron
+
+chown -R neutron:neutron /var/lib/neutron
+
+# Create the keystone user and services
+export OS_SERVICE_TOKEN=##KEYSTONE_TEMPORARY_ADMIN_TOKEN##
+export OS_SERVICE_ENDPOINT='http://onenode:35357/v2.0'
+
+keystone user-create --name ##NEUTRON_SERVICE_USER## --pass ##NEUTRON_SERVICE_PASSWORD##
+keystone user-role-add --tenant service --user ##NEUTRON_SERVICE_USER## --role admin
+
+keystone service-create --name neutron --type network --description "OpenStack Networking"
+keystone endpoint-create --service-id $(keystone service-list | awk '/ network / {print $2}') \
+    --publicurl ##NEUTRON_PUBLIC_URL## \
+    --internalurl ##NEUTRON_INTERNAL_URL## \
+    --adminurl ##NEUTRON_ADMIN_URL## \
+    --region regionOne
+
+# neutron.conf configuration
+service_tenant_id=$(keystone tenant-get service | grep id | tr -d " " | cut -d"|" -f3)
+sed -i "s/##SERVICE_TENANT_ID##/$service_tenant_id/g" /etc/neutron/neutron.conf
+
+# Create the neutron runtime directories
+if [ ! -d /var/run/neutron ]; then
+    mkdir -p /var/run/neutron
+    chown -R neutron:neutron /var/run/neutron
+fi
+
+if [ ! -d /var/lock/neutron ]; then
+    mkdir -p /var/lock/neutron
+    chown -R neutron:neutron /var/lock/neutron
+fi
+
+if [ ! -d /var/log/neutron ]; then
+    mkdir -p /var/log/neutron
+    chown -R neutron:neutron /var/log/neutron
+fi
+
+# Set up the neutron database
+if ! sudo -u postgres psql -lqt | grep -q neutron; then
+    # Create the PostgreSQL user
+    sudo -u postgres createuser \
+        --pwprompt --encrypted \
+        --no-adduser --no-createdb \
+        --no-password \
+        ##NEUTRON_DB_USER##
+    sudo -u postgres createdb \
+        --owner=##NEUTRON_DB_USER## \
+        neutron
+    # Stamp the neutron database with the latest stamped version available,
+    # in this case "icehouse"
+    sudo -u neutron neutron-db-manage \
+        --config-file /etc/neutron/neutron.conf \
+        --config-file /etc/neutron/plugins/ml2/ml2_conf.ini \
+        stamp icehouse
+    # Upgrade the database to "juno"
+    sudo -u neutron neutron-db-manage \
+        --config-file /etc/neutron/neutron.conf \
+        --config-file /etc/neutron/plugins/ml2/ml2_conf.ini \
+        upgrade juno
+fi
+
+install -D -m 644 /proc/self/fd/0 <<'EOF' /var/openstack/openstack-neutron-setup
+Openstack neutron setup: success
+EOF
+
+exit 0
diff --git a/openstack/usr/share/openstack/openstack-nova-setup b/openstack/usr/share/openstack/openstack-nova-setup
new file mode 100644
index 00000000..7168e7c2
--- /dev/null
+++ b/openstack/usr/share/openstack/openstack-nova-setup
@@ -0,0 +1,133 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+# Create required system users and groups
+
+getent group nova >/dev/null || groupadd -r --gid 162 nova
+getent passwd nova >/dev/null || \
+    useradd --uid 162 -r -g nova -d /var/lib/nova -s /sbin/nologin \
+    -c "OpenStack Nova Daemons" nova
+
+# Create the keystone user and services
+
+export OS_SERVICE_TOKEN=##KEYSTONE_TEMPORARY_ADMIN_TOKEN##
+export OS_SERVICE_ENDPOINT='http://onenode:35357/v2.0'
+
+keystone user-create --name ##NOVA_SERVICE_USER## --pass ##NOVA_SERVICE_PASSWORD##
+keystone user-role-add --tenant service --user ##NOVA_SERVICE_USER## --role admin
+
+keystone service-create --name nova --type compute --description "OpenStack Compute Service"
+keystone endpoint-create --service-id $(keystone service-list | awk '/ compute / {print $2}') \
+    --publicurl ##NOVA_PUBLIC_URL## \
+    --internalurl ##NOVA_INTERNAL_URL## \
+    --adminurl ##NOVA_ADMIN_URL## \
+    --region ##NOVA_REGION##
+
+# Create the nova runtime directories
+if [ ! -d /var/run/nova ]; then
+    mkdir -p /var/run/nova
+    chown -R nova:nova /var/run/nova
+fi
+
+if [ ! -d /var/lock/nova ]; then
+    mkdir -p /var/lock/nova
+    chown -R nova:nova /var/lock/nova
+fi
+
+if [ ! -d /var/log/nova ]; then
+    mkdir -p /var/log/nova
+    chown -R nova:nova /var/log/nova
+fi
+
+if [ ! -d /var/lib/nova/instances ]; then
+    mkdir /var/lib/nova/instances
+    chown -R nova:nova /var/lib/nova/instances
+fi
+
+# Set up the nova database
+if ! sudo -u postgres psql -lqt | grep -q nova; then
+    # Create the PostgreSQL user
+    sudo -u postgres createuser \
+        --pwprompt --encrypted \
+        --no-adduser --no-createdb \
+        --no-password \
+        ##NOVA_DB_USER##
+
+    sudo -u postgres createdb \
+        --owner=##NOVA_DB_USER## \
+        nova
+
+    sudo -u nova nova-manage db sync
+fi
+
+# Nova novncproxy needs the /usr/share/novnc folder to be available
+if [ ! -d /usr/share/novnc ]; then
+    mkdir /usr/share/novnc
+    chown -R nova:nova /usr/share/novnc
+fi
+
+chown -R nova:nova /var/lib/nova
+
+# Add nova to the libvirt group
+usermod -a -G libvirt nova
+
+# Check for the existence of the Network Block Device module in the kernel
+# NOTE: modprobe does not currently work and always returns failure;
+# enable this check once modprobe is fixed.
+#modprobe nbd
+
+# Remove the one-shot setup service
+rm /etc/systemd/system/multi-user.target.wants/openstack-nova-setup.service
+
+# Start nova services
+systemctl start openstack-nova-compute
+# [1] Never enable the openstack-nova-conductor service on a node that runs
+# openstack-nova-compute, or the security benefits of removing database
+# access from nova-compute will be negated
+#systemctl start openstack-nova-conductor
+systemctl start openstack-nova-api
+systemctl start openstack-nova-cert
+systemctl start openstack-nova-consoleauth
+systemctl start openstack-nova-scheduler
+systemctl start openstack-nova-novncproxy
+#systemctl start openstack-nova-xvpnvncproxy
+
+# Create the links so that the nova services start on subsequent boots.
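+# (These symlinks are what `systemctl enable` would create for units with an
+# [Install] section; presumably the units here have none, so the links are
+# made by hand.  Enabling one unit by hand looks like:
+#   ln -s /etc/systemd/system/example.service \
+#       /etc/systemd/system/multi-user.target.wants/example.service
+# where example.service is a placeholder name.)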
+ln -s "/etc/systemd/system/openstack-nova-compute.service" \ + "/etc/systemd/system/multi-user.target.wants/openstack-nova-compute.service" +# See description of why this shouldn't run in a openstack in one node in [1] +#ln -s "/etc/systemd/system/openstack-nova-conductor.service" \ +# "/etc/systemd/system/multi-user.target.wants/openstack-nova-conductor.service" + +ln -s "/etc/systemd/system/openstack-nova-api.service" \ + "/etc/systemd/system/multi-user.target.wants/openstack-nova-api.service" + +ln -s "/etc/systemd/system/openstack-nova-cert.service" \ + "/etc/systemd/system/multi-user.target.wants/openstack-nova-cert.service" + +ln -s "/etc/systemd/system/openstack-nova-consoleauth.service" \ + "/etc/systemd/system/multi-user.target.wants/openstack-nova-consoleauth.service" + +ln -s "/etc/systemd/system/openstack-nova-scheduler.service" \ + "/etc/systemd/system/multi-user.target.wants/openstack-nova-scheduler.service" + +ln -s "/etc/systemd/system/openstack-nova-novncproxy.service" \ + "/etc/systemd/system/multi-user.target.wants/openstack-nova-novncproxy.service" + +exit 0 diff --git a/openstack/usr/share/openstack/openstack-rabbitmq-setup b/openstack/usr/share/openstack/openstack-rabbitmq-setup new file mode 100644 index 00000000..dd491294 --- /dev/null +++ b/openstack/usr/share/openstack/openstack-rabbitmq-setup @@ -0,0 +1,66 @@ +#!/bin/sh +# +# Copyright (C) 2014 Codethink Limited +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +set -e + +# Create required system users and groups +getent group rabbitmq >/dev/null || groupadd -r --gid 1002 rabbitmq +getent passwd rabbitmq >/dev/null || \ + useradd --uid 1002 -r -g rabbitmq -d /var/lib/rabbitmq -s /sbin/nologin \ + -c "Rabbitmq server daemon" rabbitmq + +chown -R rabbitmq:rabbitmq /var/lib/rabbitmq + +# Create directories and files needed to run openstack-rabbitmq-server +if [ ! -d /var/run/rabbitmq ]; then + mkdir -p /var/run/rabbitmq + chown -R rabbitmq:rabbitmq /var/run/rabbitmq + chmod 755 /var/run/rabbitmq +fi + +if [ ! -d /var/log/rabbitmq ]; then + mkdir -p /var/log/rabbitmq + chown -R rabbitmq:rabbitmq /var/log/rabbitmq + chmod 755 /var/log/rabbitmq +fi + +# Install rabbimq.config and rabbitmq-env.conf +install -D -m 644 /proc/self/fd/0 <<'EOF' /var/lib/rabbitmq/etc/rabbitmq/rabbitmq.config +%% -*- Rabbit configuration for Openstack in Baserock +[ + {rabbit, + [ + {default_user, <<"##RABBITMQ_USER##">>}, + {default_pass, <<"##RABBITMQ_PASSWORD##">>}, + {tcp_listeners, [##RABBITMQ_PORT##]} + ]} +]. +EOF + +install -D -m 644 /proc/self/fd/0 <<'EOF' /etc/rabbitmq/rabbitmq-env.conf +# NOTE: Install this file in /etc/rabbitmq/ because rabbitmq-server will load +# it only from this directory. + +# Configure port node where rabbitmq-server will listen from. 
+NODE_PORT=##RABBITMQ_PORT##
+# The config file has to be in $RABBITMQ_HOME/etc/rabbitmq/rabbitmq.config
+CONFIG_FILE=/var/lib/rabbitmq/etc/rabbitmq/rabbitmq
+EOF
+
+chown -R rabbitmq:rabbitmq /var/lib/rabbitmq
+
+exit 0
diff --git a/openstack/usr/share/openstack/openvswitch-setup b/openstack/usr/share/openstack/openvswitch-setup
new file mode 100644
index 00000000..a5db7d21
--- /dev/null
+++ b/openstack/usr/share/openstack/openvswitch-setup
@@ -0,0 +1,44 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+if [ -f /var/openstack/openvswitch-setup ]; then
+    exit 0
+fi
+
+# Create the folders required by openvswitch
+mkdir -p /usr/local/etc/openvswitch
+mkdir -p /usr/local/var/run/openvswitch
+
+# Define the openvswitch files (these are the upstream default locations)
+openvswitch_database=/usr/local/etc/openvswitch/conf.db
+openvswitch_pidfile=/usr/local/var/run/openvswitch/ovsdb-server.pid
+openvswitch_logfile=/usr/local/var/run/openvswitch/ovsdb-server.log
+
+# Create the openvswitch database
+ovsdb-tool create $openvswitch_database /usr/share/openvswitch/vswitch.ovsschema
+
+install -D -m 644 /proc/self/fd/0 <<'EOF' /var/openstack/openvswitch-setup
+Openvswitch setup: success
+EOF
+
+exit 0
diff --git a/openstack/usr/share/openstack/postgres-setup b/openstack/usr/share/openstack/postgres-setup
new file mode 100644
index 00000000..fb224fd8
--- /dev/null
+++ b/openstack/usr/share/openstack/postgres-setup
@@ -0,0 +1,42 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
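+#
+# One-shot first-boot setup for the PostgreSQL server: create the data and
+# run directories and the postgres user and group, then initialise the
+# cluster with initdb and stamp /var/openstack/postgres-setup so that this
+# script only runs once.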
+
+set -e
+
+if [ -f /var/openstack/postgres-setup ]; then
+    exit 0
+fi
+
+# Create the postgres directories
+install -dm700 /var/lib/pgsql/data
+install -dm755 /var/run/postgresql
+
+# Create required system users and groups
+getent group postgres >/dev/null || groupadd -r -g 41 postgres
+getent passwd postgres >/dev/null || \
+    useradd --uid 41 -r -g postgres -d /var/lib/pgsql -s /sbin/nologin \
+    -c "PostgreSQL Server" postgres
+
+chown -R postgres:postgres /var/lib/pgsql /var/run/postgresql
+
+test -d /var/lib/pgsql/data/base || sudo -u postgres pg_ctl -D /var/lib/pgsql/data initdb
+
+install -D -m 644 /proc/self/fd/0 <<'EOF' /var/openstack/postgres-setup
+Postgres setup: success
+EOF
+
+exit 0
diff --git a/openvswitch.configure b/openvswitch.configure
new file mode 100644
index 00000000..be4eb6d3
--- /dev/null
+++ b/openvswitch.configure
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+services=("openvswitch-setup.service" \
+          "openvswitch-db-server.service" \
+          "openvswitch-initialize-db.service" \
+          "openvswitch.service")
+
+for service in "${services[@]}"; do
+    ln -sf "/etc/systemd/system/$service" \
+        "$ROOT/etc/systemd/system/multi-user.target.wants/$service"
+done
diff --git a/postgres.configure b/postgres.configure
new file mode 100644
index 00000000..257cf56c
--- /dev/null
+++ b/postgres.configure
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+ln -sf "/etc/systemd/system/postgres-server.service" \
+    "$ROOT/etc/systemd/system/multi-user.target.wants/postgres-server.service"
-- 
cgit v1.2.1