Diffstat (limited to 'install-files')
-rw-r--r--  install-files/chef/manifest | 3
-rwxr-xr-x  install-files/distbuild/lib/systemd/system-generators/ccache-nfs-mount-generator | 16
-rw-r--r--  install-files/distbuild/manifest | 28
-rw-r--r--  install-files/distbuild/usr/lib/distbuild-setup/ansible/distbuild-setup.yml | 115
-rw-r--r--  install-files/distbuild/usr/lib/distbuild-setup/ansible/hosts | 1
-rw-r--r--  install-files/distbuild/usr/lib/systemd/system/distbuild-setup.service | 16
-rw-r--r--  install-files/distbuild/usr/lib/systemd/system/morph-cache-server.service | 12
-rw-r--r--  install-files/distbuild/usr/lib/systemd/system/morph-controller-helper.service | 13
-rw-r--r--  install-files/distbuild/usr/lib/systemd/system/morph-controller.service | 12
-rw-r--r--  install-files/distbuild/usr/lib/systemd/system/morph-worker-helper.service | 13
-rw-r--r--  install-files/distbuild/usr/lib/systemd/system/morph-worker.service | 13
l---------  install-files/distbuild/usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service | 1
-rw-r--r--  install-files/distbuild/usr/share/distbuild-setup/morph-cache-server.conf | 5
-rw-r--r--  install-files/distbuild/usr/share/distbuild-setup/morph-controller-helper.conf | 5
-rw-r--r--  install-files/distbuild/usr/share/distbuild-setup/morph-controller.conf | 6
-rw-r--r--  install-files/distbuild/usr/share/distbuild-setup/morph-worker-helper.conf | 4
-rw-r--r--  install-files/distbuild/usr/share/distbuild-setup/morph-worker.conf | 4
-rw-r--r--  install-files/distbuild/usr/share/distbuild-setup/morph.conf | 13
-rw-r--r--  install-files/essential-files/etc/inputrc | 38
-rw-r--r--  install-files/essential-files/etc/os-release | 5
-rw-r--r--  install-files/essential-files/etc/profile | 13
-rw-r--r--  install-files/essential-files/manifest | 8
-rw-r--r--  install-files/essential-files/usr/lib/tmpfiles.d/shutdownramfs.conf | 4
l---------  install-files/genivi-devel-system-armv7/etc/morph.conf | 1
-rw-r--r--  install-files/genivi-devel-system-armv7/manifest | 5
-rw-r--r--  install-files/genivi-devel-system-armv7/src/morph.conf | 5
-rw-r--r--  install-files/moonshot/boot/m400-1003.dtb | bin 0 -> 18063 bytes
-rw-r--r--  install-files/moonshot/manifest | 2
-rw-r--r--  install-files/openstack/etc/horizon/apache-horizon.conf | 34
-rw-r--r--  install-files/openstack/etc/horizon/openstack_dashboard/local_settings.py | 551
-rw-r--r--  install-files/openstack/etc/tempest/tempest.conf | 1116
-rw-r--r--  install-files/openstack/manifest | 190
-rw-r--r--  install-files/openstack/usr/lib/sysctl.d/neutron.conf | 3
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/apache-httpd.service | 16
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/iscsi-setup.service | 12
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-api.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-central.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-collector.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-compute.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-config-setup.service | 11
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-db-setup.service | 13
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-notification.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-cinder-api.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-cinder-backup.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-cinder-config-setup.service | 11
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-cinder-db-setup.service | 13
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-cinder-lv-setup.service | 12
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-cinder-scheduler.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-cinder-volume.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-glance-api.service | 16
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-glance-registry.service | 16
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-glance-setup.service | 11
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-horizon-setup.service | 10
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-ironic-api.service | 16
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-ironic-conductor.service | 16
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-ironic-setup.service | 12
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-keystone-setup.service | 14
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-keystone.service | 16
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-network-setup.service | 12
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-neutron-config-setup.service | 13
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-neutron-db-setup.service | 13
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-neutron-dhcp-agent.service | 17
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-neutron-l3-agent.service | 18
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-neutron-metadata-agent.service | 17
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service | 18
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service | 17
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-neutron-server.service | 17
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-nova-api.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-nova-cert.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-nova-compute.service | 16
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-nova-conductor.service | 16
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-nova-config-setup.service | 11
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-nova-consoleauth.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-nova-db-setup.service | 13
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-nova-novncproxy.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-nova-scheduler.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openstack-nova-serialproxy.service | 15
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openvswitch-db-server.service | 12
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openvswitch-setup.service | 11
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/openvswitch.service | 12
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/postgres-server-setup.service | 12
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/postgres-server.service | 26
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/rabbitmq-server.service | 16
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/swift-controller-setup.service | 13
-rw-r--r--  install-files/openstack/usr/lib/systemd/system/swift-proxy.service | 14
-rw-r--r--  install-files/openstack/usr/share/openstack/ceilometer-config.yml | 36
-rw-r--r--  install-files/openstack/usr/share/openstack/ceilometer-db.yml | 50
-rw-r--r--  install-files/openstack/usr/share/openstack/ceilometer/ceilometer.conf | 1023
-rw-r--r--  install-files/openstack/usr/share/openstack/cinder-config.yml | 37
-rw-r--r--  install-files/openstack/usr/share/openstack/cinder-db.yml | 60
-rw-r--r--  install-files/openstack/usr/share/openstack/cinder-lvs.yml | 21
-rw-r--r--  install-files/openstack/usr/share/openstack/cinder/api-paste.ini | 60
-rw-r--r--  install-files/openstack/usr/share/openstack/cinder/cinder.conf | 2825
-rw-r--r--  install-files/openstack/usr/share/openstack/cinder/policy.json | 80
-rw-r--r--  install-files/openstack/usr/share/openstack/extras/00-disable-device.network | 2
-rw-r--r--  install-files/openstack/usr/share/openstack/extras/60-device-dhcp.network | 5
-rw-r--r--  install-files/openstack/usr/share/openstack/glance.yml | 93
-rw-r--r--  install-files/openstack/usr/share/openstack/glance/glance-api-paste.ini | 77
-rw-r--r--  install-files/openstack/usr/share/openstack/glance/glance-api.conf | 699
-rw-r--r--  install-files/openstack/usr/share/openstack/glance/glance-cache.conf | 200
-rw-r--r--  install-files/openstack/usr/share/openstack/glance/glance-registry-paste.ini | 30
-rw-r--r--  install-files/openstack/usr/share/openstack/glance/glance-registry.conf | 245
-rw-r--r--  install-files/openstack/usr/share/openstack/glance/glance-scrubber.conf | 108
-rw-r--r--  install-files/openstack/usr/share/openstack/glance/logging.conf | 54
-rw-r--r--  install-files/openstack/usr/share/openstack/glance/policy.json | 52
-rw-r--r--  install-files/openstack/usr/share/openstack/glance/schema-image.json | 28
-rw-r--r--  install-files/openstack/usr/share/openstack/horizon.yml | 47
-rw-r--r--  install-files/openstack/usr/share/openstack/hosts | 1
-rw-r--r--  install-files/openstack/usr/share/openstack/ironic.yml | 104
-rw-r--r--  install-files/openstack/usr/share/openstack/ironic/ironic.conf | 1247
-rw-r--r--  install-files/openstack/usr/share/openstack/ironic/policy.json | 5
-rw-r--r--  install-files/openstack/usr/share/openstack/iscsi.yml | 15
-rw-r--r--  install-files/openstack/usr/share/openstack/keystone.yml | 143
-rw-r--r--  install-files/openstack/usr/share/openstack/keystone/keystone-paste.ini | 121
-rw-r--r--  install-files/openstack/usr/share/openstack/keystone/keystone.conf | 1588
-rw-r--r--  install-files/openstack/usr/share/openstack/keystone/logging.conf | 65
-rw-r--r--  install-files/openstack/usr/share/openstack/keystone/policy.json | 171
-rw-r--r--  install-files/openstack/usr/share/openstack/network.yml | 67
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron-config.yml | 48
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron-db.yml | 51
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/api-paste.ini | 30
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/dhcp_agent.ini | 89
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/fwaas_driver.ini | 3
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/l3_agent.ini | 103
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/lbaas_agent.ini | 42
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/metadata_agent.ini | 60
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/metering_agent.ini | 18
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/neutron.conf | 640
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini | 114
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README | 3
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README | 6
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini | 29
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini | 15
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini | 100
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini | 76
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini | 26
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini | 41
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini | 63
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini | 50
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini | 78
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini | 31
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini | 19
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini | 86
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini | 100
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini | 15
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini | 118
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini | 52
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini | 4
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini | 28
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini | 30
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini | 13
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini | 31
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini | 79
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/nec/nec.ini | 60
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini | 41
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini | 35
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini | 26
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini | 190
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini | 14
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini | 44
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini | 200
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/policy.json | 138
-rw-r--r--  install-files/openstack/usr/share/openstack/neutron/vpn_agent.ini | 14
-rw-r--r--  install-files/openstack/usr/share/openstack/nova-config.yml | 34
-rw-r--r--  install-files/openstack/usr/share/openstack/nova-db.yml | 51
-rw-r--r--  install-files/openstack/usr/share/openstack/nova/api-paste.ini | 118
-rw-r--r--  install-files/openstack/usr/share/openstack/nova/cells.json | 26
-rw-r--r--  install-files/openstack/usr/share/openstack/nova/logging.conf | 81
-rw-r--r--  install-files/openstack/usr/share/openstack/nova/nova-compute.conf | 4
-rw-r--r--  install-files/openstack/usr/share/openstack/nova/nova.conf | 3809
-rw-r--r--  install-files/openstack/usr/share/openstack/nova/policy.json | 324
-rw-r--r--  install-files/openstack/usr/share/openstack/openvswitch.yml | 38
-rw-r--r--  install-files/openstack/usr/share/openstack/postgres.yml | 48
-rw-r--r--  install-files/openstack/usr/share/openstack/postgres/pg_hba.conf | 5
-rw-r--r--  install-files/openstack/usr/share/openstack/postgres/postgresql.conf | 11
-rw-r--r--  install-files/openstack/usr/share/openstack/rabbitmq/rabbitmq-env.conf | 3
-rw-r--r--  install-files/openstack/usr/share/openstack/rabbitmq/rabbitmq.config | 9
-rw-r--r--  install-files/openstack/usr/share/openstack/swift-controller.yml | 52
-rw-r--r--  install-files/openstack/usr/share/swift/etc/rsyncd.j2 | 23
-rw-r--r--  install-files/openstack/usr/share/swift/etc/swift/proxy-server.j2 | 630
-rw-r--r--  install-files/swift/etc/ntp.conf | 25
-rw-r--r--  install-files/swift/manifest | 15
-rw-r--r--  install-files/swift/usr/lib/systemd/system/rsync.service | 11
-rw-r--r--  install-files/swift/usr/lib/systemd/system/swift-storage-setup.service | 12
-rw-r--r--  install-files/swift/usr/lib/systemd/system/swift-storage.service | 12
-rw-r--r--  install-files/swift/usr/share/swift/etc/rsyncd.j2 | 23
-rw-r--r--  install-files/swift/usr/share/swift/etc/swift/account-server.j2 | 192
-rw-r--r--  install-files/swift/usr/share/swift/etc/swift/container-server.j2 | 203
-rw-r--r--  install-files/swift/usr/share/swift/etc/swift/object-server.j2 | 283
-rw-r--r--  install-files/swift/usr/share/swift/etc/swift/swift.j2 | 118
-rw-r--r--  install-files/swift/usr/share/swift/hosts | 1
-rw-r--r--  install-files/swift/usr/share/swift/swift-storage.yml | 24
-rw-r--r--  install-files/vagrant-files/home/vagrant/.ssh/authorized_keys | 1
-rw-r--r--  install-files/vagrant-files/manifest | 4
196 files changed, 21641 insertions, 0 deletions
diff --git a/install-files/chef/manifest b/install-files/chef/manifest
new file mode 100644
index 00000000..de6cc542
--- /dev/null
+++ b/install-files/chef/manifest
@@ -0,0 +1,3 @@
+0040755 0 0 /root
+0040700 1000 1000 /root/.ssh
+0100600 1000 1000 /root/.ssh/authorized_keys
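
Each manifest entry above is an octal file mode, a numeric uid, a gid and a path; the essential-files manifest further down also uses an optional leading "overwrite" keyword. The following is only an illustrative sketch of reading that format, not the actual Baserock install-files tooling, and the helper name is made up:

    #!/bin/sh
    # read-manifest.sh (hypothetical): print what each install-files manifest entry means.
    # Line format: [overwrite] <octal mode> <uid> <gid> <path>
    while read -r first rest; do
        overwrite=no
        if [ "$first" = overwrite ]; then
            overwrite=yes
            set -- $rest            # intentional word-splitting into mode/uid/gid/path
        else
            set -- $first $rest
        fi
        mode="$1" uid="$2" gid="$3" path="$4"
        case "$mode" in
            004*) kind=directory ;;
            012*) kind=symlink ;;
            010*) kind="regular file" ;;
            *)    kind=unknown ;;
        esac
        echo "$path: $kind, perms ${mode#???}, owner $uid:$gid, overwrite=$overwrite"
    done < "${1:-manifest}"

Run against the chef manifest above, it would report /root as a 0755 directory owned by root and authorized_keys as a 0600 regular file owned by uid/gid 1000.
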
diff --git a/install-files/distbuild/lib/systemd/system-generators/ccache-nfs-mount-generator b/install-files/distbuild/lib/systemd/system-generators/ccache-nfs-mount-generator
new file mode 100755
index 00000000..127bc84f
--- /dev/null
+++ b/install-files/distbuild/lib/systemd/system-generators/ccache-nfs-mount-generator
@@ -0,0 +1,16 @@
+#!/bin/sh
+read trove_host </etc/trove-host
+while read line; do echo "$line"; done >"$1/srv-distbuild-ccache.mount" <<EOF
+[Unit]
+Requires=rpcbind.service
+After=rpcbind.service
+SourcePath=/etc/trove-host
+
+[Mount]
+Type=nfs
+What=$trove_host:/home/cache/ccache
+Where=/srv/distbuild/ccache
+
+[Install]
+WantedBy=morph-worker.service
+EOF
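
The generator reads the Trove hostname from /etc/trove-host and writes an NFS mount unit into the directory systemd hands it as its first argument. A quick manual exercise might look like this; the hostname and the output directory are made-up values:

    echo trove.example.com > /etc/trove-host
    mkdir -p /tmp/generator-out
    /lib/systemd/system-generators/ccache-nfs-mount-generator /tmp/generator-out
    cat /tmp/generator-out/srv-distbuild-ccache.mount
    # Expect a [Mount] section with What=trove.example.com:/home/cache/ccache
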
diff --git a/install-files/distbuild/manifest b/install-files/distbuild/manifest
new file mode 100644
index 00000000..9363fa85
--- /dev/null
+++ b/install-files/distbuild/manifest
@@ -0,0 +1,28 @@
+0040755 0 0 /lib
+0040755 0 0 /lib/systemd
+0040755 0 0 /lib/systemd/system-generators
+0100755 0 0 /lib/systemd/system-generators/ccache-nfs-mount-generator
+0040755 0 0 /usr
+0040755 0 0 /usr/lib
+0040755 0 0 /usr/lib/distbuild-setup
+0040755 0 0 /usr/lib/distbuild-setup/ansible
+0100644 0 0 /usr/lib/distbuild-setup/ansible/hosts
+0100644 0 0 /usr/lib/distbuild-setup/ansible/distbuild-setup.yml
+0040755 0 0 /usr/lib/systemd
+0040755 0 0 /usr/lib/systemd/system
+0100644 0 0 /usr/lib/systemd/system/morph-cache-server.service
+0100644 0 0 /usr/lib/systemd/system/morph-controller.service
+0100644 0 0 /usr/lib/systemd/system/morph-controller-helper.service
+0100644 0 0 /usr/lib/systemd/system/morph-worker.service
+0100644 0 0 /usr/lib/systemd/system/morph-worker-helper.service
+0100644 0 0 /usr/lib/systemd/system/distbuild-setup.service
+0040755 0 0 /usr/lib/systemd/system/multi-user.target.wants
+0120644 0 0 /usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service
+0040755 0 0 /usr/share
+0040755 0 0 /usr/share/distbuild-setup
+0100644 0 0 /usr/share/distbuild-setup/morph.conf
+0100644 0 0 /usr/share/distbuild-setup/morph-cache-server.conf
+0100644 0 0 /usr/share/distbuild-setup/morph-controller.conf
+0100644 0 0 /usr/share/distbuild-setup/morph-controller-helper.conf
+0100644 0 0 /usr/share/distbuild-setup/morph-worker.conf
+0100644 0 0 /usr/share/distbuild-setup/morph-worker-helper.conf
diff --git a/install-files/distbuild/usr/lib/distbuild-setup/ansible/distbuild-setup.yml b/install-files/distbuild/usr/lib/distbuild-setup/ansible/distbuild-setup.yml
new file mode 100644
index 00000000..c3074c63
--- /dev/null
+++ b/install-files/distbuild/usr/lib/distbuild-setup/ansible/distbuild-setup.yml
@@ -0,0 +1,115 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/distbuild/distbuild.conf"
+ tasks:
+
+ - set_fact: ARTIFACT_CACHE_SERVER={{ TROVE_HOST }}
+ when: ARTIFACT_CACHE_SERVER is not defined
+
+ - name: Create mountpoint for extra disk space /srv/distbuild/
+ file: path=/srv/distbuild state=directory owner=root group=root mode=0755
+
+ - name: Create the morph and morph-cache-server configuration files
+ template: src=/usr/share/distbuild-setup/{{ item }} dest=/etc/{{ item }}
+ with_items:
+ - morph.conf
+ - morph-cache-server.conf
+
+ - name: Link the morph log file
+ file: src=/srv/distbuild/morph.log dest=/var/log/morph.log state=link force=yes
+
+ - name: Create the controller configuration files
+ template: src=/usr/share/distbuild-setup/{{ item }} dest=/etc/{{ item }}
+ with_items:
+ - morph-controller.conf
+ - morph-controller-helper.conf
+ when: DISTBUILD_CONTROLLER
+
+ - name: Link the controller log files
+ file: src=/srv/distbuild/{{ item }} dest=/var/log/{{ item }} state=link force=yes
+ with_items:
+ - morph-controller.log
+ - morph-controller-helper.log
+ when: DISTBUILD_CONTROLLER
+
+ - name: Create the worker configuration files
+ template: src=/usr/share/distbuild-setup/{{ item }} dest=/etc/{{ item }}
+ with_items:
+ - morph-worker.conf
+ - morph-worker-helper.conf
+ when: DISTBUILD_WORKER
+
+  - name: Link the worker log files
+ file: src=/srv/distbuild/{{ item }} dest=/var/log/{{ item }} state=link force=yes
+ with_items:
+ - morph-worker.log
+ - morph-worker-helper.log
+ when: DISTBUILD_WORKER
+
+ - name: Create /root/.ssh directory
+ file: path=/root/.ssh state=directory owner=root group=root mode=0700
+
+ - name: Copy the worker ssh key
+ copy: src={{ WORKER_SSH_KEY }} dest=/root/.ssh/id_rsa owner=root group=root mode=0600
+
+ - name: Create ssh public key
+ shell: ssh-keygen -y -f /root/.ssh/id_rsa > /root/.ssh/id_rsa.pub creates=/root/.ssh/id_rsa.pub
+
+ - name: Add trove's host key
+ shell: |
+ trove_key="$(ssh-keyscan -t dsa,ecdsa,rsa {{ TROVE_HOST|quote }})"
+ if [ -n "$trove_key" ]; then
+ echo "$trove_key" > /etc/ssh/ssh_known_hosts
+ fi
+ creates=/etc/ssh/ssh_known_hosts
+
+ # This is a kludge. We can add the host key for the TROVE_HOST that was
+ # specified, but users may access the Trove by other names, e.g. IP address
+ # or domain name. Distbuild is currently not safe to run except on a private
+ # network where host key checking is not important, so we disable it by
+ # default to avoid errors when users don't stick to using the exact same
+ # TROVE_HOST in repo URLs.
+ - name: Disable strict SSH host key checking
+ lineinfile:
+ dest: /etc/ssh/ssh_config
+ line: StrictHostKeyChecking no
+
+ - name: Enable the morph-cache-server service
+ service: name=morph-cache-server.service enabled=yes
+ register: morph_cache_server_service
+ - name: Restart the morph-cache-server service
+ service: name=morph-cache-server state=restarted
+ when: morph_cache_server_service|changed
+
+ - name: Enable the morph-worker service
+ service: name=morph-worker.service enabled=yes
+ register: morph_worker_service
+ when: DISTBUILD_WORKER
+ - name: Restart the morph-worker service
+ service: name=morph-worker state=restarted
+ when: morph_worker_service|changed
+
+ - name: Enable the morph-worker-helper service
+ service: name=morph-worker-helper.service enabled=yes
+ register: morph_worker_helper_service
+ when: DISTBUILD_WORKER
+ - name: Restart the morph-worker-helper service
+ service: name=morph-worker-helper state=restarted
+ when: morph_worker_helper_service|changed
+
+ - name: Enable the morph-controller service
+ service: name=morph-controller.service enabled=yes
+ register: morph_controller_service
+ when: DISTBUILD_CONTROLLER
+ - name: Restart the morph-controller service
+ service: name=morph-controller state=restarted
+ when: morph_controller_service|changed
+
+ - name: Enable the morph-controller-helper service
+ service: name=morph-controller-helper.service enabled=yes
+ register: morph_controller_helper_service
+ when: DISTBUILD_CONTROLLER
+ - name: Restart the morph-controller-helper service
+ service: name=morph-controller-helper state=restarted
+ when: morph_controller_helper_service|changed
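
The playbook loads its variables from /etc/distbuild/distbuild.conf, an Ansible vars file, so that file needs to define TROVE_HOST, TROVE_ID, CONTROLLERHOST, WORKERS, WORKER_SSH_KEY and the DISTBUILD_CONTROLLER/DISTBUILD_WORKER flags; ARTIFACT_CACHE_SERVER is optional and defaults to TROVE_HOST. A sketch with made-up values (the exact WORKERS format depends on how morph-controller.conf consumes it):

    cat > /etc/distbuild/distbuild.conf <<'EOF'
    TROVE_HOST: trove.example.com
    TROVE_ID: example-trove
    CONTROLLERHOST: 192.168.100.10
    WORKERS: worker1.example.com, worker2.example.com
    WORKER_SSH_KEY: /etc/distbuild/worker.key
    DISTBUILD_CONTROLLER: true
    DISTBUILD_WORKER: false
    EOF
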
diff --git a/install-files/distbuild/usr/lib/distbuild-setup/ansible/hosts b/install-files/distbuild/usr/lib/distbuild-setup/ansible/hosts
new file mode 100644
index 00000000..5b97818d
--- /dev/null
+++ b/install-files/distbuild/usr/lib/distbuild-setup/ansible/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local
diff --git a/install-files/distbuild/usr/lib/systemd/system/distbuild-setup.service b/install-files/distbuild/usr/lib/systemd/system/distbuild-setup.service
new file mode 100644
index 00000000..ec5f5a2d
--- /dev/null
+++ b/install-files/distbuild/usr/lib/systemd/system/distbuild-setup.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Run distbuild-setup Ansible scripts
+Requires=network.target
+After=network.target
+Requires=opensshd.service
+After=opensshd.service
+
+# If there's a shared /var subvolume, it must be mounted before this
+# unit runs.
+Requires=local-fs.target
+After=local-fs.target
+
+ConditionPathExists=/etc/distbuild/distbuild.conf
+
+[Service]
+ExecStart=/usr/bin/ansible-playbook -v -i /usr/lib/distbuild-setup/ansible/hosts /usr/lib/distbuild-setup/ansible/distbuild-setup.yml
diff --git a/install-files/distbuild/usr/lib/systemd/system/morph-cache-server.service b/install-files/distbuild/usr/lib/systemd/system/morph-cache-server.service
new file mode 100644
index 00000000..f55f3b6d
--- /dev/null
+++ b/install-files/distbuild/usr/lib/systemd/system/morph-cache-server.service
@@ -0,0 +1,12 @@
+[Install]
+WantedBy=multi-user.target
+
+[Unit]
+Description=Morph cache server
+Requires=local-fs.target network.target
+After=local-fs.target network.target
+ConditionPathExists=/etc/morph-cache-server.conf
+
+[Service]
+ExecStart=/usr/bin/morph-cache-server
+Restart=always
diff --git a/install-files/distbuild/usr/lib/systemd/system/morph-controller-helper.service b/install-files/distbuild/usr/lib/systemd/system/morph-controller-helper.service
new file mode 100644
index 00000000..3f30cbcf
--- /dev/null
+++ b/install-files/distbuild/usr/lib/systemd/system/morph-controller-helper.service
@@ -0,0 +1,13 @@
+[Install]
+WantedBy=multi-user.target
+
+[Unit]
+Description=Morph distributed build controller helper
+Requires=morph-controller.service
+After=morph-controller.service
+ConditionPathExists=/etc/morph-controller.conf
+ConditionPathExists=/etc/morph-controller-helper.conf
+
+[Service]
+ExecStart=/usr/bin/distbuild-helper --config /etc/morph-controller-helper.conf
+Restart=always
diff --git a/install-files/distbuild/usr/lib/systemd/system/morph-controller.service b/install-files/distbuild/usr/lib/systemd/system/morph-controller.service
new file mode 100644
index 00000000..1556d232
--- /dev/null
+++ b/install-files/distbuild/usr/lib/systemd/system/morph-controller.service
@@ -0,0 +1,12 @@
+[Install]
+WantedBy=multi-user.target
+
+[Unit]
+Description=Morph distributed build controller
+Requires=local-fs.target network.target
+After=local-fs.target network.target
+ConditionPathExists=/etc/morph-controller.conf
+
+[Service]
+ExecStart=/usr/bin/morph controller-daemon --config /etc/morph-controller.conf
+Restart=always
diff --git a/install-files/distbuild/usr/lib/systemd/system/morph-worker-helper.service b/install-files/distbuild/usr/lib/systemd/system/morph-worker-helper.service
new file mode 100644
index 00000000..28400701
--- /dev/null
+++ b/install-files/distbuild/usr/lib/systemd/system/morph-worker-helper.service
@@ -0,0 +1,13 @@
+[Install]
+WantedBy=multi-user.target
+
+[Unit]
+Description=Morph distributed build worker helper
+Requires=morph-worker.service
+After=morph-worker.service
+ConditionPathExists=/etc/morph-worker.conf
+ConditionPathExists=/etc/morph-worker-helper.conf
+
+[Service]
+ExecStart=/usr/bin/distbuild-helper --config /etc/morph-worker-helper.conf
+Restart=always
diff --git a/install-files/distbuild/usr/lib/systemd/system/morph-worker.service b/install-files/distbuild/usr/lib/systemd/system/morph-worker.service
new file mode 100644
index 00000000..90fea404
--- /dev/null
+++ b/install-files/distbuild/usr/lib/systemd/system/morph-worker.service
@@ -0,0 +1,13 @@
+[Install]
+WantedBy=multi-user.target
+
+[Unit]
+Description=Morph distributed build worker
+Requires=local-fs.target network.target
+Wants=srv-distbuild-ccache.mount
+After=local-fs.target network.target srv-distbuild-ccache.mount
+ConditionPathExists=/etc/morph-worker.conf
+
+[Service]
+ExecStart=/usr/bin/morph worker-daemon --config /etc/morph-worker.conf
+Restart=always
diff --git a/install-files/distbuild/usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service b/install-files/distbuild/usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service
new file mode 120000
index 00000000..8f06febd
--- /dev/null
+++ b/install-files/distbuild/usr/lib/systemd/system/multi-user.target.wants/distbuild-setup.service
@@ -0,0 +1 @@
+../distbuild-setup.service
\ No newline at end of file
diff --git a/install-files/distbuild/usr/share/distbuild-setup/morph-cache-server.conf b/install-files/distbuild/usr/share/distbuild-setup/morph-cache-server.conf
new file mode 100644
index 00000000..b9020e7d
--- /dev/null
+++ b/install-files/distbuild/usr/share/distbuild-setup/morph-cache-server.conf
@@ -0,0 +1,5 @@
+[config]
+port = 8080
+artifact-dir = /srv/distbuild/artifacts
+direct-mode = True
+fcgi-server = False
diff --git a/install-files/distbuild/usr/share/distbuild-setup/morph-controller-helper.conf b/install-files/distbuild/usr/share/distbuild-setup/morph-controller-helper.conf
new file mode 100644
index 00000000..99d38739
--- /dev/null
+++ b/install-files/distbuild/usr/share/distbuild-setup/morph-controller-helper.conf
@@ -0,0 +1,5 @@
+[config]
+log = /srv/distbuild/morph-controller-helper.log
+log-max = 100M
+parent-port = 5656
+parent-address = 127.0.0.1
diff --git a/install-files/distbuild/usr/share/distbuild-setup/morph-controller.conf b/install-files/distbuild/usr/share/distbuild-setup/morph-controller.conf
new file mode 100644
index 00000000..c16c0343
--- /dev/null
+++ b/install-files/distbuild/usr/share/distbuild-setup/morph-controller.conf
@@ -0,0 +1,6 @@
+[config]
+log = /srv/distbuild/morph-controller.log
+log-max = 100M
+writeable-cache-server = http://{{ ARTIFACT_CACHE_SERVER }}:8081/
+worker = {{ WORKERS }}
+controller-helper-address = 127.0.0.1
diff --git a/install-files/distbuild/usr/share/distbuild-setup/morph-worker-helper.conf b/install-files/distbuild/usr/share/distbuild-setup/morph-worker-helper.conf
new file mode 100644
index 00000000..29d4ef3f
--- /dev/null
+++ b/install-files/distbuild/usr/share/distbuild-setup/morph-worker-helper.conf
@@ -0,0 +1,4 @@
+[config]
+log = /srv/distbuild/morph-worker-helper.log
+log-max = 100M
+parent-address = 127.0.0.1
diff --git a/install-files/distbuild/usr/share/distbuild-setup/morph-worker.conf b/install-files/distbuild/usr/share/distbuild-setup/morph-worker.conf
new file mode 100644
index 00000000..fb382bad
--- /dev/null
+++ b/install-files/distbuild/usr/share/distbuild-setup/morph-worker.conf
@@ -0,0 +1,4 @@
+[config]
+log = /srv/distbuild/morph-worker.log
+log-max = 100M
+controller-initiator-address =
diff --git a/install-files/distbuild/usr/share/distbuild-setup/morph.conf b/install-files/distbuild/usr/share/distbuild-setup/morph.conf
new file mode 100644
index 00000000..29de684c
--- /dev/null
+++ b/install-files/distbuild/usr/share/distbuild-setup/morph.conf
@@ -0,0 +1,13 @@
+[config]
+log = /srv/distbuild/morph.log
+log-max = 100M
+cachedir = /srv/distbuild
+tempdir = /srv/distbuild/tmp
+trove-host = {{ TROVE_HOST }}
+trove-id = {{ TROVE_ID }}
+controller-initiator-address = {{ CONTROLLERHOST }}
+tempdir-min-space = 4G
+cachedir-min-space = 4G
+build-ref-prefix = {{ TROVE_ID }}
+artifact-cache-server = http://{{ ARTIFACT_CACHE_SERVER }}:8080/
+git-resolve-cache-server = http://{{ TROVE_HOST }}:8080/
diff --git a/install-files/essential-files/etc/inputrc b/install-files/essential-files/etc/inputrc
new file mode 100644
index 00000000..ddee44cd
--- /dev/null
+++ b/install-files/essential-files/etc/inputrc
@@ -0,0 +1,38 @@
+# Allow the command prompt to wrap to the next line
+set horizontal-scroll-mode Off
+
+# Enable 8bit input
+set meta-flag On
+set input-meta On
+
+# Turns off 8th bit stripping
+set convert-meta Off
+
+# Keep the 8th bit for display
+set output-meta On
+
+# none, visible or audible
+set bell-style none
+
+# for linux console and RH/Debian xterm
+"\e[1~": beginning-of-line
+"\e[4~": end-of-line
+"\e[5~": beginning-of-history
+"\e[6~": end-of-history
+"\e[7~": beginning-of-line
+"\e[3~": delete-char
+"\e[2~": quoted-insert
+"\e[5C": forward-word
+"\e[5D": backward-word
+"\e\e[C": forward-word
+"\e\e[D": backward-word
+"\e[1;5C": forward-word
+"\e[1;5D": backward-word
+
+# for non RH/Debian xterm, can't hurt for RH/Debian xterm
+"\eOH": beginning-of-line
+"\eOF": end-of-line
+
+# for Konsole and freebsd console
+"\e[H": beginning-of-line
+"\e[F": end-of-line
diff --git a/install-files/essential-files/etc/os-release b/install-files/essential-files/etc/os-release
new file mode 100644
index 00000000..b729c75f
--- /dev/null
+++ b/install-files/essential-files/etc/os-release
@@ -0,0 +1,5 @@
+NAME="Baserock"
+ID=baserock
+HOME_URL="http://wiki.baserock.org"
+SUPPORT_URL="http://wiki.baserock.org/mailinglist"
+BUG_REPORT_URL="http://wiki.baserock.org/mailinglist"
diff --git a/install-files/essential-files/etc/profile b/install-files/essential-files/etc/profile
new file mode 100644
index 00000000..b306a132
--- /dev/null
+++ b/install-files/essential-files/etc/profile
@@ -0,0 +1,13 @@
+# /etc/profile
+
+# Set our default path
+PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+export PATH
+
+# Source global bash config
+if test "$PS1" && test "$BASH" && test -r /etc/bash.bashrc; then
+ . /etc/bash.bashrc
+fi
+
+# Set default pager to less
+export MANPAGER='less -R'
diff --git a/install-files/essential-files/manifest b/install-files/essential-files/manifest
new file mode 100644
index 00000000..2b77c237
--- /dev/null
+++ b/install-files/essential-files/manifest
@@ -0,0 +1,8 @@
+0040755 0 0 /etc
+overwrite 0100644 0 0 /etc/os-release
+overwrite 0100644 0 0 /etc/profile
+overwrite 0100644 0 0 /etc/inputrc
+0040755 0 0 /usr
+0040755 0 0 /usr/lib
+0040755 0 0 /usr/lib/tmpfiles.d
+0100644 0 0 /usr/lib/tmpfiles.d/shutdownramfs.conf
diff --git a/install-files/essential-files/usr/lib/tmpfiles.d/shutdownramfs.conf b/install-files/essential-files/usr/lib/tmpfiles.d/shutdownramfs.conf
new file mode 100644
index 00000000..174f1f03
--- /dev/null
+++ b/install-files/essential-files/usr/lib/tmpfiles.d/shutdownramfs.conf
@@ -0,0 +1,4 @@
+# If /usr/lib/shutdownramfs has been populated, copy it into /run/initramfs so
+# /run/initramfs/shutdown will be executed on shut-down, so that it may unmount
+# the rootfs.
+C /run/initramfs - - - - /usr/lib/shutdownramfs
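
The C (copy) directive is applied by systemd-tmpfiles at boot; to exercise just this drop-in by hand and confirm the copy, something like the following should work once /usr/lib/shutdownramfs has been populated:

    systemd-tmpfiles --create /usr/lib/tmpfiles.d/shutdownramfs.conf
    ls /run/initramfs    # should now contain the copied tree, including its shutdown executable
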
diff --git a/install-files/genivi-devel-system-armv7/etc/morph.conf b/install-files/genivi-devel-system-armv7/etc/morph.conf
new file mode 120000
index 00000000..8f384049
--- /dev/null
+++ b/install-files/genivi-devel-system-armv7/etc/morph.conf
@@ -0,0 +1 @@
+/src/morph.conf
\ No newline at end of file
diff --git a/install-files/genivi-devel-system-armv7/manifest b/install-files/genivi-devel-system-armv7/manifest
new file mode 100644
index 00000000..31980633
--- /dev/null
+++ b/install-files/genivi-devel-system-armv7/manifest
@@ -0,0 +1,5 @@
+0040755 0 0 /src
+0040755 0 0 /src/tmp
+0100666 0 0 /src/morph.conf
+0040755 0 0 /etc
+0120666 0 0 /etc/morph.conf
diff --git a/install-files/genivi-devel-system-armv7/src/morph.conf b/install-files/genivi-devel-system-armv7/src/morph.conf
new file mode 100644
index 00000000..76b6fde9
--- /dev/null
+++ b/install-files/genivi-devel-system-armv7/src/morph.conf
@@ -0,0 +1,5 @@
+[config]
+log = /src/morph.log
+cachedir = /src/cache
+tempdir = /src/tmp
+staging-chroot = true
diff --git a/install-files/moonshot/boot/m400-1003.dtb b/install-files/moonshot/boot/m400-1003.dtb
new file mode 100644
index 00000000..d6fd83ee
--- /dev/null
+++ b/install-files/moonshot/boot/m400-1003.dtb
Binary files differ
diff --git a/install-files/moonshot/manifest b/install-files/moonshot/manifest
new file mode 100644
index 00000000..dd80fe49
--- /dev/null
+++ b/install-files/moonshot/manifest
@@ -0,0 +1,2 @@
+0040755 0 0 /boot
+0100744 0 0 /boot/m400-1003.dtb
diff --git a/install-files/openstack/etc/horizon/apache-horizon.conf b/install-files/openstack/etc/horizon/apache-horizon.conf
new file mode 100644
index 00000000..ea88897a
--- /dev/null
+++ b/install-files/openstack/etc/horizon/apache-horizon.conf
@@ -0,0 +1,34 @@
+<VirtualHost *:80>
+ WSGIScriptAlias /horizon /var/lib/horizon/openstack_dashboard/django.wsgi
+ WSGIDaemonProcess horizon user=horizon group=horizon processes=3 threads=10 home=/var/lib/horizon display-name=horizon
+ WSGIApplicationGroup %{GLOBAL}
+
+ RedirectMatch ^/$ /horizon/
+
+ SetEnv APACHE_RUN_USER apache
+ SetEnv APACHE_RUN_GROUP apache
+ WSGIProcessGroup horizon
+
+ DocumentRoot /var/lib/horizon/.blackhole
+ Alias /static /var/lib/horizon/openstack_dashboard/static
+
+ <Directory /var/lib/horizon/openstack_dashboard >
+ Options Indexes FollowSymLinks MultiViews
+ AllowOverride None
+ # Apache 2.4 uses mod_authz_host for access control now (instead of
+ # "Allow")
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ </Directory>
+
+ ErrorLog /var/log/httpd/horizon_error.log
+ LogLevel warn
+ CustomLog /var/log/httpd/horizon_access.log combined
+</VirtualHost>
+
+WSGISocketPrefix /var/run/httpd
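
The vhost assumes the dashboard tree and its collected static files live under /var/lib/horizon, matching STATIC_ROOT in local_settings.py below. A small sanity check before starting apache-httpd.service; the paths are taken from the config above, and how the tree gets populated is outside this diff:

    test -f /var/lib/horizon/openstack_dashboard/django.wsgi || echo "WSGI entry point missing"
    test -d /var/lib/horizon/openstack_dashboard/static || echo "static files not collected yet"
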
diff --git a/install-files/openstack/etc/horizon/openstack_dashboard/local_settings.py b/install-files/openstack/etc/horizon/openstack_dashboard/local_settings.py
new file mode 100644
index 00000000..febc3e70
--- /dev/null
+++ b/install-files/openstack/etc/horizon/openstack_dashboard/local_settings.py
@@ -0,0 +1,551 @@
+import os
+
+from django.utils.translation import ugettext_lazy as _
+
+from openstack_dashboard import exceptions
+
+DEBUG = True
+TEMPLATE_DEBUG = DEBUG
+
+STATIC_ROOT = "/var/lib/horizon/openstack_dashboard/static"
+
+# Required for Django 1.5.
+# If horizon is running in production (DEBUG is False), set this
+# with the list of host/domain names that the application can serve.
+# For more information see:
+# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
+#ALLOWED_HOSTS = ['horizon.example.com', ]
+ALLOWED_HOSTS = ['*']
+
+# Set SSL proxy settings:
+# For Django 1.4+ pass this header from the proxy after terminating the SSL,
+# and don't forget to strip it from the client's request.
+# For more information see:
+# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
+# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
+
+# If Horizon is being served through SSL, then uncomment the following two
+# settings to better secure the cookies from security exploits
+#CSRF_COOKIE_SECURE = True
+#SESSION_COOKIE_SECURE = True
+
+# Overrides for OpenStack API versions. Use this setting to force the
+# OpenStack dashboard to use a specific API version for a given service API.
+# NOTE: The version should be formatted as it appears in the URL for the
+# service API. For example, the identity service APIs have inconsistent
+# use of the decimal point, so valid options would be "2.0" or "3".
+# OPENSTACK_API_VERSIONS = {
+# "data_processing": 1.1,
+# "identity": 3,
+# "volume": 2
+# }
+
+# Set this to True if running on multi-domain model. When this is enabled, it
+# will require user to enter the Domain name in addition to username for login.
+# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
+
+# Overrides the default domain used when running on single-domain model
+# with Keystone V3. All entities will be created in the default domain.
+# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
+
+# Set Console type:
+# valid options would be "AUTO"(default), "VNC", "SPICE", "RDP" or None
+# Set to None explicitly if you want to deactivate the console.
+# CONSOLE_TYPE = "AUTO"
+
+# Default OpenStack Dashboard configuration.
+HORIZON_CONFIG = {
+ 'user_home': 'openstack_dashboard.views.get_user_home',
+ 'ajax_queue_limit': 10,
+ 'auto_fade_alerts': {
+ 'delay': 3000,
+ 'fade_duration': 1500,
+ 'types': ['alert-success', 'alert-info']
+ },
+ 'help_url': "http://docs.openstack.org",
+ 'exceptions': {'recoverable': exceptions.RECOVERABLE,
+ 'not_found': exceptions.NOT_FOUND,
+ 'unauthorized': exceptions.UNAUTHORIZED},
+ 'modal_backdrop': 'static',
+ 'angular_modules': [],
+ 'js_files': [],
+}
+
+# Specify a regular expression to validate user passwords.
+# HORIZON_CONFIG["password_validator"] = {
+# "regex": '.*',
+# "help_text": _("Your password does not meet the requirements.")
+# }
+
+# Disable simplified floating IP address management for deployments with
+# multiple floating IP pools or complex network requirements.
+# HORIZON_CONFIG["simple_ip_management"] = False
+
+# Turn off browser autocompletion for forms including the login form and
+# the database creation workflow if so desired.
+# HORIZON_CONFIG["password_autocomplete"] = "off"
+
+# Setting this to True will disable the reveal button for password fields,
+# including on the login form.
+# HORIZON_CONFIG["disable_password_reveal"] = False
+
+#LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
+
+LOCAL_PATH = "/var/lib/horizon"
+
+# Set custom secret key:
+# You can either set it to a specific value or you can let horizon generate a
+# default secret key that is unique on this machine, e.i. regardless of the
+# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, there
+# may be situations where you would want to set this explicitly, e.g. when
+# multiple dashboard instances are distributed on different machines (usually
+# behind a load-balancer). Either you have to make sure that a session gets all
+# requests routed to the same dashboard instance or you set the same SECRET_KEY
+# for all of them.
+from horizon.utils import secret_key
+SECRET_KEY = secret_key.generate_or_read_from_file(
+ os.path.join(LOCAL_PATH, '.secret_key_store'))
+
+# We recommend you use memcached for development; otherwise after every reload
+# of the django development server, you will have to login again. To use
+# memcached set CACHES to something like
+CACHES = {
+ 'default': {
+ 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
+ 'LOCATION': '127.0.0.1:11211',
+ }
+}
+
+#CACHES = {
+# 'default': {
+# 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
+# }
+#}
+
+# Send email to the console by default
+EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
+# Or send them to /dev/null
+#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
+
+# Configure these for your outgoing email host
+# EMAIL_HOST = 'smtp.my-company.com'
+# EMAIL_PORT = 25
+# EMAIL_HOST_USER = 'djangomail'
+# EMAIL_HOST_PASSWORD = 'top-secret!'
+
+# For multiple regions uncomment this configuration, and add (endpoint, title).
+# AVAILABLE_REGIONS = [
+# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
+# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
+# ]
+
+OPENSTACK_HOST = "127.0.0.1"
+OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
+OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
+
+# Disable SSL certificate checks (useful for self-signed certificates):
+# OPENSTACK_SSL_NO_VERIFY = True
+
+# The CA certificate to use to verify SSL connections
+# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
+
+# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
+# capabilities of the auth backend for Keystone.
+# If Keystone has been configured to use LDAP as the auth backend then set
+# can_edit_user to False and name to 'ldap'.
+#
+# TODO(tres): Remove these once Keystone has an API to identify auth backend.
+OPENSTACK_KEYSTONE_BACKEND = {
+ 'name': 'native',
+ 'can_edit_user': True,
+ 'can_edit_group': True,
+ 'can_edit_project': True,
+ 'can_edit_domain': True,
+ 'can_edit_role': True
+}
+
+#Setting this to True will add a new "Retrieve Password" action on instances,
+#allowing Admin session password retrieval/decryption.
+#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
+
+# The Xen Hypervisor has the ability to set the mount point for volumes
+# attached to instances (other Hypervisors currently do not). Setting
+# can_set_mount_point to True will add the option to set the mount point
+# from the UI.
+OPENSTACK_HYPERVISOR_FEATURES = {
+ 'can_set_mount_point': False,
+ 'can_set_password': False,
+}
+
+# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional
+# services provided by cinder that are not exposed by its extension API.
+OPENSTACK_CINDER_FEATURES = {
+ 'enable_backup': False,
+}
+
+# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
+# services provided by neutron. Options currently available are load
+# balancer service, security groups, quotas, VPN service.
+OPENSTACK_NEUTRON_NETWORK = {
+ 'enable_router': True,
+ 'enable_quotas': True,
+ 'enable_ipv6': True,
+ 'enable_distributed_router': False,
+ 'enable_ha_router': False,
+ 'enable_lb': True,
+ 'enable_firewall': True,
+ 'enable_vpn': True,
+ # The profile_support option is used to detect if an external router can be
+ # configured via the dashboard. When using specific plugins the
+ # profile_support can be turned on if needed.
+ 'profile_support': None,
+ #'profile_support': 'cisco',
+ # Set which provider network types are supported. Only the network types
+ # in this list will be available to choose from when creating a network.
+ # Network types include local, flat, vlan, gre, and vxlan.
+ 'supported_provider_types': ['*'],
+}
+
+# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
+# in the OpenStack Dashboard related to the Image service, such as the list
+# of supported image formats.
+# OPENSTACK_IMAGE_BACKEND = {
+# 'image_formats': [
+# ('', _('Select format')),
+# ('aki', _('AKI - Amazon Kernel Image')),
+# ('ami', _('AMI - Amazon Machine Image')),
+# ('ari', _('ARI - Amazon Ramdisk Image')),
+# ('iso', _('ISO - Optical Disk Image')),
+# ('qcow2', _('QCOW2 - QEMU Emulator')),
+# ('raw', _('Raw')),
+# ('vdi', _('VDI')),
+# ('vhd', _('VHD')),
+# ('vmdk', _('VMDK'))
+# ]
+# }
+
+# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for
+# image custom property attributes that appear on image detail pages.
+IMAGE_CUSTOM_PROPERTY_TITLES = {
+ "architecture": _("Architecture"),
+ "kernel_id": _("Kernel ID"),
+ "ramdisk_id": _("Ramdisk ID"),
+ "image_state": _("Euca2ools state"),
+ "project_id": _("Project ID"),
+ "image_type": _("Image Type")
+}
+
+# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image
+# custom properties should not be displayed in the Image Custom Properties
+# table.
+IMAGE_RESERVED_CUSTOM_PROPERTIES = []
+
+# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
+# in the Keystone service catalog. Use this setting when Horizon is running
+# external to the OpenStack environment. The default is 'publicURL'.
+#OPENSTACK_ENDPOINT_TYPE = "publicURL"
+
+# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
+# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
+# in the Keystone service catalog. Use this setting when Horizon is running
+# external to the OpenStack environment. The default is None. This
+# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
+#SECONDARY_ENDPOINT_TYPE = "publicURL"
+
+# The number of objects (Swift containers/objects or images) to display
+# on a single page before providing a paging element (a "more" link)
+# to paginate results.
+API_RESULT_LIMIT = 1000
+API_RESULT_PAGE_SIZE = 20
+
+# Specify a maximum number of items to display in a dropdown.
+DROPDOWN_MAX_ITEMS = 30
+
+# The timezone of the server. This should correspond with the timezone
+# of your entire OpenStack installation, and hopefully be in UTC.
+TIME_ZONE = "UTC"
+
+# When launching an instance, the menu of available flavors is
+# sorted by RAM usage, ascending. If you would like a different sort order,
+# you can provide another flavor attribute as sorting key. Alternatively, you
+# can provide a custom callback method to use for sorting. You can also provide
+# a flag for reverse sort. For more info, see
+# http://docs.python.org/2/library/functions.html#sorted
+# CREATE_INSTANCE_FLAVOR_SORT = {
+# 'key': 'name',
+# # or
+# 'key': my_awesome_callback_method,
+# 'reverse': False,
+# }
+
+# The Horizon Policy Enforcement engine uses these values to load per service
+# policy rule files. The content of these files should match the files the
+# OpenStack services are using to determine role based access control in the
+# target installation.
+
+# Path to directory containing policy.json files
+#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
+# Map of local copy of service policy files
+#POLICY_FILES = {
+# 'identity': 'keystone_policy.json',
+# 'compute': 'nova_policy.json',
+# 'volume': 'cinder_policy.json',
+# 'image': 'glance_policy.json',
+# 'orchestration': 'heat_policy.json',
+# 'network': 'neutron_policy.json',
+#}
+
+# Trove user and database extension support. By default support for
+# creating users and databases on database instances is turned on.
+# To disable these extensions set the permission here to something
+# unusable such as ["!"].
+# TROVE_ADD_USER_PERMS = []
+# TROVE_ADD_DATABASE_PERMS = []
+
+LOGGING = {
+ 'version': 1,
+ # When set to True this will disable all logging except
+ # for loggers specified in this configuration dictionary. Note that
+ # if nothing is specified here and disable_existing_loggers is True,
+ # django.db.backends will still log unless it is disabled explicitly.
+ 'disable_existing_loggers': False,
+ 'handlers': {
+ 'null': {
+ 'level': 'DEBUG',
+ 'class': 'django.utils.log.NullHandler',
+ },
+ 'console': {
+ # Set the level to "DEBUG" for verbose output logging.
+ 'level': 'INFO',
+ 'class': 'logging.StreamHandler',
+ },
+ },
+ 'loggers': {
+ # Logging from django.db.backends is VERY verbose, send to null
+ # by default.
+ 'django.db.backends': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'requests': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'horizon': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'openstack_dashboard': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'novaclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'cinderclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'keystoneclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'glanceclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'neutronclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'heatclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'ceilometerclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'troveclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'swiftclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'openstack_auth': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'nose.plugins.manager': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'django': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'iso8601': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'scss': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ }
+}
+
+# 'direction' should not be specified for all_tcp/udp/icmp.
+# It is specified in the form.
+SECURITY_GROUP_RULES = {
+ 'all_tcp': {
+ 'name': _('All TCP'),
+ 'ip_protocol': 'tcp',
+ 'from_port': '1',
+ 'to_port': '65535',
+ },
+ 'all_udp': {
+ 'name': _('All UDP'),
+ 'ip_protocol': 'udp',
+ 'from_port': '1',
+ 'to_port': '65535',
+ },
+ 'all_icmp': {
+ 'name': _('All ICMP'),
+ 'ip_protocol': 'icmp',
+ 'from_port': '-1',
+ 'to_port': '-1',
+ },
+ 'ssh': {
+ 'name': 'SSH',
+ 'ip_protocol': 'tcp',
+ 'from_port': '22',
+ 'to_port': '22',
+ },
+ 'smtp': {
+ 'name': 'SMTP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '25',
+ 'to_port': '25',
+ },
+ 'dns': {
+ 'name': 'DNS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '53',
+ 'to_port': '53',
+ },
+ 'http': {
+ 'name': 'HTTP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '80',
+ 'to_port': '80',
+ },
+ 'pop3': {
+ 'name': 'POP3',
+ 'ip_protocol': 'tcp',
+ 'from_port': '110',
+ 'to_port': '110',
+ },
+ 'imap': {
+ 'name': 'IMAP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '143',
+ 'to_port': '143',
+ },
+ 'ldap': {
+ 'name': 'LDAP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '389',
+ 'to_port': '389',
+ },
+ 'https': {
+ 'name': 'HTTPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '443',
+ 'to_port': '443',
+ },
+ 'smtps': {
+ 'name': 'SMTPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '465',
+ 'to_port': '465',
+ },
+ 'imaps': {
+ 'name': 'IMAPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '993',
+ 'to_port': '993',
+ },
+ 'pop3s': {
+ 'name': 'POP3S',
+ 'ip_protocol': 'tcp',
+ 'from_port': '995',
+ 'to_port': '995',
+ },
+ 'ms_sql': {
+ 'name': 'MS SQL',
+ 'ip_protocol': 'tcp',
+ 'from_port': '1433',
+ 'to_port': '1433',
+ },
+ 'mysql': {
+ 'name': 'MYSQL',
+ 'ip_protocol': 'tcp',
+ 'from_port': '3306',
+ 'to_port': '3306',
+ },
+ 'rdp': {
+ 'name': 'RDP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '3389',
+ 'to_port': '3389',
+ },
+}
+
+# Deprecation Notice:
+#
+# The setting FLAVOR_EXTRA_KEYS has been deprecated.
+# Please load extra spec metadata into the Glance Metadata Definition Catalog.
+#
+# The sample quota definitions can be found in:
+# <glance_source>/etc/metadefs/compute-quota.json
+#
+# The metadata definition catalog supports CLI and API:
+# $glance --os-image-api-version 2 help md-namespace-import
+# $glance-manage db_load_metadefs <directory_with_definition_files>
+#
+# See Metadata Definitions on: http://docs.openstack.org/developer/glance/
+
+# Indicate to the Sahara data processing service whether or not
+# automatic floating IP allocation is in effect. If it is not
+# in effect, the user will be prompted to choose a floating IP
+# pool for use in their cluster. False by default. You would want
+# to set this to True if you were running Nova Networking with
+# auto_assign_floating_ip = True.
+# SAHARA_AUTO_IP_ALLOCATION_ENABLED = False
+
+# The hash algorithm to use for authentication tokens. This must
+# match the hash algorithm that the identity server and the
+# auth_token middleware are using. Allowed values are the
+# algorithms supported by Python's hashlib library.
+# OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5'
+LOGIN_URL='/horizon/auth/login/'
+LOGOUT_URL='/horizon/auth/logout/'
+LOGIN_REDIRECT_URL='/horizon/'
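
Since the settings above back Django's session cache with memcached on 127.0.0.1:11211, a login loop in Horizon is often just an unreachable memcached. A quick reachability probe, assuming the nc utility from netcat is installed:

    printf 'version\r\nquit\r\n' | nc 127.0.0.1 11211
    # A healthy memcached answers with a single "VERSION x.y.z" line
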
diff --git a/install-files/openstack/etc/tempest/tempest.conf b/install-files/openstack/etc/tempest/tempest.conf
new file mode 100644
index 00000000..05f0eca1
--- /dev/null
+++ b/install-files/openstack/etc/tempest/tempest.conf
@@ -0,0 +1,1116 @@
+[DEFAULT]
+
+#
+# From tempest.config
+#
+
+# Whether to disable inter-process locks (boolean value)
+#disable_process_locking = false
+
+# Directory to use for lock files. (string value)
+lock_path = /run/lock
+
+#
+# From tempest.config
+#
+
+# Print debugging output (set logging level to DEBUG instead of
+# default WARNING level). (boolean value)
+#debug = false
+
+# Print more verbose output (set logging level to INFO instead of
+# default WARNING level). (boolean value)
+#verbose = false
+
+#
+# From tempest.config
+#
+
+# The name of a logging configuration file. This file is appended to
+# any existing logging configuration files. For details about logging
+# configuration files, see the Python logging module documentation.
+# (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Format string for %%(asctime)s in log records. Default: %(default)s
+# . (string value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) The base directory used for relative --log-file paths.
+# (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# (Optional) Name of log file to output to. If no default is set,
+# logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# DEPRECATED. A logging.Formatter log message format string which may
+# use any of the available logging.LogRecord attributes. This option
+# is deprecated. Please use logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format = <None>
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility = LOG_USER
+
+# Use syslog for logging. Existing syslog format is DEPRECATED during
+# I, and will change in J to honor RFC5424. (boolean value)
+use_syslog = true
+
+# (Optional) Enables or disables syslog rfc5424 format for logging. If
+# enabled, prefixes the MSG part of the syslog message with APP-NAME
+# (RFC5424). The format without the APP-NAME is deprecated in I, and
+# will be removed in J. (boolean value)
+#use_syslog_rfc_format = false
+
+#
+# From tempest.config
+#
+
+# Log output to standard error. (boolean value)
+#use_stderr = true
+
+#
+# From tempest.config
+#
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+# The format for an instance that is passed with the log message.
+# (string value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message.
+# (string value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Format string to use for log messages without context. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Prefix each line of exception output with this format. (string
+# value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+
+[auth]
+
+#
+# From tempest.config
+#
+
+# Allows test cases to create/destroy tenants and users. This option
+# requires that OpenStack Identity API admin credentials are known. If
+# false, isolated test cases and parallel execution can still be
+# achieved by configuring a list of test accounts (boolean value)
+# Deprecated group/name - [compute]/allow_tenant_isolation
+# Deprecated group/name - [orchestration]/allow_tenant_isolation
+allow_tenant_isolation = true
+
+# If set to True it enables the Accounts provider, which locks
+# credentials to allow for parallel execution with pre-provisioned
+# accounts. It can only be used to run tests that ensure credentials
+# cleanup happens. It requires at least `2 * CONC` distinct accounts
+# configured in `test_accounts_file`, with CONC == the number of
+# concurrent test processes. (boolean value)
+#locking_credentials_provider = false
+
+# Path to the yaml file that contains the list of credentials to use
+# for running tests (string value)
+#test_accounts_file = etc/accounts.yaml
+
+
+[baremetal]
+
+#
+# From tempest.config
+#
+
+# Timeout for Ironic node to completely provision (integer value)
+#active_timeout = 300
+
+# Timeout for association of Nova instance and Ironic node (integer
+# value)
+#association_timeout = 30
+
+# Catalog type of the baremetal provisioning service (string value)
+#catalog_type = baremetal
+
+# Driver name which Ironic uses (string value)
+#driver = fake
+
+# Whether the Ironic nova-compute driver is enabled (boolean value)
+#driver_enabled = false
+
+# The endpoint type to use for the baremetal provisioning service
+# (string value)
+#endpoint_type = publicURL
+
+# Timeout for Ironic power transitions. (integer value)
+#power_timeout = 60
+
+# Timeout for unprovisioning an Ironic node. (integer value)
+#unprovision_timeout = 60
+
+
+[boto]
+
+#
+# From tempest.config
+#
+
+# AKI Kernel Image manifest (string value)
+#aki_manifest = cirros-0.3.0-x86_64-vmlinuz.manifest.xml
+
+# AMI Machine Image manifest (string value)
+#ami_manifest = cirros-0.3.0-x86_64-blank.img.manifest.xml
+
+# ARI Ramdisk Image manifest (string value)
+#ari_manifest = cirros-0.3.0-x86_64-initrd.manifest.xml
+
+# AWS Access Key (string value)
+#aws_access = <None>
+
+# AWS Secret Key (string value)
+#aws_secret = <None>
+
+# AWS Zone for EC2 tests (string value)
+#aws_zone = nova
+
+# Status Change Test Interval (integer value)
+#build_interval = 1
+
+# Status Change Timeout (integer value)
+#build_timeout = 60
+
+# EC2 URL (string value)
+#ec2_url = http://localhost:8773/services/Cloud
+
+# boto Http socket timeout (integer value)
+#http_socket_timeout = 3
+
+# Instance type (string value)
+#instance_type = m1.tiny
+
+# boto num_retries on error (integer value)
+#num_retries = 1
+
+# S3 Materials Path (string value)
+#s3_materials_path = /opt/stack/devstack/files/images/s3-materials/cirros-0.3.0
+
+# S3 URL (string value)
+#s3_url = http://localhost:8080
+
+
+[cli]
+
+#
+# From tempest.config
+#
+
+# directory where python client binaries are located (string value)
+cli_dir = /usr/bin
+
+# enable cli tests (boolean value)
+#enabled = true
+
+# Whether the tempest run location has access to the *-manage
+# commands. In a pure blackbox environment it will not. (boolean
+# value)
+#has_manage = true
+
+# Number of seconds to wait on a CLI timeout (integer value)
+#timeout = 15
+
+
+[compute]
+
+#
+# From tempest.config
+#
+
+# Time in seconds between build status checks. (integer value)
+#build_interval = 1
+
+# Timeout in seconds to wait for an instance to build. (integer value)
+#build_timeout = 300
+
+# Catalog type of the Compute service. (string value)
+#catalog_type = compute
+
+# Catalog type of the Compute v3 service. (string value)
+#catalog_v3_type = computev3
+
+# The endpoint type to use for the compute service. (string value)
+#endpoint_type = publicURL
+
+# Visible fixed network name (string value)
+#fixed_network_name = private
+
+# Valid primary flavor to use in tests. (string value)
+#flavor_ref = 1
+
+# Valid secondary flavor to be used in tests. (string value)
+#flavor_ref_alt = 2
+
+# Unallocated floating IP range, which will be used to test the
+# floating IP bulk feature for CRUD operation. (string value)
+#floating_ip_range = 10.0.0.0/29
+
+# Password used to authenticate to an instance using the alternate
+# image. (string value)
+#image_alt_ssh_password = password
+
+# User name used to authenticate to an instance using the alternate
+# image. (string value)
+#image_alt_ssh_user = root
+
+# Valid primary image reference to be used in tests. This is a
+# required option (string value)
+#image_ref = <None>
+
+# Valid secondary image reference to be used in tests. This is a
+# required option, but if only one image is available duplicate the
+# value of image_ref above (string value)
+#image_ref_alt = <None>
+
+# Password used to authenticate to an instance. (string value)
+#image_ssh_password = password
+
+# User name used to authenticate to an instance. (string value)
+#image_ssh_user = root
+
+# IP version used for SSH connections. (integer value)
+#ip_version_for_ssh = 4
+
+# Network used for SSH connections. (string value)
+#network_for_ssh = public
+
+# Path to a private key file for SSH access to remote hosts (string
+# value)
+#path_to_private_key = <None>
+
+# Timeout in seconds to wait for ping to succeed. (integer value)
+#ping_timeout = 120
+
+# Additional wait time for clean state, when there is no OS-EXT-STS
+# extension available (integer value)
+#ready_wait = 0
+
+# The compute region name to use. If empty, the value of
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
+
+# Should the tests ssh to instances? (boolean value)
+#run_ssh = false
+
+# Time in seconds before a shelved instance is eligible for removal
+# from a host. -1 never offload, 0 offload when shelved. This time
+# should be the same as the corresponding value in nova.conf, and some
+# tests will run for as long as this time. (integer value)
+#shelved_offload_time = 0
+
+# Auth method used to authenticate to the instance. Valid choices
+# are: keypair, configured, adminpass. keypair: start the servers with
+# an ssh keypair. configured: use the configured user and password.
+# adminpass: use the injected adminPass. disabled: avoid using ssh
+# when it is an option. (string value)
+#ssh_auth_method = keypair
+
+# Timeout in seconds to wait for output from ssh channel. (integer
+# value)
+#ssh_channel_timeout = 60
+
+# How to connect to the instance? fixed: use the first IP belonging to
+# the fixed network; floating: create and use a floating IP (string
+# value)
+#ssh_connect_method = fixed
+
+# Timeout in seconds to wait for authentication to succeed. (integer
+# value)
+#ssh_timeout = 300
+
+# User name used to authenticate to an instance. (string value)
+#ssh_user = root
+
+# Does SSH use Floating IPs? (boolean value)
+#use_floatingip_for_ssh = true
+
+# Expected device name when a volume is attached to an instance
+# (string value)
+#volume_device_name = vdb
+
+
+[compute-admin]
+
+#
+# From tempest.config
+#
+
+# Domain name for authentication as admin (Keystone V3). The same
+# domain applies to user and project (string value)
+#domain_name = <None>
+
+# API key to use when authenticating as admin. (string value)
+password = {{ NOVA_SERVICE_PASSWORD }}
+
+# Administrative Tenant name to use for Nova API requests. (string
+# value)
+tenant_name = service
+
+# Administrative Username to use for Nova API requests. (string value)
+username = {{ NOVA_SERVICE_USER }}
+
+
+[compute-feature-enabled]
+
+#
+# From tempest.config
+#
+
+# A list of enabled compute extensions with a special entry all which
+# indicates every extension is enabled. Each extension should be
+# specified with alias name. Empty list indicates all extensions are
+# disabled (list value)
+#api_extensions = all
+
+# If false, skip all nova v3 tests. (boolean value)
+api_v3 = false
+
+# A list of enabled v3 extensions with a special entry all which
+# indicates every extension is enabled. Each extension should be
+# specified with alias name. Empty list indicates all extensions are
+# disabled (list value)
+#api_v3_extensions = all
+
+# Does the test environment's block migration support cinder iSCSI
+# volumes (boolean value)
+#block_migrate_cinder_iscsi = false
+
+# Does the test environment use block devices for live migration
+# (boolean value)
+#block_migration_for_live_migration = false
+
+# Does the test environment support changing the admin password?
+# (boolean value)
+#change_password = false
+
+# Does the test environment support obtaining instance serial console
+# output? (boolean value)
+#console_output = true
+
+# If false, skip disk config tests (boolean value)
+#disk_config = true
+
+# Enables returning of the instance password by the relevant server
+# API calls such as create, rebuild or rescue. (boolean value)
+#enable_instance_password = true
+
+# Does the test environment support dynamic network interface
+# attachment? (boolean value)
+#interface_attach = true
+
+# Does the test environment support live migration? (boolean
+# value)
+#live_migration = false
+
+# Does the test environment support pausing? (boolean value)
+#pause = true
+
+# Enable RDP console. This configuration value should be the same as
+# [nova.rdp]->enabled in nova.conf (boolean value)
+#rdp_console = false
+
+# Does the test environment support instance rescue mode? (boolean
+# value)
+#rescue = true
+
+# Does the test environment support resizing? (boolean value)
+#resize = false
+
+# Does the test environment support shelving/unshelving? (boolean
+# value)
+#shelve = true
+
+# Does the test environment support creating snapshot images of
+# running instances? (boolean value)
+snapshot = true
+
+# Enable Spice console. This configuration value should be the same as
+# [nova.spice]->enabled in nova.conf (boolean value)
+spice_console = false
+
+# Does the test environment support suspend/resume? (boolean value)
+#suspend = true
+
+# Enable VNC console. This configuration value should be the same as
+# [nova.vnc]->vnc_enabled in nova.conf (boolean value)
+vnc_console = true
+
+# If false, skip all v2 API tests with XML (boolean value)
+#xml_api_v2 = true
+
+
+[dashboard]
+
+#
+# From tempest.config
+#
+
+# Where the dashboard can be found (string value)
+dashboard_url = http://{{ CONTROLLER_HOST_ADDRESS }}/horizon
+
+# Login page for the dashboard (string value)
+login_url = http://{{ CONTROLLER_HOST_ADDRESS }}/horizon/auth/login/
+
+
+[data_processing]
+
+#
+# From tempest.config
+#
+
+# Catalog type of the data processing service. (string value)
+#catalog_type = data_processing
+
+# The endpoint type to use for the data processing service. (string
+# value)
+#endpoint_type = publicURL
+
+
+[database]
+
+#
+# From tempest.config
+#
+
+# Catalog type of the Database service. (string value)
+#catalog_type = database
+
+# Current database version to use in database tests. (string value)
+#db_current_version = v1.0
+
+# Valid primary flavor to use in database tests. (string value)
+#db_flavor_ref = 1
+
+
+[debug]
+
+#
+# From tempest.config
+#
+
+# Enable diagnostic commands (boolean value)
+#enable = true
+
+# A regex to determine which requests should be traced. This is a
+# regex to match the caller for rest client requests to be able to
+# selectively trace calls out of specific classes and methods. It
+# largely exists for test development, and is not expected to be used
+# in a real deploy of tempest. This will be matched against the
+# discovered ClassName:method in the test environment. Expected
+# values for this field are: * ClassName:test_method_name - traces
+# one test_method * ClassName:setUp(Class) - traces specific setup
+# functions * ClassName:tearDown(Class) - traces specific teardown
+# functions * ClassName:_run_cleanups - traces the cleanup functions
+# If nothing is specified, this feature is not enabled. To trace
+# everything specify .* as the regex. (string value)
+#trace_requests =
+
+
+[identity]
+
+#
+# From tempest.config
+#
+
+# Admin domain name for authentication (Keystone V3). The same domain
+# applies to user and project (string value)
+#admin_domain_name = <None>
+
+# API key to use when authenticating as admin. (string value)
+admin_password = {{ KEYSTONE_ADMIN_PASSWORD }}
+
+# Role required to administrate keystone. (string value)
+admin_role = admin
+
+# Administrative Tenant name to use for Keystone API requests. (string
+# value)
+admin_tenant_name = admin
+
+# Administrative Username to use for Keystone API requests. (string
+# value)
+admin_username = admin
+
+# Alternate domain name for authentication (Keystone V3). The same
+# domain applies to user and project (string value)
+#alt_domain_name = <None>
+
+# API key to use when authenticating as alternate user. (string value)
+#alt_password = <None>
+
+# Alternate user's Tenant name to use for Nova API requests. (string
+# value)
+#alt_tenant_name = <None>
+
+# Username of alternate user to use for Nova API requests. (string
+# value)
+#alt_username = <None>
+
+# Identity API version to be used for authentication for API tests.
+# (string value)
+auth_version = v2
+
+# Catalog type of the Identity service. (string value)
+catalog_type = identity
+
+# Set to True if using self-signed SSL certificates. (boolean value)
+#disable_ssl_certificate_validation = false
+
+# Domain name for authentication (Keystone V3). The same domain applies
+# to user and project (string value)
+#domain_name = <None>
+
+# The endpoint type to use for the identity service. (string value)
+#endpoint_type = publicURL
+
+# API key to use when authenticating. (string value)
+password = {{ NOVA_SERVICE_PASSWORD }}
+
+# The identity region name to use. Also used as the other services'
+# region name unless they are set explicitly. If no such region is
+# found in the service catalog, the first found one is used. (string
+# value)
+#region = RegionOne
+
+# Tenant name to use for Nova API requests. (string value)
+tenant_name = service
+
+# Full URI of the OpenStack Identity API (Keystone), v2 (string value)
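+# (Port 35357 is Keystone's admin API endpoint.)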
+uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0/
+
+# Full URI of the OpenStack Identity API (Keystone), v3 (string value)
+#
+# Tempest complains if we don't set any uri_v3, even if it's disabled.
+uri_v3 = <None>
+
+# Username to use for Nova API requests. (string value)
+username = {{ NOVA_SERVICE_USER }}
+
+
+[identity-feature-enabled]
+
+#
+# From tempest.config
+#
+
+# Is the v2 identity API enabled (boolean value)
+api_v2 = true
+
+# Is the v3 identity API enabled (boolean value)
+api_v3 = false
+
+# Does the identity service have delegation and impersonation enabled
+# (boolean value)
+#trust = true
+
+
+[image]
+
+#
+# From tempest.config
+#
+
+# Catalog type of the Image service. (string value)
+catalog_type = image
+
+# The endpoint type to use for the image service. (string value)
+endpoint_type = publicURL
+
+# http accessible image (string value)
+http_image = http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz
+
+# The image region name to use. If empty, the value of identity.region
+# is used instead. If no such region is found in the service catalog,
+# the first found one is used. (string value)
+#region =
+
+
+[image-feature-enabled]
+
+#
+# From tempest.config
+#
+
+# Is the v1 image API enabled (boolean value)
+#api_v1 = true
+
+# Is the v2 image API enabled (boolean value)
+api_v2 = true
+
+
+[input-scenario]
+
+#
+# From tempest.config
+#
+
+# Matching flavors become parameters for scenario tests (string value)
+#flavor_regex = ^m1.nano$
+
+# Matching images become parameters for scenario tests (string value)
+#image_regex = ^cirros-0.3.1-x86_64-uec$
+
+# SSH verification in tests is skipped for matching images (string
+# value)
+#non_ssh_image_regex = ^.*[Ww]in.*$
+
+# List of users mapped to regexes matching image names. (string value)
+#ssh_user_regex = [["^.*[Cc]irros.*$", "root"]]
+
+
+[messaging]
+
+#
+# From tempest.config
+#
+
+# Catalog type of the Messaging service. (string value)
+#catalog_type = messaging
+
+# The maximum grace period for a claim (integer value)
+#max_claim_grace = 43200
+
+# The maximum ttl for a claim (integer value)
+#max_claim_ttl = 43200
+
+# The maximum size of a message body (integer value)
+#max_message_size = 262144
+
+# The maximum ttl for a message (integer value)
+#max_message_ttl = 1209600
+
+# The maximum number of messages per claim (integer value)
+#max_messages_per_claim = 20
+
+# The maximum number of queue messages per page when listing (or)
+# posting messages (integer value)
+#max_messages_per_page = 20
+
+# The maximum metadata size for a queue (integer value)
+#max_queue_metadata = 65536
+
+# The maximum number of queue records per page when listing queues
+# (integer value)
+#max_queues_per_page = 20
+
+
+[negative]
+
+#
+# From tempest.config
+#
+
+# Test generator class for all negative tests (string value)
+#test_generator = tempest.common.generator.negative_generator.NegativeTestGenerator
+
+
+[network]
+
+#
+# From tempest.config
+#
+
+# Time in seconds between network operation status checks. (integer
+# value)
+#build_interval = 1
+
+# Timeout in seconds to wait for network operation to complete.
+# (integer value)
+#build_timeout = 300
+
+# Catalog type of the Neutron service. (string value)
+#catalog_type = network
+
+# List of dns servers which should be used for subnet creation (list
+# value)
+#dns_servers = 8.8.8.8,8.8.4.4
+
+# The endpoint type to use for the network service. (string value)
+#endpoint_type = publicURL
+
+# Id of the public network that provides external connectivity (string
+# value)
+#public_network_id =
+
+# Id of the public router that provides external connectivity (string
+# value)
+#public_router_id =
+
+# The network region name to use. If empty, the value of
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
+
+# The cidr block to allocate tenant ipv4 subnets from (string value)
+#tenant_network_cidr = 10.100.0.0/16
+
+# The mask bits for tenant ipv4 subnets (integer value)
+#tenant_network_mask_bits = 28
+
+# The cidr block to allocate tenant ipv6 subnets from (string value)
+#tenant_network_v6_cidr = 2003::/48
+
+# The mask bits for tenant ipv6 subnets (integer value)
+#tenant_network_v6_mask_bits = 64
+
+# Whether tenant network connectivity should be evaluated directly
+# (boolean value)
+#tenant_networks_reachable = false
+
+
+[network-feature-enabled]
+
+#
+# From tempest.config
+#
+
+# A list of enabled network extensions with a special entry all which
+# indicates every extension is enabled. Empty list indicates all
+# extensions are disabled (list value)
+#api_extensions = all
+
+# Allow the execution of IPv6 tests (boolean value)
+#ipv6 = true
+
+# Allow the execution of IPv6 subnet tests that use the extended IPv6
+# attributes ipv6_ra_mode and ipv6_address_mode (boolean value)
+#ipv6_subnet_attributes = false
+
+
+[object-storage]
+
+#
+# From tempest.config
+#
+
+# Catalog type of the Object-Storage service. (string value)
+#catalog_type = object-store
+
+# Number of seconds to wait while looping to check the status of a
+# container to container synchronization (integer value)
+#container_sync_interval = 5
+
+# Number of seconds to wait for a container to container
+# synchronization to complete. (integer value)
+#container_sync_timeout = 120
+
+# The endpoint type to use for the object-store service. (string
+# value)
+#endpoint_type = publicURL
+
+# Role to add to users created for swift tests to enable creating
+# containers (string value)
+#operator_role = Member
+
+# The object-storage region name to use. If empty, the value of
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
+
+# User role that has reseller admin (string value)
+#reseller_admin_role = ResellerAdmin
+
+
+[object-storage-feature-enabled]
+
+#
+# From tempest.config
+#
+
+# Execute (old style) container-sync tests (boolean value)
+#container_sync = true
+
+# Execute discoverability tests (boolean value)
+#discoverability = true
+
+# A list of the enabled optional discoverable apis. A single entry,
+# all, indicates that all of these features are expected to be enabled
+# (list value)
+#discoverable_apis = all
+
+# Execute object-versioning tests (boolean value)
+#object_versioning = true
+
+
+[orchestration]
+
+#
+# From tempest.config
+#
+
+# Time in seconds between build status checks. (integer value)
+#build_interval = 1
+
+# Timeout in seconds to wait for a stack to build. (integer value)
+#build_timeout = 1200
+
+# Catalog type of the Orchestration service. (string value)
+#catalog_type = orchestration
+
+# The endpoint type to use for the orchestration service. (string
+# value)
+#endpoint_type = publicURL
+
+# Name of heat-cfntools enabled image to use when launching test
+# instances. (string value)
+#image_ref = <None>
+
+# Instance type for tests. Needs to be big enough for a full OS plus
+# the test workload (string value)
+#instance_type = m1.micro
+
+# Name of existing keypair to launch servers with. (string value)
+#keypair_name = <None>
+
+# Value must match heat configuration of the same name. (integer
+# value)
+#max_resources_per_stack = 1000
+
+# Value must match heat configuration of the same name. (integer
+# value)
+#max_template_size = 524288
+
+# The orchestration region name to use. If empty, the value of
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
+
+
+[scenario]
+
+#
+# From tempest.config
+#
+
+# AKI image file name (string value)
+#aki_img_file = cirros-0.3.1-x86_64-vmlinuz
+
+# AMI image file name (string value)
+#ami_img_file = cirros-0.3.1-x86_64-blank.img
+
+# ARI image file name (string value)
+#ari_img_file = cirros-0.3.1-x86_64-initrd
+
+# Image container format (string value)
+#img_container_format = bare
+
+# Directory containing image files (string value)
+#img_dir = /opt/stack/new/devstack/files/images/cirros-0.3.1-x86_64-uec
+
+# Image disk format (string value)
+#img_disk_format = qcow2
+
+# Image file name (string value)
+# Deprecated group/name - [DEFAULT]/qcow2_img_file
+#img_file = cirros-0.3.1-x86_64-disk.img
+
+# Specifies how many resources to request at once. Used for large
+# operations testing. (integer value)
+#large_ops_number = 0
+
+# ssh username for the image file (string value)
+#ssh_user = cirros
+
+
+[service_available]
+
+#
+# From tempest.config
+#
+
+# Whether or not Ceilometer is expected to be available (boolean
+# value)
+ceilometer = false
+
+# Whether or not cinder is expected to be available (boolean value)
+cinder = true
+
+# Whether or not glance is expected to be available (boolean value)
+glance = true
+
+# Whether or not Heat is expected to be available (boolean value)
+heat = false
+
+# Whether or not Horizon is expected to be available (boolean value)
+horizon = true
+
+# Whether or not Ironic is expected to be available (boolean value)
+ironic = false
+
+# Whether or not neutron is expected to be available (boolean value)
+neutron = true
+
+# Whether or not nova is expected to be available (boolean value)
+nova = true
+
+# Whether or not Sahara is expected to be available (boolean value)
+sahara = false
+
+# Whether or not swift is expected to be available (boolean value)
+swift = false
+
+# Whether or not Trove is expected to be available (boolean value)
+trove = false
+
+# Whether or not Zaqar is expected to be available (boolean value)
+zaqar = false
+
+
+[stress]
+
+#
+# From tempest.config
+#
+
+# Controller host. (string value)
+#controller = <None>
+
+# The number of threads created while running the stress test. (integer value)
+#default_thread_number_per_action = 4
+
+# Allows a full cleaning process after a stress test. Caution: this
+# cleanup will remove every object of every tenant. (boolean value)
+#full_clean_stack = false
+
+# Prevent the cleaning (tearDownClass()) between each stress test run
+# if an exception occurs during this run. (boolean value)
+#leave_dirty_stack = false
+
+# Time (in seconds) between log file error checks. (integer value)
+#log_check_interval = 60
+
+# Maximum number of instances to create during test. (integer value)
+#max_instances = 16
+
+# Directory containing log files on the compute nodes (string value)
+#nova_logdir = <None>
+
+# Controller host. (string value)
+#target_controller = <None>
+
+# regexp for list of log files. (string value)
+#target_logfiles = <None>
+
+# Path to private key. (string value)
+#target_private_key_path = <None>
+
+# ssh user. (string value)
+#target_ssh_user = <None>
+
+
+[telemetry]
+
+#
+# From tempest.config
+#
+
+# Catalog type of the Telemetry service. (string value)
+#catalog_type = metering
+
+# The endpoint type to use for the telemetry service. (string value)
+#endpoint_type = publicURL
+
+# This variable is used as a flag to enable notification tests (boolean
+# value)
+#too_slow_to_test = true
+
+
+[volume]
+
+#
+# From tempest.config
+#
+
+# Name of the backend1 (must be declared in cinder.conf) (string
+# value)
+backend1_name = LVM_iSCSI
+
+# Name of the backend2 (must be declared in cinder.conf) (string
+# value)
+#backend2_name = BACKEND_2
+
+# Time in seconds between volume availability checks. (integer value)
+#build_interval = 1
+
+# Timeout in seconds to wait for a volume to become available.
+# (integer value)
+#build_timeout = 300
+
+# Catalog type of the Volume Service (string value)
+catalog_type = volume
+
+# Disk format to use when copying a volume to image (string value)
+disk_format = raw
+
+# The endpoint type to use for the volume service. (string value)
+endpoint_type = publicURL
+
+# The volume region name to use. If empty, the value of
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
+
+# Backend protocol to target when creating volume types (string value)
+storage_protocol = iSCSI
+
+# Backend vendor to target when creating volume types (string value)
+#vendor_name = Open Source
+
+# Default size in GB for volumes created by volumes tests (integer
+# value)
+volume_size = 1
+
+
+[volume-feature-enabled]
+
+#
+# From tempest.config
+#
+
+# A list of enabled volume extensions with a special entry all which
+# indicates every extension is enabled. Empty list indicates all
+# extensions are disabled (list value)
+#api_extensions = all
+
+# Is the v1 volume API enabled (boolean value)
+api_v1 = true
+
+# Is the v2 volume API enabled (boolean value)
+api_v2 = true
+
+# Runs Cinder volumes backup test (boolean value)
+backup = true
+
+# Runs Cinder multi-backend test (requires 2 backends) (boolean value)
+multi_backend = false
+
+# Runs Cinder volume snapshot test (boolean value)
+snapshot = true
diff --git a/install-files/openstack/manifest b/install-files/openstack/manifest
new file mode 100644
index 00000000..aa4d5430
--- /dev/null
+++ b/install-files/openstack/manifest
@@ -0,0 +1,190 @@
+0040755 0 0 /etc/horizon
+0100644 0 0 /etc/horizon/apache-horizon.conf
+0040755 0 0 /etc/horizon/openstack_dashboard
+0100644 0 0 /etc/horizon/openstack_dashboard/local_settings.py
+template 0100644 0 0 /etc/tempest/tempest.conf
+0040755 0 0 /usr/share/openstack
+0100644 0 0 /usr/share/openstack/hosts
+0040755 0 0 /usr/share/openstack/ceilometer
+0100644 0 0 /usr/share/openstack/ceilometer-config.yml
+0100644 0 0 /usr/share/openstack/ceilometer-db.yml
+0100644 0 0 /usr/share/openstack/ceilometer/ceilometer.conf
+0040755 0 0 /usr/share/openstack/cinder
+0100644 0 0 /usr/share/openstack/cinder-config.yml
+0100644 0 0 /usr/share/openstack/cinder-db.yml
+0100644 0 0 /usr/share/openstack/cinder-lvs.yml
+0100644 0 0 /usr/share/openstack/cinder/cinder.conf
+0100644 0 0 /usr/share/openstack/cinder/api-paste.ini
+0100644 0 0 /usr/share/openstack/cinder/policy.json
+0040755 0 0 /usr/share/openstack/extras
+0100644 0 0 /usr/share/openstack/extras/00-disable-device.network
+0100644 0 0 /usr/share/openstack/extras/60-device-dhcp.network
+0100644 0 0 /usr/share/openstack/glance.yml
+0040755 0 0 /usr/share/openstack/glance
+0100644 0 0 /usr/share/openstack/glance/logging.conf
+0100644 0 0 /usr/share/openstack/glance/glance-api.conf
+0100644 0 0 /usr/share/openstack/glance/glance-registry.conf
+0100644 0 0 /usr/share/openstack/glance/glance-scrubber.conf
+0100644 0 0 /usr/share/openstack/glance/glance-cache.conf
+0100644 0 0 /usr/share/openstack/glance/schema-image.json
+0100644 0 0 /usr/share/openstack/glance/policy.json
+0100644 0 0 /usr/share/openstack/glance/glance-api-paste.ini
+0100644 0 0 /usr/share/openstack/glance/glance-registry-paste.ini
+0100644 0 0 /usr/share/openstack/horizon.yml
+0040755 0 0 /usr/share/openstack/ironic
+0100644 0 0 /usr/share/openstack/ironic.yml
+0100644 0 0 /usr/share/openstack/ironic/ironic.conf
+0100644 0 0 /usr/share/openstack/ironic/policy.json
+0100644 0 0 /usr/share/openstack/iscsi.yml
+0100644 0 0 /usr/share/openstack/keystone.yml
+0040755 0 0 /usr/share/openstack/keystone
+0100644 0 0 /usr/share/openstack/keystone/logging.conf
+0100644 0 0 /usr/share/openstack/keystone/keystone.conf
+0100644 0 0 /usr/share/openstack/keystone/policy.json
+0100644 0 0 /usr/share/openstack/keystone/keystone-paste.ini
+0100644 0 0 /usr/share/openstack/network.yml
+0040755 0 0 /usr/share/openstack/neutron
+0100644 0 0 /usr/share/openstack/neutron-config.yml
+0100644 0 0 /usr/share/openstack/neutron-db.yml
+0100644 0 0 /usr/share/openstack/neutron/neutron.conf
+0100644 0 0 /usr/share/openstack/neutron/api-paste.ini
+0100644 0 0 /usr/share/openstack/neutron/policy.json
+0100644 0 0 /usr/share/openstack/neutron/l3_agent.ini
+0100644 0 0 /usr/share/openstack/neutron/dhcp_agent.ini
+0100644 0 0 /usr/share/openstack/neutron/lbaas_agent.ini
+0100644 0 0 /usr/share/openstack/neutron/metadata_agent.ini
+0100644 0 0 /usr/share/openstack/neutron/fwaas_driver.ini
+0100644 0 0 /usr/share/openstack/neutron/metering_agent.ini
+0100644 0 0 /usr/share/openstack/neutron/vpn_agent.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/
+0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch
+0100644 0 0 /usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl
+0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs
+0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs
+0100644 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README
+0100644 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README
+0040755 0 0 /usr/share/openstack/neutron/plugins/brocade
+0100644 0 0 /usr/share/openstack/neutron/plugins/brocade/brocade.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/cisco
+0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/embrane
+0100644 0 0 /usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/hyperv
+0100644 0 0 /usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/ibm
+0100644 0 0 /usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/linuxbridge
+0100644 0 0 /usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/metaplugin
+0100644 0 0 /usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/midonet
+0100644 0 0 /usr/share/openstack/neutron/plugins/midonet/midonet.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/ml2
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini
+0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/mlnx
+0100644 0 0 /usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/nec
+0100644 0 0 /usr/share/openstack/neutron/plugins/nec/nec.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/nuage
+0100644 0 0 /usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/oneconvergence
+0100644 0 0 /usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/opencontrail
+0100644 0 0 /usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/openvswitch
+0100644 0 0 /usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/plumgrid
+0100644 0 0 /usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini
+0040755 0 0 /usr/share/openstack/neutron/plugins/vmware
+0100644 0 0 /usr/share/openstack/neutron/plugins/vmware/nsx.ini
+0040755 0 0 /usr/share/openstack/nova
+0100644 0 0 /usr/share/openstack/nova-config.yml
+0100644 0 0 /usr/share/openstack/nova-db.yml
+0100644 0 0 /usr/share/openstack/nova/logging.conf
+0100644 0 0 /usr/share/openstack/nova/nova.conf
+0100644 0 0 /usr/share/openstack/nova/nova-compute.conf
+0100644 0 0 /usr/share/openstack/nova/policy.json
+0100644 0 0 /usr/share/openstack/nova/cells.json
+0100644 0 0 /usr/share/openstack/nova/api-paste.ini
+0100644 0 0 /usr/share/openstack/openvswitch.yml
+0040755 0 0 /usr/share/openstack/postgres
+0100644 0 0 /usr/share/openstack/postgres.yml
+0100644 0 0 /usr/share/openstack/postgres/pg_hba.conf
+0100644 0 0 /usr/share/openstack/postgres/postgresql.conf
+0040755 0 0 /usr/share/openstack/rabbitmq
+0100644 0 0 /usr/share/openstack/rabbitmq/rabbitmq-env.conf
+0100644 0 0 /usr/share/openstack/rabbitmq/rabbitmq.config
+0040755 0 0 /usr/lib/sysctl.d
+0100644 0 0 /usr/lib/sysctl.d/neutron.conf
+0100644 0 0 /usr/lib/systemd/system/apache-httpd.service
+0100644 0 0 /usr/lib/systemd/system/iscsi-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-keystone.service
+0100644 0 0 /usr/lib/systemd/system/openstack-keystone-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-glance-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-glance-api.service
+0100644 0 0 /usr/lib/systemd/system/openstack-glance-registry.service
+0100644 0 0 /usr/lib/systemd/system/openstack-horizon-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ironic-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ironic-api.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ironic-conductor.service
+0100644 0 0 /usr/lib/systemd/system/openstack-network-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-neutron-config-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-neutron-db-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-neutron-server.service
+0100644 0 0 /usr/lib/systemd/system/openstack-neutron-metadata-agent.service
+0100644 0 0 /usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service
+0100644 0 0 /usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-neutron-dhcp-agent.service
+0100644 0 0 /usr/lib/systemd/system/openstack-neutron-l3-agent.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-config-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-db-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-compute.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-conductor.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-api.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-scheduler.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-consoleauth.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-novncproxy.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-cert.service
+0100644 0 0 /usr/lib/systemd/system/openstack-nova-serialproxy.service
+0100644 0 0 /usr/lib/systemd/system/rabbitmq-server.service
+0100644 0 0 /usr/lib/systemd/system/openstack-cinder-config-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-cinder-db-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-cinder-lv-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-cinder-api.service
+0100644 0 0 /usr/lib/systemd/system/openstack-cinder-scheduler.service
+0100644 0 0 /usr/lib/systemd/system/openstack-cinder-volume.service
+0100644 0 0 /usr/lib/systemd/system/openstack-cinder-backup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-config-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-db-setup.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-api.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-central.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-collector.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-compute.service
+0100644 0 0 /usr/lib/systemd/system/openstack-ceilometer-notification.service
+0100644 0 0 /usr/lib/systemd/system/openvswitch-setup.service
+0100644 0 0 /usr/lib/systemd/system/openvswitch-db-server.service
+0100644 0 0 /usr/lib/systemd/system/openvswitch.service
+0100644 0 0 /usr/lib/systemd/system/postgres-server.service
+0100644 0 0 /usr/lib/systemd/system/postgres-server-setup.service
+0100644 0 0 /usr/share/openstack/swift-controller.yml
+0100644 0 0 /usr/lib/systemd/system/swift-controller-setup.service
+0100644 0 0 /usr/lib/systemd/system/swift-proxy.service
+0040755 0 0 /usr/share/swift
+0040755 0 0 /usr/share/swift/etc
+0040755 0 0 /usr/share/swift/etc/swift
+0100644 0 0 /usr/share/swift/etc/swift/proxy-server.j2
diff --git a/install-files/openstack/usr/lib/sysctl.d/neutron.conf b/install-files/openstack/usr/lib/sysctl.d/neutron.conf
new file mode 100644
index 00000000..644ca116
--- /dev/null
+++ b/install-files/openstack/usr/lib/sysctl.d/neutron.conf
@@ -0,0 +1,3 @@
+# Disable rp filtering; enabling forwarding is handled by networkd
+net.ipv4.conf.all.rp_filter=0
+net.ipv4.conf.default.rp_filter=0
diff --git a/install-files/openstack/usr/lib/systemd/system/apache-httpd.service b/install-files/openstack/usr/lib/systemd/system/apache-httpd.service
new file mode 100644
index 00000000..e2a840c6
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/apache-httpd.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Apache Web Server
+After=network.target remote-fs.target nss-lookup.target
+Wants=network.target
+
+[Service]
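+# apachectl start daemonizes httpd; Type=forking with PIDFile lets
+# systemd track the daemonized main process.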
+Type=forking
+PIDFile=/var/run/httpd.pid
+ExecStart=/usr/sbin/apachectl start
+ExecStop=/usr/sbin/apachectl graceful-stop
+ExecReload=/usr/sbin/apachectl graceful
+PrivateTmp=true
+LimitNOFILE=infinity
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/iscsi-setup.service b/install-files/openstack/usr/lib/systemd/system/iscsi-setup.service
new file mode 100644
index 00000000..4cb10045
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/iscsi-setup.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Run iscsi-setup Ansible scripts
+Before=iscsid.service target.service
+Wants=iscsid.service target.service
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
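+# RemainAfterExit keeps this oneshot unit "active (exited)" once the
+# playbook has run, so it is not re-executed when other units pull it in.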
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/iscsi.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service
new file mode 100644
index 00000000..6e3ada59
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-evaluator.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack ceilometer alarm evaluation service
+ConditionPathExists=/etc/ceilometer/ceilometer.conf
+After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ceilometer
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ceilometer-alarm-evaluator --config-file /etc/ceilometer/ceilometer.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service
new file mode 100644
index 00000000..7a3e1c91
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-alarm-notifier.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack ceilometer alarm notification service
+ConditionPathExists=/etc/ceilometer/ceilometer.conf
+After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ceilometer
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ceilometer-alarm-notifier --config-file /etc/ceilometer/ceilometer.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-api.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-api.service
new file mode 100644
index 00000000..eb0293bf
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-api.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack ceilometer API service
+ConditionPathExists=/etc/ceilometer/ceilometer.conf
+After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ceilometer
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ceilometer-api --config-file /etc/ceilometer/ceilometer.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-central.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-central.service
new file mode 100644
index 00000000..a1bc11ee
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-central.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack ceilometer central agent
+ConditionPathExists=/etc/ceilometer/ceilometer.conf
+After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service openstack-ceilometer-collector.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ceilometer
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ceilometer-agent-central --config-file /etc/ceilometer/ceilometer.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-collector.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-collector.service
new file mode 100644
index 00000000..dafc3ac7
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-collector.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack ceilometer collection service
+ConditionPathExists=/etc/ceilometer/ceilometer.conf
+After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ceilometer
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ceilometer-collector --config-file /etc/ceilometer/ceilometer.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-compute.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-compute.service
new file mode 100644
index 00000000..9fe8a1e6
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-compute.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack ceilometer compute agent
+ConditionPathExists=/etc/ceilometer/ceilometer.conf
+After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ceilometer
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ceilometer-agent-compute --config-file /etc/ceilometer/ceilometer.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-config-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-config-setup.service
new file mode 100644
index 00000000..c3e809d7
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-config-setup.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Run ceilometer-config-setup Ansible scripts
+ConditionPathExists=/etc/openstack/ceilometer.conf
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/ceilometer-config.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-db-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-db-setup.service
new file mode 100644
index 00000000..7a785227
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-db-setup.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Run ceilometer-db-setup Ansible scripts
+ConditionPathExists=/etc/openstack/ceilometer.conf
+After=network-online.target postgres-server-setup.service openstack-keystone-setup.service openstack-ceilometer-config-setup.service
+Wants=network-online.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/ceilometer-db.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-notification.service b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-notification.service
new file mode 100644
index 00000000..6696116e
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-ceilometer-notification.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack ceilometer notification agent
+ConditionPathExists=/etc/ceilometer/ceilometer.conf
+After=network-online.target openstack-ceilometer-config-setup.service openstack-ceilometer-db-setup.service openstack-ceilometer-collector.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ceilometer
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ceilometer-agent-notification --config-file /etc/ceilometer/ceilometer.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-cinder-api.service b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-api.service
new file mode 100644
index 00000000..a284f31d
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-api.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Volume Service (code-named Cinder) API server
+ConditionPathExists=/etc/cinder/cinder.conf
+After=network-online.target openstack-cinder-config-setup.service openstack-cinder-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=cinder
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/cinder-api --config-file /etc/cinder/cinder.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-cinder-backup.service b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-backup.service
new file mode 100644
index 00000000..c14e13aa
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-backup.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Cinder backup server
+ConditionPathExists=/etc/cinder/cinder.conf
+After=network-online.target openstack-cinder-config-setup.service openstack-cinder-db-setup.service openstack-cinder-volume.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=cinder
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/cinder-backup --config-file /etc/cinder/cinder.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-cinder-config-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-config-setup.service
new file mode 100644
index 00000000..1c966933
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-config-setup.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Run cinder-config-setup Ansible scripts
+ConditionPathExists=/etc/openstack/cinder.conf
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/cinder-config.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-cinder-db-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-db-setup.service
new file mode 100644
index 00000000..a3c66d67
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-db-setup.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Run cinder-db-setup Ansible scripts
+ConditionPathExists=/etc/openstack/cinder.conf
+After=network-online.target postgres-server-setup.service openstack-keystone-setup.service openstack-cinder-config-setup.service
+Wants=network-online.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/cinder-db.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-cinder-lv-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-lv-setup.service
new file mode 100644
index 00000000..82e9b08d
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-lv-setup.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Run cinder-lvs-setup Ansible scripts
+ConditionPathExists=/etc/openstack/cinder.conf
+Wants=lvm2-lvmetad.service
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/cinder-lvs.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-cinder-scheduler.service b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-scheduler.service
new file mode 100644
index 00000000..f205aaff
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-scheduler.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Cinder scheduler server
+ConditionPathExists=/etc/cinder/cinder.conf
+After=network-online.target openstack-cinder-config-setup.service openstack-cinder-db-setup.service openstack-cinder-volume.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=cinder
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/cinder-scheduler --config-file /etc/cinder/cinder.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-cinder-volume.service b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-volume.service
new file mode 100644
index 00000000..c56ee693
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-cinder-volume.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Cinder volume server
+ConditionPathExists=/etc/cinder/cinder.conf
+After=network-online.target openstack-cinder-config-setup.service openstack-cinder-db-setup.service openstack-cinder-lv-setup.service lvm2-lvmetad.service iscsid.service target.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=cinder
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/cinder-volume --config-file /etc/cinder/cinder.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-glance-api.service b/install-files/openstack/usr/lib/systemd/system/openstack-glance-api.service
new file mode 100644
index 00000000..4c34ff10
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-glance-api.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=OpenStack Image Service (code-named Glance) API server
+ConditionPathExists=/etc/glance/glance-api.conf
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=glance
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/glance-api --config-file /etc/glance/glance-api.conf
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-glance-registry.service b/install-files/openstack/usr/lib/systemd/system/openstack-glance-registry.service
new file mode 100644
index 00000000..d53c8b33
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-glance-registry.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=OpenStack Image Service (code-named Glance) Registry server
+ConditionPathExists=/etc/glance/glance-registry.conf
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=glance
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/glance-registry --config-file /etc/glance/glance-registry.conf
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-glance-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-glance-setup.service
new file mode 100644
index 00000000..43810797
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-glance-setup.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Run glance-setup Ansible scripts
+ConditionPathExists=/etc/openstack/glance.conf
+After=local-fs.target network-online.target postgres-server-setup.service openstack-keystone-setup.service
+Wants=network-online.target
+
+[Service]
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/glance.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-horizon-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-horizon-setup.service
new file mode 100644
index 00000000..9ec3197a
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-horizon-setup.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=Run horizon-setup Ansible scripts
+After=local-fs.target
+Before=apache-httpd.service
+
+[Service]
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/horizon.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ironic-api.service b/install-files/openstack/usr/lib/systemd/system/openstack-ironic-api.service
new file mode 100644
index 00000000..5a286a95
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-ironic-api.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=OpenStack Bare Metal Provisioning Service (code-named Ironic) API server
+ConditionPathExists=/etc/ironic/ironic.conf
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ironic
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ironic-api --config-file /etc/ironic/ironic.conf
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ironic-conductor.service b/install-files/openstack/usr/lib/systemd/system/openstack-ironic-conductor.service
new file mode 100644
index 00000000..b3b226e0
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-ironic-conductor.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=OpenStack Bare Metal Provisioning Service (code-named Ironic) Conductor server
+ConditionPathExists=/etc/ironic/ironic.conf
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=ironic
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/ironic-conductor --config-file /etc/ironic/ironic.conf
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-ironic-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-ironic-setup.service
new file mode 100644
index 00000000..e3a58eb5
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-ironic-setup.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Run ironic-setup Ansible scripts
+ConditionPathExists=/etc/openstack/ironic.conf
+After=local-fs.target network-online.target postgres-server-setup.service openstack-keystone-setup.service
+Wants=network-online.target
+
+[Service]
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/ironic.yml
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-keystone-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-keystone-setup.service
new file mode 100644
index 00000000..db9d0b2b
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-keystone-setup.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=Run keystone-setup Ansible scripts
+ConditionPathExists=/etc/openstack/keystone.conf
+After=local-fs.target network-online.target postgres-server-setup.service
+Wants=network-online.target
+
+[Service]
+# Oneshot, since other setup services have to wait until this service finishes
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/keystone.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-keystone.service b/install-files/openstack/usr/lib/systemd/system/openstack-keystone.service
new file mode 100644
index 00000000..6f6ff644
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-keystone.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=OpenStack Identity Service (code-named Keystone)
+ConditionPathExists=/etc/keystone/keystone.conf
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=notify
+Restart=always
+User=keystone
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/keystone-all --config-file /etc/keystone/keystone.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-network-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-network-setup.service
new file mode 100644
index 00000000..021370d9
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-network-setup.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Run Ansible scripts to configure internal network for OpenStack
+After=openvswitch-setup.service openvswitch.service
+Before=systemd-networkd.service
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/network.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-neutron-config-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-config-setup.service
new file mode 100644
index 00000000..b74f44ab
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-config-setup.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Run neutron-config-setup Ansible scripts
+ConditionPathExists=/etc/openstack/neutron.conf
+After=network-online.target openstack-keystone-setup.service
+Wants=network-online.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/neutron-config.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-neutron-db-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-db-setup.service
new file mode 100644
index 00000000..5d07da2e
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-db-setup.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Run neutron-db-setup Ansible scripts
+ConditionPathExists=/etc/openstack/neutron.conf
+After=network-online.target postgres-server-setup.service openstack-keystone-setup.service openstack-neutron-config-setup.service
+Wants=network-online.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/neutron-db.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-neutron-dhcp-agent.service b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-dhcp-agent.service
new file mode 100644
index 00000000..9080f3c1
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-dhcp-agent.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=Neutron DHCP Agent
+ConditionPathExists=/etc/neutron/neutron.conf
+After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service openstack-neutron-ovs-cleanup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=neutron
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/neutron-dhcp-agent \
+ --config-file=/etc/neutron/neutron.conf \
+ --config-file=/etc/neutron/dhcp_agent.ini
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-neutron-l3-agent.service b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-l3-agent.service
new file mode 100644
index 00000000..76efea5c
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-l3-agent.service
@@ -0,0 +1,18 @@
+[Unit]
+Description=Neutron Layer 3 Agent
+ConditionPathExists=/etc/neutron/neutron.conf
+After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service openstack-neutron-ovs-cleanup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=neutron
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/neutron-l3-agent \
+ --config-file=/etc/neutron/neutron.conf \
+ --config-file=/etc/neutron/l3_agent.ini \
+ --config-file=/etc/neutron/fwaas_driver.ini
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-neutron-metadata-agent.service b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-metadata-agent.service
new file mode 100644
index 00000000..20540e4c
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-metadata-agent.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=Neutron Metadata Plugin Agent
+ConditionPathExists=/etc/neutron/neutron.conf
+After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=neutron
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/neutron-metadata-agent \
+ --config-file=/etc/neutron/neutron.conf \
+ --config-file=/etc/neutron/metadata_agent.ini
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service
new file mode 100644
index 00000000..f5709028
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-ovs-cleanup.service
@@ -0,0 +1,18 @@
+[Unit]
+Description=Neutron OVS cleanup
+ConditionPathExists=/etc/neutron/neutron.conf
+ConditionFileIsExecutable=/usr/bin/neutron-ovs-cleanup
+After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service openvswitch.service
+Before=openstack-neutron-plugin-openvswitch-agent.service
+Wants=network-online.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+StandardOutput=null
+StandardError=null
+User=neutron
+ExecStart=/usr/bin/neutron-ovs-cleanup --config-file /etc/neutron/neutron.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service
new file mode 100644
index 00000000..6c579a62
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-plugin-openvswitch-agent.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=Neutron Open vSwitch Plugin Agent
+ConditionPathExists=/etc/neutron/neutron.conf
+After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=neutron
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/neutron-openvswitch-agent \
+ --config-file=/etc/neutron/neutron.conf \
+ --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-neutron-server.service b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-server.service
new file mode 100644
index 00000000..6376c3d8
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-neutron-server.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=Neutron API Server
+ConditionPathExists=/etc/neutron/neutron.conf
+After=network-online.target openstack-neutron-config-setup.service openstack-neutron-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=neutron
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/neutron-server \
+ --config-file=/etc/neutron/neutron.conf \
+ --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-api.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-api.service
new file mode 100644
index 00000000..521353db
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-api.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Compute Service (code-named Nova) API server
+ConditionPathExists=/etc/nova/nova.conf
+After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=nova
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/nova-api --config-file /etc/nova/nova.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-cert.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-cert.service
new file mode 100644
index 00000000..b3733816
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-cert.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Nova Cert
+ConditionPathExists=/etc/nova/nova.conf
+After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=nova
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/nova-cert --config-file /etc/nova/nova.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-compute.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-compute.service
new file mode 100644
index 00000000..4f9b8196
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-compute.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=OpenStack Compute Service (code-named Nova) compute server
+ConditionPathExists=/etc/nova/nova.conf
+After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service libvirtd.service
+Wants=network-online.target
+Requires=libvirtd.service
+
+[Service]
+Type=simple
+User=nova
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-conductor.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-conductor.service
new file mode 100644
index 00000000..4c0d7d43
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-conductor.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Database-access support for Compute nodes (nova-conductor)
+ConditionPathExists=/etc/nova/nova.conf
+After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service libvirtd.service
+Wants=network-online.target
+Requires=libvirtd.service
+
+[Service]
+Type=simple
+User=nova
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/nova-conductor --config-file /etc/nova/nova.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-config-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-config-setup.service
new file mode 100644
index 00000000..df669aa9
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-config-setup.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Run nova-config-setup Ansible scripts
+ConditionPathExists=/etc/openstack/nova.conf
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/nova-config.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-consoleauth.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-consoleauth.service
new file mode 100644
index 00000000..e22780a9
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-consoleauth.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Console Auth (nova-consoleauth)
+ConditionPathExists=/etc/nova/nova.conf
+After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=nova
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/nova-consoleauth --config-file /etc/nova/nova.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-db-setup.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-db-setup.service
new file mode 100644
index 00000000..8e004327
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-db-setup.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Run nova-db-setup Ansible scripts
+ConditionPathExists=/etc/openstack/nova.conf
+After=network-online.target postgres-server-setup.service openstack-keystone-setup.service openstack-nova-config-setup.service
+Wants=network-online.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/nova-db.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-novncproxy.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-novncproxy.service
new file mode 100644
index 00000000..8cbb20fd
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-novncproxy.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Nova NoVNC proxy
+ConditionPathExists=/etc/nova/nova.conf
+After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=nova
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/nova-novncproxy --config-file /etc/nova/nova.conf --web /usr/share/novnc
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-scheduler.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-scheduler.service
new file mode 100644
index 00000000..e89f0d3e
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-scheduler.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Nova Scheduler
+ConditionPathExists=/etc/nova/nova.conf
+After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=nova
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/nova-scheduler --config-file /etc/nova/nova.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openstack-nova-serialproxy.service b/install-files/openstack/usr/lib/systemd/system/openstack-nova-serialproxy.service
new file mode 100644
index 00000000..30af8305
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openstack-nova-serialproxy.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Nova Serial Proxy
+ConditionPathExists=/etc/nova/nova.conf
+After=network-online.target openstack-nova-config-setup.service openstack-nova-db-setup.service
+Wants=network-online.target
+
+[Service]
+Type=simple
+User=nova
+StandardOutput=null
+StandardError=null
+ExecStart=/usr/bin/nova-serialproxy --config-file /etc/nova/nova.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openvswitch-db-server.service b/install-files/openstack/usr/lib/systemd/system/openvswitch-db-server.service
new file mode 100644
index 00000000..34a7c812
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openvswitch-db-server.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Open vSwitch Database Server Daemon
+After=local-fs.target
+
+[Service]
+Type=forking
+ExecStartPre=-/usr/bin/mkdir -p /var/run/openvswitch
+ExecStart=/usr/sbin/ovsdb-server --remote=punix:/var/run/openvswitch/db.sock --remote=db:Open_vSwitch,Open_vSwitch,manager_options --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --pidfile --detach
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/install-files/openstack/usr/lib/systemd/system/openvswitch-setup.service b/install-files/openstack/usr/lib/systemd/system/openvswitch-setup.service
new file mode 100644
index 00000000..8393ebbc
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openvswitch-setup.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Run openvswitch-setup Ansible scripts
+After=local-fs.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -i /usr/share/openstack/hosts /usr/share/openstack/openvswitch.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/openvswitch.service b/install-files/openstack/usr/lib/systemd/system/openvswitch.service
new file mode 100644
index 00000000..113911f6
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/openvswitch.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Open vSwitch Daemon
+Before=network-pre.target
+Wants=network-pre.target
+
+[Service]
+Type=forking
+ExecStartPre=-/usr/bin/mkdir -p /var/run/openvswitch
+ExecStart=/usr/sbin/ovs-vswitchd --pidfile --detach
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/postgres-server-setup.service b/install-files/openstack/usr/lib/systemd/system/postgres-server-setup.service
new file mode 100644
index 00000000..202c0636
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/postgres-server-setup.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Run postgres-setup Ansible scripts
+ConditionPathExists=/etc/openstack/postgres.conf
+After=local-fs.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -i /usr/share/openstack/hosts /usr/share/openstack/postgres.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/postgres-server.service b/install-files/openstack/usr/lib/systemd/system/postgres-server.service
new file mode 100644
index 00000000..9e11f26d
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/postgres-server.service
@@ -0,0 +1,26 @@
+[Unit]
+Description=PostgreSQL database server
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=forking
+TimeoutSec=120
+User=postgres
+Group=postgres
+
+Environment=PGROOT=/var/lib/pgsql
+
+SyslogIdentifier=postgres
+PIDFile=/var/lib/pgsql/data/postmaster.pid
+
+ExecStart=/usr/bin/pg_ctl -s -D ${PGROOT}/data start -w -t 120
+ExecReload=/usr/bin/pg_ctl -s -D ${PGROOT}/data reload
+ExecStop=/usr/bin/pg_ctl -s -D ${PGROOT}/data stop -m fast
+
+# Due to PostgreSQL's use of shared memory, OOM killer is often overzealous in
+# killing Postgres, so adjust it downward
+OOMScoreAdjust=-200
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/rabbitmq-server.service b/install-files/openstack/usr/lib/systemd/system/rabbitmq-server.service
new file mode 100644
index 00000000..1a20f3e4
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/rabbitmq-server.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=RabbitMQ broker
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=notify
+User=rabbitmq
+Group=rabbitmq
+Environment=HOME=/var/lib/rabbitmq
+WorkingDirectory=/var/lib/rabbitmq
+ExecStart=/usr/sbin/rabbitmq-server
+ExecStop=/usr/sbin/rabbitmqctl stop
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/swift-controller-setup.service b/install-files/openstack/usr/lib/systemd/system/swift-controller-setup.service
new file mode 100644
index 00000000..ccfbcbe6
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/swift-controller-setup.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Run swift-controller-setup (once)
+After=local-fs.target network-online.target postgres-server-setup.service openstack-keystone-setup.service
+Wants=network-online.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/usr/bin/ansible-playbook -v -M /usr/share/ansible/ansible-openstack-modules -i /usr/share/openstack/hosts /usr/share/openstack/swift-controller.yml
+Restart=no
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/lib/systemd/system/swift-proxy.service b/install-files/openstack/usr/lib/systemd/system/swift-proxy.service
new file mode 100644
index 00000000..7b0a2e17
--- /dev/null
+++ b/install-files/openstack/usr/lib/systemd/system/swift-proxy.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=OpenStack Swift Proxy Server
+After=network-online.target swift-controller-setup.service memcached.service
+Wants=network-online.target
+
+[Service]
+Type=forking
+PIDFile=/var/run/swift/proxy-server.pid
+Restart=on-failure
+ExecStart=/usr/bin/swift-init proxy-server start
+ExecStop=/usr/bin/swift-init proxy-server stop
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install-files/openstack/usr/share/openstack/ceilometer-config.yml b/install-files/openstack/usr/share/openstack/ceilometer-config.yml
new file mode 100644
index 00000000..9850d84d
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/ceilometer-config.yml
@@ -0,0 +1,36 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/ceilometer.conf"
+ tasks:
+# Configure ceilometer
+ - name: Create the ceilometer user.
+ user:
+ name: ceilometer
+      comment: OpenStack Ceilometer Daemons
+ shell: /sbin/nologin
+ home: /var/lib/ceilometer
+
+ - name: Create the /var folders for ceilometer
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: ceilometer
+ group: ceilometer
+ with_items:
+ - /var/run/ceilometer
+ - /var/lock/ceilometer
+ - /var/log/ceilometer
+ - /var/lib/ceilometer
+
+ - name: Create /etc/ceilometer directory
+ file:
+ path: /etc/ceilometer
+ state: directory
+
+ - name: Add the configuration needed for ceilometer in /etc/ceilometer using templates
+ template:
+ src: /usr/share/openstack/ceilometer/{{ item }}
+ dest: /etc/ceilometer/{{ item }}
+ with_lines:
+ - cd /usr/share/openstack/ceilometer && find -type f
diff --git a/install-files/openstack/usr/share/openstack/ceilometer-db.yml b/install-files/openstack/usr/share/openstack/ceilometer-db.yml
new file mode 100644
index 00000000..717c7d7d
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/ceilometer-db.yml
@@ -0,0 +1,50 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/ceilometer.conf"
+ tasks:
+ - name: Create ceilometer service user in service tenant
+ keystone_user:
+ user: "{{ CEILOMETER_SERVICE_USER }}"
+ password: "{{ CEILOMETER_SERVICE_PASSWORD }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+  - name: Assign admin role to ceilometer service user in the service tenant
+ keystone_user:
+ role: admin
+ user: "{{ CEILOMETER_SERVICE_USER }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Add ceilometer endpoint
+ keystone_service:
+ name: ceilometer
+ type: metering
+      description: OpenStack Metering Service
+ publicurl: http://{{ CONTROLLER_HOST_ADDRESS }}:8777
+ internalurl: http://{{ CONTROLLER_HOST_ADDRESS }}:8777
+ adminurl: http://{{ CONTROLLER_HOST_ADDRESS }}:8777
+ region: regionOne
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Create postgresql user for ceilometer
+ postgresql_user:
+ name: "{{ CEILOMETER_DB_USER }}"
+ login_host: "{{ CONTROLLER_HOST_ADDRESS }}"
+ password: "{{ CEILOMETER_DB_PASSWORD }}"
+ sudo: yes
+ sudo_user: ceilometer
+
+ - name: Create database for ceilometer services
+ postgresql_db:
+ name: ceilometer
+ owner: "{{ CEILOMETER_DB_USER }}"
+ login_host: "{{ CONTROLLER_HOST_ADDRESS }}"
+ sudo: yes
+ sudo_user: ceilometer
+
+ - name: Initiate ceilometer database
+ command: ceilometer-dbsync
+ sudo: yes
+ sudo_user: ceilometer
diff --git a/install-files/openstack/usr/share/openstack/ceilometer/ceilometer.conf b/install-files/openstack/usr/share/openstack/ceilometer/ceilometer.conf
new file mode 100644
index 00000000..b572d40f
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/ceilometer/ceilometer.conf
@@ -0,0 +1,1023 @@
+[DEFAULT]
+
+#
+# Options defined in ceilometer.middleware
+#
+
+# Exchanges name to listen for notifications. (multi valued)
+#http_control_exchanges=nova
+#http_control_exchanges=glance
+#http_control_exchanges=neutron
+#http_control_exchanges=cinder
+
+
+#
+# Options defined in ceilometer.pipeline
+#
+
+# Configuration file for pipeline definition. (string value)
+#pipeline_cfg_file=pipeline.yaml
+
+
+#
+# Options defined in ceilometer.sample
+#
+
+# Source for samples emitted on this instance. (string value)
+# Deprecated group/name - [DEFAULT]/counter_source
+#sample_source=openstack
+
+
+#
+# Options defined in ceilometer.service
+#
+
+# Name of this node, which must be valid in an AMQP key. Can
+# be an opaque identifier. For ZeroMQ only, must be a valid
+# host name, FQDN, or IP address. (string value)
+#host=ceilometer
+
+# Dispatcher to process data. (multi valued)
+#dispatcher=database
+
+# Number of workers for collector service. A single
+# collector is enabled by default. (integer value)
+#collector_workers=1
+
+# Number of workers for notification service. A single
+# notification agent is enabled by default. (integer value)
+#notification_workers=1
+
+
+#
+# Options defined in ceilometer.api.app
+#
+
+# The strategy to use for auth: noauth or keystone. (string
+# value)
+auth_strategy=keystone
+
+# Deploy the deprecated v1 API. (boolean value)
+#enable_v1_api=true
+
+
+#
+# Options defined in ceilometer.compute.notifications
+#
+
+# Exchange name for Nova notifications. (string value)
+#nova_control_exchange=nova
+
+
+#
+# Options defined in ceilometer.compute.util
+#
+
+# List of metadata prefixes reserved for metering use. (list
+# value)
+#reserved_metadata_namespace=metering.
+
+# Limit on length of reserved metadata values. (integer value)
+#reserved_metadata_length=256
+
+
+#
+# Options defined in ceilometer.compute.virt.inspector
+#
+
+# Inspector to use for inspecting the hypervisor layer.
+# (string value)
+#hypervisor_inspector=libvirt
+
+
+#
+# Options defined in ceilometer.compute.virt.libvirt.inspector
+#
+
+# Libvirt domain type (valid options are: kvm, lxc, qemu, uml,
+# xen). (string value)
+#libvirt_type=kvm
+
+# Override the default libvirt URI (which is dependent on
+# libvirt_type). (string value)
+#libvirt_uri=
+
+
+#
+# Options defined in ceilometer.image.notifications
+#
+
+# Exchange name for Glance notifications. (string value)
+#glance_control_exchange=glance
+
+
+#
+# Options defined in ceilometer.network.notifications
+#
+
+# Exchange name for Neutron notifications. (string value)
+# Deprecated group/name - [DEFAULT]/quantum_control_exchange
+#neutron_control_exchange=neutron
+
+
+#
+# Options defined in ceilometer.objectstore.swift
+#
+
+# Swift reseller prefix. Must be on par with reseller_prefix
+# in proxy-server.conf. (string value)
+#reseller_prefix=AUTH_
+
+
+#
+# Options defined in ceilometer.openstack.common.db.sqlalchemy.session
+#
+
+# The file name to use with SQLite (string value)
+#sqlite_db=ceilometer.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous=true
+
+
+#
+# Options defined in ceilometer.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in ceilometer.openstack.common.lockutils
+#
+
+# Whether to disable inter-process locks. (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. (string value)
+#lock_path=<None>
+
+
+#
+# Options defined in ceilometer.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
+
+# Publish error events (boolean value)
+#publish_errors=false
+
+# Make deprecations fatal (boolean value)
+#fatal_deprecations=false
+
+# If an instance is passed with the log message, format it
+# like this (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# If an instance UUID is passed with the log message, format
+# it like this (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of logging configuration file. It does not disable
+# existing loggers, but just appends specified logging
+# configuration to any other existing logging options. Please
+# see the Python logging module documentation for details on
+# logging configuration files. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=<None>
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and then will be changed in J to honor RFC5424
+# (boolean value)
+use_syslog=true
+
+# (Optional) Use syslog rfc5424 format for logging. If
+# enabled, will add APP-NAME (RFC5424) before the MSG part of
+# the syslog message. The old format without APP-NAME is
+# deprecated in I, and will be removed in J. (boolean value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in ceilometer.openstack.common.middleware.sizelimit
+#
+
+# The maximum body size per request, in bytes (integer value)
+# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size
+#max_request_body_size=114688
+
+
+#
+# Options defined in ceilometer.openstack.common.notifier.api
+#
+
+# Driver or drivers to handle sending notifications (multi
+# valued)
+#notification_driver=
+
+# Default notification level for outgoing notifications
+# (string value)
+#default_notification_level=INFO
+
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=<None>
+
+
+#
+# Options defined in ceilometer.openstack.common.notifier.rpc_notifier
+#
+
+# AMQP topic used for OpenStack notifications (list value)
+#notification_topics=notifications
+
+
+#
+# Options defined in ceilometer.openstack.common.policy
+#
+
+# JSON file containing policy (string value)
+#policy_file=policy.json
+
+# Rule enforced when requested rule is not found (string
+# value)
+#policy_default_rule=default
+
+
+#
+# Options defined in ceilometer.openstack.common.rpc
+#
+
+# The messaging module to use, defaults to kombu. (string
+# value)
+rpc_backend=rabbit
+
+# Size of RPC thread pool (integer value)
+#rpc_thread_pool_size=64
+
+# Size of RPC connection pool (integer value)
+#rpc_conn_pool_size=30
+
+# Seconds to wait for a response from call or multicall
+# (integer value)
+#rpc_response_timeout=60
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call. (list value)
+#allowed_rpc_exception_modules=nova.exception,cinder.exception,exceptions
+
+# If passed, use a fake RabbitMQ provider (boolean value)
+#fake_rabbit=false
+
+# AMQP exchange to connect to if using RabbitMQ or Qpid
+# (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in ceilometer.openstack.common.rpc.amqp
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+
+#
+# Options defined in ceilometer.openstack.common.rpc.impl_kombu
+#
+
+# If SSL is enabled, the SSL version to use. Valid values are
+# TLSv1, SSLv23 and SSLv3. SSLv2 might be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled) (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled) (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL enabled)
+# (string value)
+#kombu_ssl_ca_certs=
+
+# The RabbitMQ broker address where a single node is used
+# (string value)
+rabbit_host = {{ RABBITMQ_HOST }}
+
+
+# The RabbitMQ broker port where a single node is used
+# (integer value)
+rabbit_port= {{ RABBITMQ_PORT }}
+
+# RabbitMQ HA cluster host:port pairs (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ (boolean value)
+rabbit_use_ssl=false
+
+# The RabbitMQ userid (string value)
+rabbit_userid= {{ RABBITMQ_USER }}
+
+# The RabbitMQ password (string value)
+rabbit_password = {{ RABBITMQ_PASSWORD }}
+
+
+# The RabbitMQ virtual host (string value)
+rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count) (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+
+#
+# Options defined in ceilometer.openstack.common.rpc.impl_qpid
+#
+
+# Qpid broker hostname (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for qpid connection (string value)
+#qpid_username=
+
+# Password for qpid connection (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl' (string value)
+#qpid_protocol=tcp
+
+# Disable Nagle algorithm (boolean value)
+#qpid_tcp_nodelay=true
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+
+#
+# Options defined in ceilometer.openstack.common.rpc.impl_zmq
+#
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver (string value)
+#rpc_zmq_matchmaker=ceilometer.openstack.common.rpc.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1 (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=ceilometer
+
+
+#
+# Options defined in ceilometer.openstack.common.rpc.matchmaker
+#
+
+# Heartbeat frequency (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+
+#
+# Options defined in ceilometer.orchestration.notifications
+#
+
+# Exchange name for Heat notifications (string value)
+#heat_control_exchange=heat
+
+
+#
+# Options defined in ceilometer.storage
+#
+
+# DEPRECATED - Database connection string. (string value)
+#database_connection=<None>
+
+
+#
+# Options defined in ceilometer.storage.sqlalchemy.models
+#
+
+# MySQL engine to use. (string value)
+#mysql_engine=InnoDB
+
+
+#
+# Options defined in ceilometer.volume.notifications
+#
+
+# Exchange name for Cinder notifications. (string value)
+cinder_control_exchange=cinder
+
+
+[alarm]
+
+#
+# Options defined in ceilometer.cli
+#
+
+# Class to launch as alarm evaluation service. (string value)
+#evaluation_service=ceilometer.alarm.service.SingletonAlarmService
+
+
+#
+# Options defined in ceilometer.alarm.notifier.rest
+#
+
+# SSL Client certificate for REST notifier. (string value)
+#rest_notifier_certificate_file=
+
+# SSL Client private key for REST notifier. (string value)
+#rest_notifier_certificate_key=
+
+# Whether to verify the SSL Server certificate when calling
+# alarm action. (boolean value)
+#rest_notifier_ssl_verify=true
+
+
+#
+# Options defined in ceilometer.alarm.rpc
+#
+
+# The topic that ceilometer uses for alarm notifier messages.
+# (string value)
+#notifier_rpc_topic=alarm_notifier
+
+# The topic that ceilometer uses for alarm partition
+# coordination messages. (string value)
+#partition_rpc_topic=alarm_partition_coordination
+
+
+#
+# Options defined in ceilometer.alarm.service
+#
+
+# Period of evaluation cycle, should be >= than configured
+# pipeline interval for collection of underlying metrics.
+# (integer value)
+# Deprecated group/name - [alarm]/threshold_evaluation_interval
+#evaluation_interval=60
+
+
+#
+# Options defined in ceilometer.api.controllers.v2
+#
+
+# Record alarm change events. (boolean value)
+#record_history=true
+
+
+[api]
+
+#
+# Options defined in ceilometer.api
+#
+
+# The port for the ceilometer API server. (integer value)
+# Deprecated group/name - [DEFAULT]/metering_api_port
+#port=8777
+
+# The listen IP for the ceilometer API server. (string value)
+#host=0.0.0.0
+
+
+[collector]
+
+#
+# Options defined in ceilometer.collector
+#
+
+# Address to which the UDP socket is bound. Set to an empty
+# string to disable. (string value)
+#udp_address=0.0.0.0
+
+# Port to which the UDP socket is bound. (integer value)
+#udp_port=4952
+
+
+[database]
+
+#
+# Options defined in ceilometer.openstack.common.db.api
+#
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+
+#
+# Options defined in ceilometer.openstack.common.db.sqlalchemy.session
+#
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+connection=postgresql://{{ CEILOMETER_DB_USER }}:{{ CEILOMETER_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/ceilometer
+
+# The SQLAlchemy connection string used to connect to the
+# slave database (string value)
+#slave_connection=
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+
+#
+# Options defined in ceilometer.storage
+#
+
+# Number of seconds that samples are kept in the database for
+# (<= 0 means forever). (integer value)
+#time_to_live=-1
+
+
+[dispatcher_file]
+
+#
+# Options defined in ceilometer.dispatcher.file
+#
+
+# Name and the location of the file to record meters. (string
+# value)
+#file_path=<None>
+
+# The max size of the file. (integer value)
+#max_bytes=0
+
+# The max number of the files to keep. (integer value)
+#backup_count=0
+
+
+[event]
+
+#
+# Options defined in ceilometer.event.converter
+#
+
+# Configuration file for event definitions. (string value)
+#definitions_cfg_file=event_definitions.yaml
+
+# Drop notifications if no event definition matches.
+# (Otherwise, we convert them with just the default traits)
+# (boolean value)
+#drop_unmatched_notifications=false
+
+
+[keystone_authtoken]
+
+#
+# Options defined in keystoneclient.middleware.auth_token
+#
+
+# Prefix to prepend at the beginning of the path (string
+# value)
+#auth_admin_prefix=
+
+# Host providing the admin Identity API endpoint (string
+# value)
+#auth_host=127.0.0.1
+
+# Port of the admin Identity API endpoint (integer value)
+#auth_port=35357
+
+# Protocol of the admin Identity API endpoint(http or https)
+# (string value)
+#auth_protocol=https
+
+# Complete public Identity API endpoint (string value)
+auth_uri= http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357
+
+
+# API version of the admin Identity API endpoint (string
+# value)
+#auth_version=<None>
+
+# Do not handle authorization requests within the middleware,
+# but delegate the authorization decision to downstream WSGI
+# components (boolean value)
+#delay_auth_decision=false
+
+# Request timeout value for communicating with Identity API
+# server. (boolean value)
+#http_connect_timeout=<None>
+
+# How many times are we trying to reconnect when communicating
+# with Identity API Server. (integer value)
+#http_request_max_retries=3
+
+# Allows to pass in the name of a fake http_handler callback
+# function used instead of httplib.HTTPConnection or
+# httplib.HTTPSConnection. Useful for unit testing where
+# network is not available. (string value)
+#http_handler=<None>
+
+# Single shared secret with the Keystone configuration used
+# for bootstrapping a Keystone installation, or otherwise
+# bypassing the normal authentication process. (string value)
+#admin_token=<None>
+
+# Keystone account username (string value)
+admin_user = {{ CEILOMETER_SERVICE_USER }}
+
+# Keystone account password (string value)
+admin_password = {{ CEILOMETER_SERVICE_PASSWORD }}
+
+# Keystone service account tenant name to validate user tokens
+# (string value)
+admin_tenant_name = service
+
+# Env key for the swift cache (string value)
+#cache=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#certfile=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#keyfile=<None>
+
+# A PEM encoded Certificate Authority to use when verifying
+# HTTPS connections. Defaults to system CAs. (string value)
+#cafile=<None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure=false
+
+# Directory used to cache files related to PKI tokens (string
+# value)
+#signing_dir=<None>
+
+# If defined, the memcache server(s) to use for caching (list
+# value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers=<None>
+
+# In order to prevent excessive requests and validations, the
+# middleware uses an in-memory cache for the tokens the
+# Keystone API returns. This is only valid if memcache_servers
+# is defined. Set to -1 to disable caching completely.
+# (integer value)
+#token_cache_time=300
+
+# Value only used for unit testing (integer value)
+#revocation_cache_time=1
+
+# (optional) if defined, indicate whether token data should be
+# authenticated or authenticated and encrypted. Acceptable
+# values are MAC or ENCRYPT. If MAC, token data is
+# authenticated (with HMAC) in the cache. If ENCRYPT, token
+# data is encrypted and authenticated in the cache. If the
+# value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+#memcache_security_strategy=<None>
+
+# (optional, mandatory if memcache_security_strategy is
+# defined) this string is used for key derivation. (string
+# value)
+#memcache_secret_key=<None>
+
+# (optional) indicate whether to set the X-Service-Catalog
+# header. If False, middleware will not ask for service
+# catalog on token validation and will not set the X-Service-
+# Catalog header. (boolean value)
+#include_service_catalog=true
+
+# Used to control the use and type of token binding. Can be
+# set to: "disabled" to not check token binding. "permissive"
+# (default) to validate binding information if the bind type
+# is of a form known to the server and ignore it if not.
+# "strict" like "permissive" but if the bind type is unknown
+# the token will be rejected. "required" any form of token
+# binding is needed to be allowed. Finally the name of a
+# binding method that must be present in tokens. (string
+# value)
+#enforce_token_bind=permissive
+
+
+[matchmaker_redis]
+
+#
+# Options defined in ceilometer.openstack.common.rpc.matchmaker_redis
+#
+
+# Host to locate redis (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server. (optional) (string value)
+#password=<None>
+
+
+[matchmaker_ring]
+
+#
+# Options defined in ceilometer.openstack.common.rpc.matchmaker_ring
+#
+
+# Matchmaker ring file (JSON) (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[notification]
+
+#
+# Options defined in ceilometer.notification
+#
+
+# Acknowledge message when event persistence fails. (boolean
+# value)
+#ack_on_event_error=true
+
+# Save event details. (boolean value)
+#store_events=false
+
+
+[publisher]
+
+#
+# Options defined in ceilometer.publisher.utils
+#
+
+# Secret value for signing metering messages. (string value)
+# Deprecated group/name - [DEFAULT]/metering_secret
+# Deprecated group/name - [publisher_rpc]/metering_secret
+# It should be set to some random value
+metering_secret = {{ METERING_SECRET }}
+
+[publisher_rpc]
+
+#
+# Options defined in ceilometer.publisher.rpc
+#
+
+# The topic that ceilometer uses for metering messages.
+# (string value)
+#metering_topic=metering
+
+
+[rpc_notifier2]
+
+#
+# Options defined in ceilometer.openstack.common.notifier.rpc_notifier2
+#
+
+# AMQP topic(s) used for OpenStack notifications (list value)
+#topics=notifications
+
+
+[service_credentials]
+
+#
+# Options defined in ceilometer.service
+#
+
+# User name to use for OpenStack service access. (string
+# value)
+os_username = {{ CEILOMETER_SERVICE_USER }}
+
+# Password to use for OpenStack service access. (string value)
+os_password = {{ CEILOMETER_SERVICE_PASSWORD }}
+
+# Tenant ID to use for OpenStack service access. (string
+# value)
+#os_tenant_id=
+
+# Tenant name to use for OpenStack service access. (string
+# value)
+os_tenant_name = service
+
+# Certificate chain for SSL validation. (string value)
+#os_cacert=<None>
+
+# Auth URL to use for OpenStack service access. (string value)
+os_auth_url = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+
+# Region name to use for OpenStack service endpoints. (string
+# value)
+os_region_name=regionOne
+
+# Type of endpoint in Identity service catalog to use for
+# communication with OpenStack services. (string value)
+os_endpoint_type=internalURL
+
+# Disables X.509 certificate validation when an SSL connection
+# to Identity Service is established. (boolean value)
+#insecure=false
+
+
+[ssl]
+
+#
+# Options defined in ceilometer.openstack.common.sslutils
+#
+
+# CA certificate file to use to verify connecting clients
+# (string value)
+#ca_file=<None>
+
+# Certificate file to use when starting the server securely
+# (string value)
+#cert_file=<None>
+
+# Private key file to use when starting the server securely
+# (string value)
+#key_file=<None>
+
+
+[vmware]
+
+#
+# Options defined in ceilometer.compute.virt.vmware.inspector
+#
+
+# IP address of the VMware Vsphere host (string value)
+#host_ip=
+
+# Username of VMware Vsphere (string value)
+#host_username=
+
+# Password of VMware Vsphere (string value)
+#host_password=
+
+# Number of times a VMware Vsphere API must be retried
+# (integer value)
+#api_retry_count=10
+
+# Sleep time in seconds for polling an ongoing async task
+# (floating point value)
+#task_poll_interval=0.5
diff --git a/install-files/openstack/usr/share/openstack/cinder-config.yml b/install-files/openstack/usr/share/openstack/cinder-config.yml
new file mode 100644
index 00000000..fd3e2cd0
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/cinder-config.yml
@@ -0,0 +1,37 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/cinder.conf"
+ tasks:
+# Configure cinder
+ - name: Create the cinder user.
+ user:
+ name: cinder
+      comment: OpenStack Cinder Daemons
+ shell: /sbin/nologin
+ home: /var/lib/cinder
+
+ - name: Create the /var folders for cinder
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: cinder
+ group: cinder
+ with_items:
+ - /var/run/cinder
+ - /var/lock/cinder
+ - /var/log/cinder
+ - /var/lib/cinder
+ - /var/lib/cinder/volumes
+
+ - name: Create /etc/cinder directory
+ file:
+ path: /etc/cinder
+ state: directory
+
+ - name: Add the configuration needed for cinder in /etc/cinder using templates
+ template:
+ src: /usr/share/openstack/cinder/{{ item }}
+ dest: /etc/cinder/{{ item }}
+ with_lines:
+ - cd /usr/share/openstack/cinder && find -type f
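cinder-config.yml renders every file found under /usr/share/openstack/cinder into /etc/cinder, substituting the variables loaded from /etc/openstack/cinder.conf; the with_lines shell pipeline simply enumerates the template files. A rough sketch of what the template step amounts to for a single file, assuming Python with Jinja2 and a flat YAML mapping as the variables file (Ansible's template module is what actually does this):

    # Sketch of the per-file template step performed by the playbook above.
    # Assumptions: /etc/openstack/cinder.conf is a flat YAML mapping of the
    # {{ ... }} variables, and Jinja2 is available (Ansible itself uses it).
    import yaml
    from jinja2 import Template

    with open('/etc/openstack/cinder.conf') as f:
        variables = yaml.safe_load(f)

    with open('/usr/share/openstack/cinder/cinder.conf') as f:
        rendered = Template(f.read()).render(**variables)

    with open('/etc/cinder/cinder.conf', 'w') as f:
        f.write(rendered)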
diff --git a/install-files/openstack/usr/share/openstack/cinder-db.yml b/install-files/openstack/usr/share/openstack/cinder-db.yml
new file mode 100644
index 00000000..2a211720
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/cinder-db.yml
@@ -0,0 +1,60 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/cinder.conf"
+ tasks:
+ - name: Create cinder service user in service tenant
+ keystone_user:
+ user: "{{ CINDER_SERVICE_USER }}"
+ password: "{{ CINDER_SERVICE_PASSWORD }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Assign admin role to cinder service user in the service tenant
+ keystone_user:
+ role: admin
+ user: "{{ CINDER_SERVICE_USER }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Add cinder endpoint
+ keystone_service:
+ name: cinder
+ type: volume
+ description: Openstack Block Storage
+ publicurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v1/%(tenant_id)s'
+ internalurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v1/%(tenant_id)s'
+ adminurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v1/%(tenant_id)s'
+ region: regionOne
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Add cinderv2 endpoint
+ keystone_service:
+ name: cinderv2
+ type: volumev2
+ description: Openstack Block Storage
+ publicurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v2/%(tenant_id)s'
+ internalurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v2/%(tenant_id)s'
+ adminurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:8776/v2/%(tenant_id)s'
+ region: regionOne
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Create postgresql user for cinder
+ postgresql_user:
+ name: "{{ CINDER_DB_USER }}"
+ password: "{{ CINDER_DB_PASSWORD }}"
+ sudo: yes
+ sudo_user: cinder
+
+ - name: Create database for cinder services
+ postgresql_db:
+ name: cinder
+ owner: "{{ CINDER_DB_USER }}"
+ sudo: yes
+ sudo_user: cinder
+
+ - name: Initiate cinder database
+ cinder_manage:
+ action: dbsync
+ sudo: yes
+ sudo_user: cinder
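The endpoint URLs registered above carry a %(tenant_id)s placeholder that Keystone expands per tenant when it builds the service catalog, and the final cinder_manage task initialises the database schema (the equivalent of running 'cinder-manage db sync' as the cinder user). A small illustration of how the placeholder expands; the address and tenant id below are made-up examples, not values from this manifest:

    # Sketch: Keystone endpoint templates use ordinary Python string
    # interpolation keyed on tenant_id. Example values only.
    template = 'http://192.0.2.10:8776/v2/%(tenant_id)s'
    print(template % {'tenant_id': 'f8f3c2a1e41d4e3e9c35d2a6b7c0d123'})
    # -> http://192.0.2.10:8776/v2/f8f3c2a1e41d4e3e9c35d2a6b7c0d123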
diff --git a/install-files/openstack/usr/share/openstack/cinder-lvs.yml b/install-files/openstack/usr/share/openstack/cinder-lvs.yml
new file mode 100644
index 00000000..7a91a306
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/cinder-lvs.yml
@@ -0,0 +1,21 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/cinder.conf"
+ tasks:
+ - name: Check that CINDER_DEVICE exists
+ stat:
+ path: "{{ CINDER_DEVICE }}"
+ register: cinder_device_stats
+ failed_when: cinder_device_stats.stat.exists == false
+
+ - name: Configure LVM group for cinder
+ lvg:
+ vg: cinder-volumes
+ pvs: "{{ CINDER_DEVICE }}"
+
+ - lineinfile:
+ dest: /etc/lvm/lvm.conf
+ regexp: '# filter = \[ \"a\/\.\*/\" \]'
+ line: ' filter = [ "a|{{ CINDER_DEVICE }}|", "r/.*/" ]'
+ backrefs: yes
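The lineinfile task above rewrites LVM's device filter so that only CINDER_DEVICE is accepted as a physical volume and every other device is rejected; the regexp matches the stock commented-out filter line shipped in lvm.conf. A minimal sketch of the same substitution, assuming Python, a single matching line as in the stock lvm.conf, and /dev/sdb as an illustrative CINDER_DEVICE:

    # Sketch of the substitution performed by the lineinfile task.
    import re

    cinder_device = '/dev/sdb'  # illustrative stand-in for CINDER_DEVICE
    with open('/etc/lvm/lvm.conf') as f:
        conf = f.read()

    replacement = '    filter = [ "a|%s|", "r/.*/" ]' % cinder_device
    conf = re.sub(r'# filter = \[ "a/\.\*/" \]', replacement, conf)

    with open('/etc/lvm/lvm.conf', 'w') as f:
        f.write(conf)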
diff --git a/install-files/openstack/usr/share/openstack/cinder/api-paste.ini b/install-files/openstack/usr/share/openstack/cinder/api-paste.ini
new file mode 100644
index 00000000..ba922d5f
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/cinder/api-paste.ini
@@ -0,0 +1,60 @@
+#############
+# OpenStack #
+#############
+
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[filter:request_id]
+paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
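api-paste.ini assembles the Cinder API out of Paste pieces: the top-level composite dispatches on the URL prefix (/, /v1, /v2), and each pipeline wraps the final apiv1/apiv2 application in the listed filters, leftmost filter outermost; the auth_strategy option in cinder.conf selects the noauth or keystone pipeline. A toy sketch of that left-to-right wrapping, with placeholder filters rather than the real middleware classes:

    # Toy sketch of Paste-style pipeline composition: each filter factory
    # wraps the application that follows it, so the leftmost name in the
    # pipeline ends up as the outermost layer.
    def make_filter(name):
        def factory(app):
            def middleware(request):
                print('enter %s' % name)
                return app(request)
            return middleware
        return factory

    def apiv2(request):
        return 'handled %s' % request

    filters = [make_filter(n) for n in
               ('request_id', 'faultwrap', 'sizelimit', 'authtoken')]
    app = apiv2
    for factory in reversed(filters):  # innermost filter is applied first
        app = factory(app)

    print(app('GET /v2/volumes'))  # prints the enter lines outermost-first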
diff --git a/install-files/openstack/usr/share/openstack/cinder/cinder.conf b/install-files/openstack/usr/share/openstack/cinder/cinder.conf
new file mode 100644
index 00000000..a58004b5
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/cinder/cinder.conf
@@ -0,0 +1,2825 @@
+[DEFAULT]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in AMQP. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The number of prefetched messages held by receiver. (integer
+# value)
+#qpid_receiver_capacity=1
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). Valid values
+# are TLSv1 and SSLv23. SSLv2 and SSLv3 may be available on
+# some distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+rabbit_host={{ RABBITMQ_HOST }}
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+rabbit_port={{ RABBITMQ_PORT }}
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+rabbit_userid={{ RABBITMQ_USER }}
+
+# The RabbitMQ password. (string value)
+rabbit_password={{ RABBITMQ_PASSWORD }}
+
+# The RabbitMQ login method. (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
+# (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=cinder
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+notification_driver=messagingv2
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+control_exchange=cinder
+
+
+#
+# Options defined in cinder.exception
+#
+
+# Make exception message format errors fatal. (boolean value)
+#fatal_exception_format_errors=false
+
+
+#
+# Options defined in cinder.quota
+#
+
+# Number of volumes allowed per project (integer value)
+#quota_volumes=10
+
+# Number of volume snapshots allowed per project (integer
+# value)
+#quota_snapshots=10
+
+# Number of consistencygroups allowed per project (integer
+# value)
+#quota_consistencygroups=10
+
+# Total amount of storage, in gigabytes, allowed for volumes
+# and snapshots per project (integer value)
+#quota_gigabytes=1000
+
+# Number of volume backups allowed per project (integer value)
+#quota_backups=10
+
+# Total amount of storage, in gigabytes, allowed for backups
+# per project (integer value)
+#quota_backup_gigabytes=1000
+
+# Number of seconds until a reservation expires (integer
+# value)
+#reservation_expire=86400
+
+# Count of reservations until usage is refreshed (integer
+# value)
+#until_refresh=0
+
+# Number of seconds between subsequent usage refreshes
+# (integer value)
+#max_age=0
+
+# Default driver to use for quota checks (string value)
+#quota_driver=cinder.quota.DbQuotaDriver
+
+# Enables or disables use of default quota class with default
+# quota. (boolean value)
+#use_default_quota_class=true
+
+
+#
+# Options defined in cinder.service
+#
+
+# Interval, in seconds, between nodes reporting state to
+# datastore (integer value)
+#report_interval=10
+
+# Interval, in seconds, between running periodic tasks
+# (integer value)
+#periodic_interval=60
+
+# Range, in seconds, to randomly delay when starting the
+# periodic task scheduler to reduce stampeding. (Disable by
+# setting to 0) (integer value)
+#periodic_fuzzy_delay=60
+
+# IP address on which OpenStack Volume API listens (string
+# value)
+#osapi_volume_listen=0.0.0.0
+
+# Port on which OpenStack Volume API listens (integer value)
+#osapi_volume_listen_port=8776
+
+# Number of workers for OpenStack Volume API service. The
+# default is equal to the number of CPUs available. (integer
+# value)
+#osapi_volume_workers=<None>
+
+
+#
+# Options defined in cinder.ssh_utils
+#
+
+# Option to enable strict host key checking. When set to
+# "True" Cinder will only connect to systems with a host key
+# present in the configured "ssh_hosts_key_file". When set to
+# "False" the host key will be saved upon first connection and
+# used for subsequent connections. Default=False (boolean
+# value)
+#strict_ssh_host_key_policy=false
+
+# File containing SSH host keys for the systems with which
+# Cinder needs to communicate. OPTIONAL:
+# Default=$state_path/ssh_known_hosts (string value)
+#ssh_hosts_key_file=$state_path/ssh_known_hosts
+
+
+#
+# Options defined in cinder.test
+#
+
+# File name of clean sqlite db (string value)
+#sqlite_clean_db=clean.sqlite
+
+
+#
+# Options defined in cinder.wsgi
+#
+
+# Maximum line size of message headers to be accepted.
+# max_header_line may need to be increased when using large
+# tokens (typically those generated by the Keystone v3 API
+# with big service catalogs). (integer value)
+#max_header_line=16384
+
+# If False, closes the client socket connection explicitly.
+# Set it to True to maintain backward compatibility. The
+# recommended setting is False. (boolean value)
+#wsgi_keep_alive=true
+
+# Sets the value of TCP_KEEPALIVE (True/False) for each server
+# socket. (boolean value)
+#tcp_keepalive=true
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server
+# socket. Not supported on OS X. (integer value)
+#tcp_keepidle=600
+
+# Sets the value of TCP_KEEPINTVL in seconds for each server
+# socket. Not supported on OS X. (integer value)
+#tcp_keepalive_interval=<None>
+
+# Sets the value of TCP_KEEPCNT for each server socket. Not
+# supported on OS X. (integer value)
+#tcp_keepalive_count=<None>
+
+# CA certificate file to use to verify connecting clients
+# (string value)
+#ssl_ca_file=<None>
+
+# Certificate file to use when starting the server securely
+# (string value)
+#ssl_cert_file=<None>
+
+# Private key file to use when starting the server securely
+# (string value)
+#ssl_key_file=<None>
+
+
+#
+# Options defined in cinder.api.common
+#
+
+# The maximum number of items that a collection resource
+# returns in a single response (integer value)
+#osapi_max_limit=1000
+
+# Base URL that will be presented to users in links to the
+# OpenStack Volume API (string value)
+# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix
+#osapi_volume_base_URL=<None>
+
+
+#
+# Options defined in cinder.api.middleware.auth
+#
+
+# Treat X-Forwarded-For as the canonical remote address. Only
+# enable this if you have a sanitizing proxy. (boolean value)
+#use_forwarded_for=false
+
+
+#
+# Options defined in cinder.api.middleware.sizelimit
+#
+
+# Max size for body of a request (integer value)
+#osapi_max_request_body_size=114688
+
+
+#
+# Options defined in cinder.backup.driver
+#
+
+# Backup metadata version to be used when backing up volume
+# metadata. If this number is bumped, make sure the service
+# doing the restore supports the new version. (integer value)
+#backup_metadata_version=1
+
+
+#
+# Options defined in cinder.backup.drivers.ceph
+#
+
+# Ceph configuration file to use. (string value)
+#backup_ceph_conf=/etc/ceph/ceph.conf
+
+# The Ceph user to connect with. Default here is to use the
+# same user as for Cinder volumes. If not using cephx this
+# should be set to None. (string value)
+#backup_ceph_user=cinder
+
+# The chunk size, in bytes, that a backup is broken into
+# before transfer to the Ceph object store. (integer value)
+#backup_ceph_chunk_size=134217728
+
+# The Ceph pool where volume backups are stored. (string
+# value)
+#backup_ceph_pool=backups
+
+# RBD stripe unit to use when creating a backup image.
+# (integer value)
+#backup_ceph_stripe_unit=0
+
+# RBD stripe count to use when creating a backup image.
+# (integer value)
+#backup_ceph_stripe_count=0
+
+# If True, always discard excess bytes when restoring volumes
+# i.e. pad with zeroes. (boolean value)
+#restore_discard_excess_bytes=true
+
+
+#
+# Options defined in cinder.backup.drivers.swift
+#
+
+# The URL of the Swift endpoint (string value)
+#backup_swift_url=<None>
+
+# Info to match when looking for swift in the service catalog.
+# Format is colon-separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> - Only used if
+# backup_swift_url is unset (string value)
+#swift_catalog_info=object-store:swift:publicURL
+
+# Swift authentication mechanism (string value)
+#backup_swift_auth=per_user
+
+# Swift authentication version. Specify "1" for auth 1.0, or
+# "2" for auth 2.0 (string value)
+#backup_swift_auth_version=1
+
+# Swift tenant/account name. Required when connecting to an
+# auth 2.0 system (string value)
+#backup_swift_tenant=<None>
+
+# Swift user name (string value)
+#backup_swift_user=<None>
+
+# Swift key for authentication (string value)
+#backup_swift_key=<None>
+
+# The default Swift container to use (string value)
+#backup_swift_container=volumebackups
+
+# The size in bytes of Swift backup objects (integer value)
+#backup_swift_object_size=52428800
+
+# The number of retries to make for Swift operations (integer
+# value)
+#backup_swift_retry_attempts=3
+
+# The backoff time in seconds between Swift retries (integer
+# value)
+#backup_swift_retry_backoff=2
+
+# Compression algorithm (None to disable) (string value)
+#backup_compression_algorithm=zlib
+
+
+#
+# Options defined in cinder.backup.drivers.tsm
+#
+
+# Volume prefix for the backup id when backing up to TSM
+# (string value)
+#backup_tsm_volume_prefix=backup
+
+# TSM password for the running username (string value)
+#backup_tsm_password=password
+
+# Enable or Disable compression for backups (boolean value)
+#backup_tsm_compression=true
+
+
+#
+# Options defined in cinder.backup.manager
+#
+
+# Driver to use for backups. (string value)
+# Deprecated group/name - [DEFAULT]/backup_service
+#backup_driver=cinder.backup.drivers.swift
+
+
+#
+# Options defined in cinder.common.config
+#
+
+# File name for the paste.deploy config for cinder-api (string
+# value)
+api_paste_config=api-paste.ini
+
+# Top-level directory for maintaining cinder's state (string
+# value)
+# Deprecated group/name - [DEFAULT]/pybasedir
+state_path=/var/lib/cinder
+
+# IP address of this host (string value)
+my_ip={{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# Default glance host name or IP (string value)
+glance_host={{ CONTROLLER_HOST_ADDRESS }}
+
+# Default glance port (integer value)
+#glance_port=9292
+
+# A list of the glance API servers available to cinder
+# ([hostname|ip]:port) (list value)
+#glance_api_servers=$glance_host:$glance_port
+
+# Version of the glance API to use (integer value)
+#glance_api_version=1
+
+# Number of retries when downloading an image from glance
+# (integer value)
+#glance_num_retries=0
+
+# Allow insecure SSL (https) requests to glance
+# (boolean value)
+#glance_api_insecure=false
+
+# Enables or disables negotiation of SSL layer compression. In
+# some cases disabling compression can improve data
+# throughput, such as when high network bandwidth is available
+# and you use compressed image formats like qcow2. (boolean
+# value)
+#glance_api_ssl_compression=false
+
+# Location of ca certificates file to use for glance client
+# requests. (string value)
+#glance_ca_certificates_file=<None>
+
+# http/https timeout value for glance operations. If no value
+# (None) is supplied here, the glanceclient default value is
+# used. (integer value)
+#glance_request_timeout=<None>
+
+# The topic that scheduler nodes listen on (string value)
+#scheduler_topic=cinder-scheduler
+
+# The topic that volume nodes listen on (string value)
+#volume_topic=cinder-volume
+
+# The topic that volume backup nodes listen on (string value)
+#backup_topic=cinder-backup
+
+# DEPRECATED: Deploy v1 of the Cinder API. (boolean value)
+#enable_v1_api=true
+
+# Deploy v2 of the Cinder API. (boolean value)
+#enable_v2_api=true
+
+# Enables or disables rate limit of the API. (boolean value)
+#api_rate_limit=true
+
+# Specify list of extensions to load when using
+# osapi_volume_extension option with
+# cinder.api.contrib.select_extensions (list value)
+#osapi_volume_ext_list=
+
+# osapi volume extension to load (multi valued)
+#osapi_volume_extension=cinder.api.contrib.standard_extensions
+
+# Full class name for the Manager for volume (string value)
+#volume_manager=cinder.volume.manager.VolumeManager
+
+# Full class name for the Manager for volume backup (string
+# value)
+#backup_manager=cinder.backup.manager.BackupManager
+
+# Full class name for the Manager for scheduler (string value)
+#scheduler_manager=cinder.scheduler.manager.SchedulerManager
+
+# Name of this node. This can be an opaque identifier. It is
+# not necessarily a host name, FQDN, or IP address. (string
+# value)
+#host=cinder
+
+# Availability zone of this node (string value)
+#storage_availability_zone=nova
+
+# Default availability zone for new volumes. If not set, the
+# storage_availability_zone option value is used as the
+# default for new volumes. (string value)
+#default_availability_zone=<None>
+
+# Default volume type to use (string value)
+#default_volume_type=<None>
+
+# Time period for which to generate volume usages. The options
+# are hour, day, month, or year. (string value)
+#volume_usage_audit_period=month
+
+# Path to the rootwrap configuration file to use for running
+# commands as root (string value)
+rootwrap_config=/etc/cinder/rootwrap.conf
+
+# Enable monkey patching (boolean value)
+#monkey_patch=false
+
+# List of modules/decorators to monkey patch (list value)
+#monkey_patch_modules=
+
+# Maximum time since last check-in for a service to be
+# considered up (integer value)
+#service_down_time=60
+
+# The full class name of the volume API class to use (string
+# value)
+#volume_api_class=cinder.volume.api.API
+
+# The full class name of the volume backup API class (string
+# value)
+#backup_api_class=cinder.backup.api.API
+
+# The strategy to use for auth. Supports noauth, keystone, and
+# deprecated. (string value)
+auth_strategy=keystone
+
+# A list of backend names to use. These backend names should
+# be backed by a unique [CONFIG] group with its options (list
+# value)
+#enabled_backends=<None>
+
+# Whether snapshots count against GigaByte quota (boolean
+# value)
+#no_snapshot_gb_quota=false
+
+# The full class name of the volume transfer API class (string
+# value)
+#transfer_api_class=cinder.transfer.api.API
+
+# The full class name of the volume replication API class
+# (string value)
+#replication_api_class=cinder.replication.api.API
+
+# The full class name of the consistencygroup API class
+# (string value)
+#consistencygroup_api_class=cinder.consistencygroup.api.API
+
+
+#
+# Options defined in cinder.compute
+#
+
+# The full class name of the compute API class to use (string
+# value)
+#compute_api_class=cinder.compute.nova.API
+
+
+#
+# Options defined in cinder.compute.nova
+#
+
+# Match this value when searching for nova in the service
+# catalog. Format is colon-separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#nova_catalog_info=compute:nova:publicURL
+
+# Same as nova_catalog_info, but for admin endpoint. (string
+# value)
+#nova_catalog_admin_info=compute:nova:adminURL
+
+# Override service catalog lookup with template for nova
+# endpoint e.g. http://localhost:8774/v2/%(project_id)s
+# (string value)
+#nova_endpoint_template=<None>
+
+# Same as nova_endpoint_template, but for admin endpoint.
+# (string value)
+#nova_endpoint_admin_template=<None>
+
+# Region name of this node (string value)
+#os_region_name=<None>
+
+# Location of ca certificates file to use for nova client
+# requests. (string value)
+#nova_ca_certificates_file=<None>
+
+# Allow insecure SSL requests to nova (boolean
+# value)
+#nova_api_insecure=false
+
+
+#
+# Options defined in cinder.db.api
+#
+
+# The backend to use for db (string value)
+#db_backend=sqlalchemy
+
+# Services to be added to the available pool on create
+# (boolean value)
+#enable_new_services=true
+
+# Template string to be used to generate volume names (string
+# value)
+volume_name_template=volume-%s
+
+# Template string to be used to generate snapshot names
+# (string value)
+#snapshot_name_template=snapshot-%s
+
+# Template string to be used to generate backup names (string
+# value)
+#backup_name_template=backup-%s
+
+
+#
+# Options defined in cinder.db.base
+#
+
+# Driver to use for database access (string value)
+#db_driver=cinder.db
+
+
+#
+# Options defined in cinder.image.glance
+#
+
+# Default core properties of image (list value)
+#glance_core_properties=checksum,container_format,disk_format,image_name,image_id,min_disk,min_ram,name,size
+
+# A list of url schemes that can be downloaded directly via
+# the direct_url. Currently supported schemes: [file]. (list
+# value)
+#allowed_direct_url_schemes=
+
+
+#
+# Options defined in cinder.image.image_utils
+#
+
+# Directory used for temporary storage during image conversion
+# (string value)
+#image_conversion_dir=$state_path/conversion
+
+
+#
+# Options defined in cinder.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in cinder.openstack.common.lockutils
+#
+
+# Whether to disable inter-process locks (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. Default to a temp directory
+# (string value)
+lock_path=/var/lock/cinder
+
+
+#
+# Options defined in cinder.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error. (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context. (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context.
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format.
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
+
+# Enables or disables publication of error events. (boolean
+# value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log
+# message. (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log
+# message. (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of a logging configuration file. This file is
+# appended to any existing logging configuration files. For
+# details about logging configuration files, see the Python
+# logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s . (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=<None>
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and will change in J to honor RFC5424. (boolean
+# value)
+use_syslog = True
+
+# (Optional) Enables or disables syslog rfc5424 format for
+# logging. If enabled, prefixes the MSG part of the syslog
+# message with APP-NAME (RFC5424). The format without the APP-
+# NAME is deprecated in I, and will be removed in J. (boolean
+# value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in cinder.openstack.common.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+
+#
+# Options defined in cinder.openstack.common.policy
+#
+
+# The JSON file that defines policies. (string value)
+#policy_file=policy.json
+
+# Default rule. Enforced when a requested rule is not found.
+# (string value)
+#policy_default_rule=default
+
+
+#
+# Options defined in cinder.scheduler.driver
+#
+
+# The scheduler host manager class to use (string value)
+#scheduler_host_manager=cinder.scheduler.host_manager.HostManager
+
+# Maximum number of attempts to schedule a volume (integer
+# value)
+#scheduler_max_attempts=3
+
+
+#
+# Options defined in cinder.scheduler.host_manager
+#
+
+# Which filter class names to use for filtering hosts when not
+# specified in the request. (list value)
+#scheduler_default_filters=AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter
+
+# Which weigher class names to use for weighing hosts. (list
+# value)
+#scheduler_default_weighers=CapacityWeigher
+
+
+#
+# Options defined in cinder.scheduler.manager
+#
+
+# Default scheduler driver to use (string value)
+#scheduler_driver=cinder.scheduler.filter_scheduler.FilterScheduler
+
+
+#
+# Options defined in cinder.scheduler.scheduler_options
+#
+
+# Absolute path to scheduler configuration JSON file. (string
+# value)
+#scheduler_json_config_location=
+
+
+#
+# Options defined in cinder.scheduler.simple
+#
+
+# This configure option has been deprecated along with the
+# SimpleScheduler. New scheduler is able to gather capacity
+# information for each host, thus setting the maximum number
+# of volume gigabytes for host is no longer needed. It's safe
+# to remove this configure from cinder.conf. (integer value)
+#max_gigabytes=10000
+
+
+#
+# Options defined in cinder.scheduler.weights.capacity
+#
+
+# Multiplier used for weighing volume capacity. Negative
+# numbers mean to stack vs spread. (floating point value)
+#capacity_weight_multiplier=1.0
+
+# Multiplier used for weighing volume capacity. Negative
+# numbers mean to stack vs spread. (floating point value)
+#allocated_capacity_weight_multiplier=-1.0
+
+
+#
+# Options defined in cinder.scheduler.weights.volume_number
+#
+
+# Multiplier used for weighing volume number. Negative numbers
+# mean to spread vs stack. (floating point value)
+#volume_number_multiplier=-1.0
+
+
+#
+# Options defined in cinder.transfer.api
+#
+
+# The number of characters in the salt. (integer value)
+#volume_transfer_salt_length=8
+
+# The number of characters in the autogenerated auth key.
+# (integer value)
+#volume_transfer_key_length=16
+
+
+#
+# Options defined in cinder.volume.api
+#
+
+# Cache volume availability zones in memory for the provided
+# duration in seconds (integer value)
+#az_cache_duration=3600
+
+# Create volume from snapshot at the host where snapshot
+# resides (boolean value)
+#snapshot_same_host=true
+
+# Ensure that the new volumes are the same AZ as snapshot or
+# source volume (boolean value)
+#cloned_volume_same_az=true
+
+
+#
+# Options defined in cinder.volume.driver
+#
+
+# The maximum number of times to rescan iSER target to find
+# volume (integer value)
+#num_iser_scan_tries=3
+
+# The maximum number of iSER target IDs per host (integer
+# value)
+#iser_num_targets=100
+
+# Prefix for iSER volumes (string value)
+#iser_target_prefix=iqn.2010-10.org.iser.openstack:
+
+# The IP address that the iSER daemon is listening on (string
+# value)
+#iser_ip_address=$my_ip
+
+# The port that the iSER daemon is listening on (integer
+# value)
+#iser_port=3260
+
+# The name of the iSER target user-land tool to use (string
+# value)
+#iser_helper=tgtadm
+
+# Number of times to attempt to run flakey shell commands
+# (integer value)
+#num_shell_tries=3
+
+# The percentage of backend capacity that is reserved (integer
+# value)
+#reserved_percentage=0
+
+# The maximum number of iSCSI target IDs per host (integer
+# value)
+#iscsi_num_targets=100
+
+# Prefix for iSCSI volumes (string value)
+#iscsi_target_prefix=iqn.2010-10.org.openstack:
+
+# The IP address that the iSCSI daemon is listening on (string
+# value)
+iscsi_ip_address={{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# The port that the iSCSI daemon is listening on (integer
+# value)
+#iscsi_port=3260
+
+# The maximum number of times to rescan targets to find volume
+# (integer value)
+# Deprecated group/name - [DEFAULT]/num_iscsi_scan_tries
+#num_volume_device_scan_tries=3
+
+# The backend name for a given driver implementation (string
+# value)
+volume_backend_name=LVM_iSCSI
+
+# Do we attach/detach volumes in cinder using multipath for
+# volume to image and image to volume transfers? (boolean
+# value)
+#use_multipath_for_image_xfer=false
+
+# Method used to wipe old volumes (valid options are: none,
+# zero, shred) (string value)
+#volume_clear=zero
+
+# Size in MiB to wipe at start of old volumes. 0 => all
+# (integer value)
+#volume_clear_size=0
+
+# The flag to pass to ionice to alter the i/o priority of the
+# process used to zero a volume after deletion, for example
+# "-c3" for idle only priority. (string value)
+#volume_clear_ionice=<None>
+
+# iSCSI target user-land tool to use. tgtadm is default, use
+# lioadm for LIO iSCSI support, iseradm for the ISER protocol,
+# or fake for testing. (string value)
+iscsi_helper=lioadm
+
+# Volume configuration file storage directory (string value)
+volumes_dir=$state_path/volumes
+
+# IET configuration file (string value)
+#iet_conf=/etc/iet/ietd.conf
+
+# Comma-separated list of initiator IQNs allowed to connect to
+# the iSCSI target. (From Nova compute nodes.) (string value)
+#lio_initiator_iqns=
+
+# Sets the behavior of the iSCSI target to either perform
+# blockio or fileio. Optionally, auto can be set and Cinder
+# will autodetect the type of backing device (string value)
+#iscsi_iotype=fileio
+
+# The default block size used when copying/clearing volumes
+# (string value)
+#volume_dd_blocksize=1M
+
+# The blkio cgroup name to be used to limit bandwidth of
+# volume copy (string value)
+#volume_copy_blkio_cgroup_name=cinder-volume-copy
+
+# The upper limit of bandwidth of volume copy. 0 => unlimited
+# (integer value)
+#volume_copy_bps_limit=0
+
+# Sets the behavior of the iSCSI target to either perform
+# write-back (on) or write-through (off). This parameter is
+# valid if iscsi_helper is set to tgtadm or iseradm. (string
+# value)
+#iscsi_write_cache=on
+
+# The path to the client certificate key for verification, if
+# the driver supports it. (string value)
+#driver_client_cert_key=<None>
+
+# The path to the client certificate for verification, if the
+# driver supports it. (string value)
+#driver_client_cert=<None>
+
+
+#
+# Options defined in cinder.volume.drivers.block_device
+#
+
+# List of all available devices (list value)
+#available_devices=
+
+
+#
+# Options defined in cinder.volume.drivers.coraid
+#
+
+# IP address of Coraid ESM (string value)
+#coraid_esm_address=
+
+# User name to connect to Coraid ESM (string value)
+#coraid_user=admin
+
+# Name of group on Coraid ESM to which coraid_user belongs
+# (must have admin privilege) (string value)
+#coraid_group=admin
+
+# Password to connect to Coraid ESM (string value)
+#coraid_password=password
+
+# Volume Type key name to store ESM Repository Name (string
+# value)
+#coraid_repository_key=coraid_repository
+
+
+#
+# Options defined in cinder.volume.drivers.datera
+#
+
+# Datera API token. (string value)
+#datera_api_token=<None>
+
+# Datera API port. (string value)
+#datera_api_port=7717
+
+# Datera API version. (string value)
+#datera_api_version=1
+
+# Number of replicas to create of an inode. (string value)
+#datera_num_replicas=3
+
+
+#
+# Options defined in cinder.volume.drivers.emc.emc_vmax_common
+#
+
+# use this file for cinder emc plugin config data (string
+# value)
+#cinder_emc_config_file=/etc/cinder/cinder_emc_config.xml
+
+
+#
+# Options defined in cinder.volume.drivers.emc.emc_vnx_cli
+#
+
+# VNX authentication scope type. (string value)
+#storage_vnx_authentication_type=global
+
+# Directory path that contains the VNX security file. Make
+# sure the security file is generated first. (string value)
+#storage_vnx_security_file_dir=<None>
+
+# Naviseccli Path. (string value)
+#naviseccli_path=
+
+# Storage pool name. (string value)
+#storage_vnx_pool_name=<None>
+
+# VNX secondary SP IP Address. (string value)
+#san_secondary_ip=<None>
+
+# Default timeout for CLI operations in minutes. For example,
+# LUN migration is a typical long running operation, which
+# depends on the LUN size and the load of the array. An upper
+# bound in the specific deployment can be set to avoid
+# unnecessary long wait. By default, it is 365 days long.
+# (integer value)
+#default_timeout=525600
+
+# Default max number of LUNs in a storage group. By default,
+# the value is 255. (integer value)
+#max_luns_per_storage_group=255
+
+# To destroy storage group when the last LUN is removed from
+# it. By default, the value is False. (boolean value)
+#destroy_empty_storage_group=false
+
+# Mapping between hostname and its iSCSI initiator IP
+# addresses. (string value)
+#iscsi_initiators=
+
+# Automatically register initiators. By default, the value is
+# False. (boolean value)
+#initiator_auto_registration=false
+
+
+#
+# Options defined in cinder.volume.drivers.eqlx
+#
+
+# Group name to use for creating volumes (string value)
+#eqlx_group_name=group-0
+
+# Timeout for the Group Manager cli command execution (integer
+# value)
+#eqlx_cli_timeout=30
+
+# Maximum retry count for reconnection (integer value)
+#eqlx_cli_max_retries=5
+
+# Use CHAP authentication for targets? (boolean value)
+#eqlx_use_chap=false
+
+# Existing CHAP account name (string value)
+#eqlx_chap_login=admin
+
+# Password for specified CHAP account name (string value)
+#eqlx_chap_password=password
+
+# Pool in which volumes will be created (string value)
+#eqlx_pool=default
+
+
+#
+# Options defined in cinder.volume.drivers.fujitsu_eternus_dx_common
+#
+
+# The configuration file for the Cinder SMI-S driver (string
+# value)
+#cinder_smis_config_file=/etc/cinder/cinder_fujitsu_eternus_dx.xml
+
+
+#
+# Options defined in cinder.volume.drivers.fusionio.ioControl
+#
+
+# amount of time to wait for iSCSI target to come online (integer
+# value)
+#fusionio_iocontrol_targetdelay=5
+
+# number of retries for GET operations (integer value)
+#fusionio_iocontrol_retry=3
+
+# verify the array certificate on each transaction (boolean
+# value)
+#fusionio_iocontrol_verify_cert=true
+
+
+#
+# Options defined in cinder.volume.drivers.glusterfs
+#
+
+# File with the list of available gluster shares (string
+# value)
+#glusterfs_shares_config=/etc/cinder/glusterfs_shares
+
+# Create volumes as sparsed files which take no space. If set
+# to False, the volume is created as a regular file. In that
+# case volume creation takes a lot of time. (boolean value)
+#glusterfs_sparsed_volumes=true
+
+# Create volumes as QCOW2 files rather than raw files.
+# (boolean value)
+#glusterfs_qcow2_volumes=false
+
+# Base dir containing mount points for gluster shares. (string
+# value)
+#glusterfs_mount_point_base=$state_path/mnt
+
+
+#
+# Options defined in cinder.volume.drivers.hds.hds
+#
+
+# The configuration file for the Cinder HDS driver for HUS
+# (string value)
+#hds_cinder_config_file=/opt/hds/hus/cinder_hus_conf.xml
+
+
+#
+# Options defined in cinder.volume.drivers.hds.iscsi
+#
+
+# Configuration file for HDS iSCSI cinder plugin (string
+# value)
+#hds_hnas_iscsi_config_file=/opt/hds/hnas/cinder_iscsi_conf.xml
+
+
+#
+# Options defined in cinder.volume.drivers.hds.nfs
+#
+
+# Configuration file for HDS NFS cinder plugin (string value)
+#hds_hnas_nfs_config_file=/opt/hds/hnas/cinder_nfs_conf.xml
+
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+
+# Name of an array unit (string value)
+#hitachi_unit_name=<None>
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+
+# Thin pool ID of storage system (integer value)
+#hitachi_thin_pool_id=<None>
+
+# Range of logical device of storage system (string value)
+#hitachi_ldev_range=<None>
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+
+# Copy speed of storage system (integer value)
+#hitachi_copy_speed=3
+
+# Interval to check copy (integer value)
+#hitachi_copy_check_interval=3
+
+# Interval to check copy asynchronously (integer value)
+#hitachi_async_copy_check_interval=10
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+
+# Range of group number (string value)
+#hitachi_group_range=<None>
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+#hitachi_group_request=false
+
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_fc
+#
+
+# Request for FC Zone creating HostGroup (boolean value)
+#hitachi_zoning_request=false
+
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm
+#
+
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_iscsi
+#
+
+# Add CHAP user (boolean value)
+#hitachi_add_chap_user=false
+
+# iSCSI authentication method (string value)
+#hitachi_auth_method=<None>
+
+# iSCSI authentication username (string value)
+#hitachi_auth_user=HBSD-CHAP-user
+
+# iSCSI authentication password (string value)
+#hitachi_auth_password=HBSD-CHAP-password
+
+
+#
+# Options defined in cinder.volume.drivers.huawei
+#
+
+# The configuration file for the Cinder Huawei driver (string
+# value)
+#cinder_huawei_conf_file=/etc/cinder/cinder_huawei_conf.xml
+
+
+#
+# Options defined in cinder.volume.drivers.ibm.gpfs
+#
+
+# Specifies the path of the GPFS directory where Block Storage
+# volume and snapshot files are stored. (string value)
+#gpfs_mount_point_base=<None>
+
+# Specifies the path of the Image service repository in GPFS.
+# Leave undefined if not storing images in GPFS. (string
+# value)
+#gpfs_images_dir=<None>
+
+# Specifies the type of image copy to be used. Set this when
+# the Image service repository also uses GPFS so that image
+# files can be transferred efficiently from the Image service
+# to the Block Storage service. There are two valid values:
+# "copy" specifies that a full copy of the image is made;
+# "copy_on_write" specifies that copy-on-write optimization
+# strategy is used and unmodified blocks of the image file are
+# shared efficiently. (string value)
+#gpfs_images_share_mode=<None>
+
+# Specifies an upper limit on the number of indirections
+# required to reach a specific block due to snapshots or
+# clones. A lengthy chain of copy-on-write snapshots or
+# clones can have a negative impact on performance, but
+# improves space utilization. 0 indicates unlimited clone
+# depth. (integer value)
+#gpfs_max_clone_depth=0
+
+# Specifies that volumes are created as sparse files which
+# initially consume no space. If set to False, the volume is
+# created as a fully allocated file, in which case, creation
+# may take a significantly longer time. (boolean value)
+#gpfs_sparse_volumes=true
+
+# Specifies the storage pool that volumes are assigned to. By
+# default, the system storage pool is used. (string value)
+#gpfs_storage_pool=system
+
+
+#
+# Options defined in cinder.volume.drivers.ibm.ibmnas
+#
+
+# IP address or Hostname of NAS system. (string value)
+#nas_ip=
+
+# User name to connect to NAS system. (string value)
+#nas_login=admin
+
+# Password to connect to NAS system. (string value)
+#nas_password=
+
+# SSH port to use to connect to NAS system. (integer value)
+#nas_ssh_port=22
+
+# Filename of private key to use for SSH authentication.
+# (string value)
+#nas_private_key=
+
+# IBMNAS platform type to be used as backend storage; valid
+# values are - v7ku : for using IBM Storwize V7000 Unified,
+# sonas : for using IBM Scale Out NAS, gpfs-nas : for using
+# NFS based IBM GPFS deployments. (string value)
+#ibmnas_platform_type=v7ku
+
+
+#
+# Options defined in cinder.volume.drivers.ibm.storwize_svc
+#
+
+# Storage system storage pool for volumes (string value)
+#storwize_svc_volpool_name=volpool
+
+# Storage system space-efficiency parameter for volumes
+# (percentage) (integer value)
+#storwize_svc_vol_rsize=2
+
+# Storage system threshold for volume capacity warnings
+# (percentage) (integer value)
+#storwize_svc_vol_warning=0
+
+# Storage system autoexpand parameter for volumes (True/False)
+# (boolean value)
+#storwize_svc_vol_autoexpand=true
+
+# Storage system grain size parameter for volumes
+# (32/64/128/256) (integer value)
+#storwize_svc_vol_grainsize=256
+
+# Storage system compression option for volumes (boolean
+# value)
+#storwize_svc_vol_compression=false
+
+# Enable Easy Tier for volumes (boolean value)
+#storwize_svc_vol_easytier=true
+
+# The I/O group in which to allocate volumes (integer value)
+#storwize_svc_vol_iogrp=0
+
+# Maximum number of seconds to wait for FlashCopy to be
+# prepared. Maximum value is 600 seconds (10 minutes) (integer
+# value)
+#storwize_svc_flashcopy_timeout=120
+
+# Connection protocol (iSCSI/FC) (string value)
+#storwize_svc_connection_protocol=iSCSI
+
+# Configure CHAP authentication for iSCSI connections
+# (Default: Enabled) (boolean value)
+#storwize_svc_iscsi_chap_enabled=true
+
+# Connect with multipath (FC only; iSCSI multipath is
+# controlled by Nova) (boolean value)
+#storwize_svc_multipath_enabled=false
+
+# Allow a vdisk to be mapped to multiple hosts (boolean value)
+#storwize_svc_multihostmap_enabled=true
+
+# Indicate whether svc driver is compatible for NPIV setup. If
+# it is compatible, it will allow no wwpns being returned on
+# get_conn_fc_wwpns during initialize_connection (boolean
+# value)
+#storwize_svc_npiv_compatibility_mode=false
+
+# Allow tenants to specify QOS on create (boolean value)
+#storwize_svc_allow_tenant_qos=false
+
+# If operating in stretched cluster mode, specify the name of
+# the pool in which mirrored copies are stored. Example:
+# "pool2" (string value)
+#storwize_svc_stretched_cluster_partner=<None>
+
+
+#
+# Options defined in cinder.volume.drivers.ibm.xiv_ds8k
+#
+
+# Proxy driver that connects to the IBM Storage Array (string
+# value)
+#xiv_ds8k_proxy=xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy
+
+# Connection type to the IBM Storage Array
+# (fibre_channel|iscsi) (string value)
+#xiv_ds8k_connection_type=iscsi
+
+# CHAP authentication mode, effective only for iscsi
+# (disabled|enabled) (string value)
+#xiv_chap=disabled
+
+
+#
+# Options defined in cinder.volume.drivers.lvm
+#
+
+# Name for the VG that will contain exported volumes (string
+# value)
+volume_group=cinder-volumes
+
+# If >0, create LVs with multiple mirrors. Note that this
+# requires lvm_mirrors + 2 PVs with available space (integer
+# value)
+#lvm_mirrors=0
+
+# Type of LVM volumes to deploy; (default or thin) (string
+# value)
+#lvm_type=default
+
+
+#
+# Options defined in cinder.volume.drivers.netapp.options
+#
+
+# The vFiler unit on which provisioning of block storage
+# volumes will be done. This option is only used by the driver
+# when connecting to an instance with a storage family of Data
+# ONTAP operating in 7-Mode and the storage protocol selected
+# is iSCSI. Only use this option when utilizing the MultiStore
+# feature on the NetApp storage system. (string value)
+#netapp_vfiler=<None>
+
+# Administrative user account name used to access the storage
+# system or proxy server. (string value)
+#netapp_login=<None>
+
+# Password for the administrative user account specified in
+# the netapp_login option. (string value)
+#netapp_password=<None>
+
+# This option specifies the virtual storage server (Vserver)
+# name on the storage cluster on which provisioning of block
+# storage volumes should occur. If using the NFS storage
+# protocol, this parameter is mandatory for storage service
+# catalog support (utilized by Cinder volume type extra_specs
+# support). If this option is specified, the exports belonging
+# to the Vserver will only be used for provisioning in the
+# future. Block storage volumes on exports not belonging to
+# the Vserver specified by this option will continue to
+# function normally. (string value)
+#netapp_vserver=<None>
+
+# The hostname (or IP address) for the storage system or proxy
+# server. (string value)
+#netapp_server_hostname=<None>
+
+# The TCP port to use for communication with the storage
+# system or proxy server. Traditionally, port 80 is used for
+# HTTP and port 443 is used for HTTPS; however, this value
+# should be changed if an alternate port has been configured
+# on the storage system or proxy server. (integer value)
+#netapp_server_port=80
+
+# This option is used to specify the path to the E-Series
+# proxy application on a proxy server. The value is combined
+# with the value of the netapp_transport_type,
+# netapp_server_hostname, and netapp_server_port options to
+# create the URL used by the driver to connect to the proxy
+# application. (string value)
+#netapp_webservice_path=/devmgr/v2
+
+# This option is only utilized when the storage family is
+# configured to eseries. This option is used to restrict
+# provisioning to the specified controllers. Specify the value
+# of this option to be a comma separated list of controller
+# hostnames or IP addresses to be used for provisioning.
+# (string value)
+#netapp_controller_ips=<None>
+
+# Password for the NetApp E-Series storage array. (string
+# value)
+#netapp_sa_password=<None>
+
+# This option is used to restrict provisioning to the
+# specified storage pools. Only dynamic disk pools are
+# currently supported. Specify the value of this option to be
+# a comma separated list of disk pool names to be used for
+# provisioning. (string value)
+#netapp_storage_pools=<None>
+
+# This option is used to define how the controllers in the
+# E-Series storage array will work with the particular
+# operating system on the hosts that are connected to it.
+# (string value)
+#netapp_eseries_host_type=linux_dm_mp
+
+# If the percentage of available space for an NFS share has
+# dropped below the value specified by this option, the NFS
+# image cache will be cleaned. (integer value)
+#thres_avl_size_perc_start=20
+
+# When the percentage of available space on an NFS share has
+# reached the percentage specified by this option, the driver
+# will stop clearing files from the NFS image cache that have
+# not been accessed in the last M minutes, where M is the
+# value of the expiry_thres_minutes configuration option.
+# (integer value)
+#thres_avl_size_perc_stop=60
+
+# This option specifies the threshold for last access time for
+# images in the NFS image cache. When a cache cleaning cycle
+# begins, images in the cache that have not been accessed in
+# the last M minutes, where M is the value of this parameter,
+# will be deleted from the cache to create free space on the
+# NFS share. (integer value)
+#expiry_thres_minutes=720
+
+# This option specifies the path of the NetApp copy offload
+# tool binary. Ensure that the binary has execute permissions
+# set which allow the effective user of the cinder-volume
+# process to execute the file. (string value)
+#netapp_copyoffload_tool_path=<None>
+
+# The quantity to be multiplied by the requested volume size
+# to ensure enough space is available on the virtual storage
+# server (Vserver) to fulfill the volume creation request.
+# (floating point value)
+#netapp_size_multiplier=1.2
+
+# This option is only utilized when the storage protocol is
+# configured to use iSCSI. This option is used to restrict
+# provisioning to the specified controller volumes. Specify
+# the value of this option to be a comma separated list of
+# NetApp controller volume names to be used for provisioning.
+# (string value)
+#netapp_volume_list=<None>
+
+# The storage family type used on the storage system; valid
+# values are ontap_7mode for using Data ONTAP operating in
+# 7-Mode, ontap_cluster for using clustered Data ONTAP, or
+# eseries for using E-Series. (string value)
+#netapp_storage_family=ontap_cluster
+
+# The storage protocol to be used on the data path with the
+# storage system; valid values are iscsi or nfs. (string
+# value)
+#netapp_storage_protocol=<None>
+
+# The transport protocol used when communicating with the
+# storage system or proxy server. Valid values are http or
+# https. (string value)
+#netapp_transport_type=http
+
+
+#
+# Options defined in cinder.volume.drivers.nexenta.options
+#
+
+# IP address of Nexenta SA (string value)
+#nexenta_host=
+
+# HTTP port to connect to Nexenta REST API server (integer
+# value)
+#nexenta_rest_port=2000
+
+# Use http or https for REST connection (default auto) (string
+# value)
+#nexenta_rest_protocol=auto
+
+# User name to connect to Nexenta SA (string value)
+#nexenta_user=admin
+
+# Password to connect to Nexenta SA (string value)
+#nexenta_password=nexenta
+
+# Nexenta target portal port (integer value)
+#nexenta_iscsi_target_portal_port=3260
+
+# SA Pool that holds all volumes (string value)
+#nexenta_volume=cinder
+
+# IQN prefix for iSCSI targets (string value)
+#nexenta_target_prefix=iqn.1986-03.com.sun:02:cinder-
+
+# Prefix for iSCSI target groups on SA (string value)
+#nexenta_target_group_prefix=cinder/
+
+# File with the list of available nfs shares (string value)
+#nexenta_shares_config=/etc/cinder/nfs_shares
+
+# Base directory that contains NFS share mount points (string
+# value)
+#nexenta_mount_point_base=$state_path/mnt
+
+# Enables or disables the creation of volumes as sparsed files
+# that take no space. If disabled (False), volume is created
+# as a regular file, which takes a long time. (boolean value)
+#nexenta_sparsed_volumes=true
+
+# Default compression value for new ZFS folders. (string
+# value)
+#nexenta_volume_compression=on
+
+# If set to True, cache the NexentaStor appliance volroot
+# option value. (boolean value)

+#nexenta_nms_cache_volroot=true
+
+# Enable stream compression, level 1..9. 1 - gives best speed;
+# 9 - gives best compression. (integer value)
+#nexenta_rrmgr_compression=0
+
+# TCP Buffer size in KiloBytes. (integer value)
+#nexenta_rrmgr_tcp_buf_size=4096
+
+# Number of TCP connections. (integer value)
+#nexenta_rrmgr_connections=2
+
+# Block size for volumes (default=blank means 8KB) (string
+# value)
+#nexenta_blocksize=
+
+# Enables or disables the creation of sparse volumes (boolean
+# value)
+#nexenta_sparse=false
+
+
+#
+# Options defined in cinder.volume.drivers.nfs
+#
+
+# File with the list of available nfs shares (string value)
+#nfs_shares_config=/etc/cinder/nfs_shares
+
+# Create volumes as sparsed files which take no space. If set
+# to False, the volume is created as a regular file. In that
+# case volume creation takes a lot of time. (boolean value)
+#nfs_sparsed_volumes=true
+
+# Percent of ACTUAL usage of the underlying volume before no
+# new volumes can be allocated to the volume destination.
+# (floating point value)
+#nfs_used_ratio=0.95
+
+# This will compare the allocated to available space on the
+# volume destination. If the ratio exceeds this number, the
+# destination will no longer be valid. (floating point value)
+#nfs_oversub_ratio=1.0
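+# Worked example (illustrative, not part of the upstream sample):
+# with nfs_used_ratio=0.95 a share that is already 96% full accepts
+# no new volumes, and with nfs_oversub_ratio=1.0 the total space
+# allocated to volumes may not exceed the space available on the
+# share.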
+
+# Base dir containing mount points for nfs shares. (string
+# value)
+#nfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the nfs client. See section of the
+# nfs man page for details. (string value)
+#nfs_mount_options=<None>
+
+
+#
+# Options defined in cinder.volume.drivers.nimble
+#
+
+# Nimble Controller pool name (string value)
+#nimble_pool_name=default
+
+# Nimble Subnet Label (string value)
+#nimble_subnet_label=*
+
+
+#
+# Options defined in cinder.volume.drivers.prophetstor.options
+#
+
+# DPL pool uuid in which DPL volumes are stored. (string
+# value)
+#dpl_pool=
+
+# DPL port number. (integer value)
+#dpl_port=8357
+
+
+#
+# Options defined in cinder.volume.drivers.pure
+#
+
+# REST API authorization token. (string value)
+#pure_api_token=<None>
+
+
+#
+# Options defined in cinder.volume.drivers.rbd
+#
+
+# The RADOS pool where rbd volumes are stored (string value)
+#rbd_pool=rbd
+
+# The RADOS client name for accessing rbd volumes - only set
+# when using cephx authentication (string value)
+#rbd_user=<None>
+
+# Path to the ceph configuration file (string value)
+#rbd_ceph_conf=
+
+# Flatten volumes created from snapshots to remove dependency
+# from volume to snapshot (boolean value)
+#rbd_flatten_volume_from_snapshot=false
+
+# The libvirt uuid of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=<None>
+
+# Directory where temporary image files are stored when the
+# volume driver does not write them directly to the volume.
+# (string value)
+#volume_tmp_dir=<None>
+
+# Maximum number of nested volume clones that are taken before
+# a flatten occurs. Set to 0 to disable cloning. (integer
+# value)
+#rbd_max_clone_depth=5
+
+# Volumes will be chunked into objects of this size (in
+# megabytes). (integer value)
+#rbd_store_chunk_size=4
+
+# Timeout value (in seconds) used when connecting to ceph
+# cluster. If value < 0, no timeout is set and default
+# librados value is used. (integer value)
+#rados_connect_timeout=-1
+
+
+#
+# Options defined in cinder.volume.drivers.remotefs
+#
+
+# IP address or Hostname of NAS system. (string value)
+#nas_ip=
+
+# User name to connect to NAS system. (string value)
+#nas_login=admin
+
+# Password to connect to NAS system. (string value)
+#nas_password=
+
+# SSH port to use to connect to NAS system. (integer value)
+#nas_ssh_port=22
+
+# Filename of private key to use for SSH authentication.
+# (string value)
+#nas_private_key=
+
+
+#
+# Options defined in cinder.volume.drivers.san.hp.hp_3par_common
+#
+
+# 3PAR WSAPI Server Url like https://<3par ip>:8080/api/v1
+# (string value)
+#hp3par_api_url=
+
+# 3PAR Super user username (string value)
+#hp3par_username=
+
+# 3PAR Super user password (string value)
+#hp3par_password=
+
+# The CPG to use for volume creation (string value)
+#hp3par_cpg=OpenStack
+
+# The CPG to use for Snapshots for volumes. If empty
+# hp3par_cpg will be used (string value)
+#hp3par_cpg_snap=
+
+# The time in hours to retain a snapshot. You can't delete it
+# before this expires. (string value)
+#hp3par_snapshot_retention=
+
+# The time in hours when a snapshot expires and is deleted.
+# This must be larger than the retention value. (string value)
+#hp3par_snapshot_expiration=
+
+# Enable HTTP debugging to 3PAR (boolean value)
+#hp3par_debug=false
+
+# List of target iSCSI addresses to use. (list value)
+#hp3par_iscsi_ips=
+
+# Enable CHAP authentication for iSCSI connections. (boolean
+# value)
+#hp3par_iscsi_chap_enabled=false
+
+
+#
+# Options defined in cinder.volume.drivers.san.hp.hp_lefthand_rest_proxy
+#
+
+# HP LeftHand WSAPI Server Url like https://<LeftHand
+# ip>:8081/lhos (string value)
+#hplefthand_api_url=<None>
+
+# HP LeftHand Super user username (string value)
+#hplefthand_username=<None>
+
+# HP LeftHand Super user password (string value)
+#hplefthand_password=<None>
+
+# HP LeftHand cluster name (string value)
+#hplefthand_clustername=<None>
+
+# Configure CHAP authentication for iSCSI connections
+# (Default: Disabled) (boolean value)
+#hplefthand_iscsi_chap_enabled=false
+
+# Enable HTTP debugging to LeftHand (boolean value)
+#hplefthand_debug=false
+
+
+#
+# Options defined in cinder.volume.drivers.san.hp.hp_msa_common
+#
+
+# The VDisk to use for volume creation. (string value)
+#msa_vdisk=OpenStack
+
+
+#
+# Options defined in cinder.volume.drivers.san.san
+#
+
+# Use thin provisioning for SAN volumes? (boolean value)
+#san_thin_provision=true
+
+# IP address of SAN controller (string value)
+#san_ip=
+
+# Username for SAN controller (string value)
+#san_login=admin
+
+# Password for SAN controller (string value)
+#san_password=
+
+# Filename of private key to use for SSH authentication
+# (string value)
+#san_private_key=
+
+# Cluster name to use for creating volumes (string value)
+#san_clustername=
+
+# SSH port to use with SAN (integer value)
+#san_ssh_port=22
+
+# Execute commands locally instead of over SSH; use if the
+# volume service is running on the SAN device (boolean value)
+#san_is_local=false
+
+# SSH connection timeout in seconds (integer value)
+#ssh_conn_timeout=30
+
+# Minimum ssh connections in the pool (integer value)
+#ssh_min_pool_conn=1
+
+# Maximum ssh connections in the pool (integer value)
+#ssh_max_pool_conn=5
+
+
+#
+# Options defined in cinder.volume.drivers.san.solaris
+#
+
+# The ZFS path under which to create zvols for volumes.
+# (string value)
+#san_zfs_volume_base=rpool/
+
+
+#
+# Options defined in cinder.volume.drivers.scality
+#
+
+# Path or URL to Scality SOFS configuration file (string
+# value)
+#scality_sofs_config=<None>
+
+# Base dir where Scality SOFS shall be mounted (string value)
+#scality_sofs_mount_point=$state_path/scality
+
+# Path from Scality SOFS root to volume dir (string value)
+#scality_sofs_volume_dir=cinder/volumes
+
+
+#
+# Options defined in cinder.volume.drivers.smbfs
+#
+
+# File with the list of available smbfs shares. (string value)
+#smbfs_shares_config=/etc/cinder/smbfs_shares
+
+# Default format that will be used when creating volumes if no
+# volume format is specified. Can be set to: raw, qcow2, vhd
+# or vhdx. (string value)
+#smbfs_default_volume_format=qcow2
+
+# Create volumes as sparsed files which take no space rather
+# than regular files when using raw format, in which case
+# volume creation takes a lot of time. (boolean value)
+#smbfs_sparsed_volumes=true
+
+# Percent of ACTUAL usage of the underlying volume before no
+# new volumes can be allocated to the volume destination.
+# (floating point value)
+#smbfs_used_ratio=0.95
+
+# This will compare the allocated to available space on the
+# volume destination. If the ratio exceeds this number, the
+# destination will no longer be valid. (floating point value)
+#smbfs_oversub_ratio=1.0
+
+# Base dir containing mount points for smbfs shares. (string
+# value)
+#smbfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the smbfs client. See mount.cifs man
+# page for details. (string value)
+#smbfs_mount_options=noperm,file_mode=0775,dir_mode=0775
+
+
+#
+# Options defined in cinder.volume.drivers.solidfire
+#
+
+# Set 512 byte emulation on volume creation. (boolean value)
+#sf_emulate_512=true
+
+# Allow tenants to specify QOS on create (boolean value)
+#sf_allow_tenant_qos=false
+
+# Create SolidFire accounts with this prefix. Any string can
+# be used here, but the string "hostname" is special and will
+# create a prefix using the cinder node hostname (previous
+# default behavior). The default is NO prefix. (string value)
+#sf_account_prefix=<None>
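+# Example (illustrative): restore the previous default behaviour of
+# prefixing accounts with the cinder node hostname.
+#sf_account_prefix=hostname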
+
+# SolidFire API port. Useful if the device api is behind a
+# proxy on a different port. (integer value)
+#sf_api_port=443
+
+
+#
+# Options defined in cinder.volume.drivers.vmware.vmdk
+#
+
+# IP address for connecting to VMware ESX/VC server. (string
+# value)
+#vmware_host_ip=<None>
+
+# Username for authenticating with VMware ESX/VC server.
+# (string value)
+#vmware_host_username=<None>
+
+# Password for authenticating with VMware ESX/VC server.
+# (string value)
+#vmware_host_password=<None>
+
+# Optional VIM service WSDL location, e.g.
+# http://<server>/vimService.wsdl. Optional override of the
+# default location for bug workarounds. (string value)
+#vmware_wsdl_location=<None>
+
+# Number of times VMware ESX/VC server API must be retried
+# upon connection related issues. (integer value)
+#vmware_api_retry_count=10
+
+# The interval (in seconds) for polling remote tasks invoked
+# on VMware ESX/VC server. (floating point value)
+#vmware_task_poll_interval=0.5
+
+# Name for the folder in the VC datacenter that will contain
+# cinder volumes. (string value)
+#vmware_volume_folder=cinder-volumes
+
+# Timeout in seconds for VMDK volume transfer between Cinder
+# and Glance. (integer value)
+#vmware_image_transfer_timeout_secs=7200
+
+# Max number of objects to be retrieved per batch. Query
+# results will be obtained in batches from the server and not
+# in one shot. Server may still limit the count to something
+# less than the configured value. (integer value)
+#vmware_max_objects_retrieval=100
+
+# Optional string specifying the VMware VC server version. The
+# driver attempts to retrieve the version from VMware VC
+# server. Set this configuration only if you want to override
+# the VC server version. (string value)
+#vmware_host_version=<None>
+
+# Directory where virtual disks are stored during volume
+# backup and restore. (string value)
+#vmware_tmp_dir=/tmp
+
+
+#
+# Options defined in cinder.volume.drivers.windows.windows
+#
+
+# Path to store VHD backed volumes (string value)
+#windows_iscsi_lun_path=C:\iSCSIVirtualDisks
+
+
+#
+# Options defined in cinder.volume.drivers.zadara
+#
+
+# Management IP of Zadara VPSA (string value)
+#zadara_vpsa_ip=<None>
+
+# Zadara VPSA port number (string value)
+#zadara_vpsa_port=<None>
+
+# Use SSL connection (boolean value)
+#zadara_vpsa_use_ssl=false
+
+# User name for the VPSA (string value)
+#zadara_user=<None>
+
+# Password for the VPSA (string value)
+#zadara_password=<None>
+
+# Name of VPSA storage pool for volumes (string value)
+#zadara_vpsa_poolname=<None>
+
+# Default thin provisioning policy for volumes (boolean value)
+#zadara_vol_thin=true
+
+# Default encryption policy for volumes (boolean value)
+#zadara_vol_encrypt=false
+
+# Default template for VPSA volume names (string value)
+#zadara_vol_name_template=OS_%s
+
+# Automatically detach from servers on volume delete (boolean
+# value)
+#zadara_vpsa_auto_detach_on_delete=true
+
+# Don't halt on deletion of non-existing volumes (boolean
+# value)
+#zadara_vpsa_allow_nonexistent_delete=true
+
+
+#
+# Options defined in cinder.volume.drivers.zfssa.zfssaiscsi
+#
+
+# Storage pool name. (string value)
+#zfssa_pool=<None>
+
+# Project name. (string value)
+#zfssa_project=<None>
+
+# Block size: 512, 1k, 2k, 4k, 8k, 16k, 32k, 64k, 128k.
+# (string value)
+#zfssa_lun_volblocksize=8k
+
+# Flag to enable sparse (thin-provisioned): True, False.
+# (boolean value)
+#zfssa_lun_sparse=false
+
+# Data compression: off, lzjb, gzip-2, gzip, gzip-9. (string
+# value)
+#zfssa_lun_compression=
+
+# Synchronous write bias: latency, throughput. (string value)
+#zfssa_lun_logbias=
+
+# iSCSI initiator group. (string value)
+#zfssa_initiator_group=
+
+# iSCSI initiator IQNs. (comma separated) (string value)
+#zfssa_initiator=
+
+# iSCSI initiator CHAP user. (string value)
+#zfssa_initiator_user=
+
+# iSCSI initiator CHAP password. (string value)
+#zfssa_initiator_password=
+
+# iSCSI target group name. (string value)
+#zfssa_target_group=tgt-grp
+
+# iSCSI target CHAP user. (string value)
+#zfssa_target_user=
+
+# iSCSI target CHAP password. (string value)
+#zfssa_target_password=
+
+# iSCSI target portal (Data-IP:Port, w.x.y.z:3260). (string
+# value)
+#zfssa_target_portal=<None>
+
+# Network interfaces of iSCSI targets. (comma separated)
+# (string value)
+#zfssa_target_interfaces=<None>
+
+# REST connection timeout. (seconds) (integer value)
+#zfssa_rest_timeout=<None>
+
+
+#
+# Options defined in cinder.volume.manager
+#
+
+# Driver to use for volume creation (string value)
+volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
+
+# Timeout for creating the volume to migrate to when
+# performing volume migration (seconds) (integer value)
+#migration_create_volume_timeout_secs=300
+
+# Offload pending volume delete during volume service startup
+# (boolean value)
+#volume_service_inithost_offload=false
+
+# FC Zoning mode configured (string value)
+#zoning_mode=none
+
+# User defined capabilities, a JSON formatted string
+# specifying key/value pairs. (string value)
+#extra_capabilities={}
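+# Example (illustrative key/value only) of advertising a
+# user-defined capability as a JSON formatted string:
+#extra_capabilities={"fast_backend": true}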
+
+
+[BRCD_FABRIC_EXAMPLE]
+
+#
+# Options defined in cinder.zonemanager.drivers.brocade.brcd_fabric_opts
+#
+
+# Management IP of fabric (string value)
+#fc_fabric_address=
+
+# Fabric user ID (string value)
+#fc_fabric_user=
+
+# Password for user (string value)
+#fc_fabric_password=
+
+# Connecting port (integer value)
+#fc_fabric_port=22
+
+# overridden zoning policy (string value)
+#zoning_policy=initiator-target
+
+# overridden zoning activation state (boolean value)
+#zone_activate=true
+
+# overridden zone name prefix (string value)
+#zone_name_prefix=<None>
+
+# Principal switch WWN of the fabric (string value)
+#principal_switch_wwn=<None>
+
+
+[CISCO_FABRIC_EXAMPLE]
+
+#
+# Options defined in cinder.zonemanager.drivers.cisco.cisco_fabric_opts
+#
+
+# Management IP of fabric (string value)
+#cisco_fc_fabric_address=
+
+# Fabric user ID (string value)
+#cisco_fc_fabric_user=
+
+# Password for user (string value)
+#cisco_fc_fabric_password=
+
+# Connecting port (integer value)
+#cisco_fc_fabric_port=22
+
+# overridden zoning policy (string value)
+#cisco_zoning_policy=initiator-target
+
+# overridden zoning activation state (boolean value)
+#cisco_zone_activate=true
+
+# overridden zone name prefix (string value)
+#cisco_zone_name_prefix=<None>
+
+# VSAN of the Fabric (string value)
+#cisco_zoning_vsan=<None>
+
+
+[database]
+
+#
+# Options defined in oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+#sqlite_db=oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous=true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the
+# database. (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+connection=postgresql://{{ CINDER_DB_USER }}:{{ CINDER_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/cinder
+
+# The SQLAlchemy connection string to use to connect to the
+# slave database. (string value)
+#slave_connection=<None>
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum number of database connection retries during
+# startup. Set to -1 to specify an infinite retry count.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a SQL connection.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information: 0=None,
+# 100=Everything. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add Python stack traces to SQL as comment strings. (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+# Enable the experimental use of database reconnect on
+# connection lost. (boolean value)
+#use_db_reconnect=false
+
+# Seconds between database connection retries. (integer value)
+#db_retry_interval=1
+
+# If True, increases the interval between database connection
+# retries up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval=true
+
+# If db_inc_retry_interval is set, the maximum seconds between
+# database connection retries. (integer value)
+#db_max_retry_interval=10
+
+# Maximum database connection retries before error is raised.
+# Set to -1 to specify an infinite retry count. (integer
+# value)
+#db_max_retries=20
+
+
+#
+# Options defined in oslo.db.concurrency
+#
+
+# Enable the experimental use of thread pooling for all DB API
+# calls (boolean value)
+# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
+#use_tpool=false
+
+
+[fc-zone-manager]
+
+#
+# Options defined in cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver
+#
+
+# Southbound connector for zoning operation (string value)
+#brcd_sb_connector=cinder.zonemanager.drivers.brocade.brcd_fc_zone_client_cli.BrcdFCZoneClientCLI
+
+
+#
+# Options defined in cinder.zonemanager.drivers.cisco.cisco_fc_zone_driver
+#
+
+# Southbound connector for zoning operation (string value)
+#cisco_sb_connector=cinder.zonemanager.drivers.cisco.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI
+
+
+#
+# Options defined in cinder.zonemanager.fc_zone_manager
+#
+
+# FC Zone Driver responsible for zone management (string
+# value)
+#zone_driver=cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver
+
+# Zoning policy configured by user (string value)
+#zoning_policy=initiator-target
+
+# Comma separated list of fibre channel fabric names. This
+# list of names is used to retrieve other SAN credentials for
+# connecting to each SAN fabric (string value)
+#fc_fabric_names=<None>
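+# Example (illustrative): list the per-fabric config section names
+# defined elsewhere in this file, such as the BRCD_FABRIC_EXAMPLE
+# and CISCO_FABRIC_EXAMPLE sections above.
+#fc_fabric_names=BRCD_FABRIC_EXAMPLE,CISCO_FABRIC_EXAMPLE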
+
+# FC San Lookup Service (string value)
+#fc_san_lookup_service=cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service.BrcdFCSanLookupService
+
+
+[keymgr]
+
+#
+# Options defined in cinder.keymgr
+#
+
+# The full class name of the key manager API class (string
+# value)
+#api_class=cinder.keymgr.conf_key_mgr.ConfKeyManager
+
+
+#
+# Options defined in cinder.keymgr.conf_key_mgr
+#
+
+# Fixed key returned by key manager, specified in hex (string
+# value)
+#fixed_key=<None>
+
+
+#
+# Options defined in cinder.keymgr.key_mgr
+#
+
+# Authentication url for encryption service. (string value)
+#encryption_auth_url=http://localhost:5000/v2.0
+
+# Url for encryption service. (string value)
+#encryption_api_url=http://localhost:9311/v1
+
+
+[keystone_authtoken]
+
+#
+# Options defined in keystonemiddleware.auth_token
+#
+
+# Prefix to prepend at the beginning of the path. Deprecated,
+# use identity_uri. (string value)
+#auth_admin_prefix=
+
+# Host providing the admin Identity API endpoint. Deprecated,
+# use identity_uri. (string value)
+#auth_host=127.0.0.1
+
+# Port of the admin Identity API endpoint. Deprecated, use
+# identity_uri. (integer value)
+#auth_port=35357
+
+# Protocol of the admin Identity API endpoint (http or https).
+# Deprecated, use identity_uri. (string value)
+#auth_protocol=https
+
+# Complete public Identity API endpoint (string value)
+auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+
+# Complete admin Identity API endpoint. This should specify
+# the unversioned root endpoint e.g. https://localhost:35357/
+# (string value)
+identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357
+
+# API version of the admin Identity API endpoint (string
+# value)
+#auth_version=<None>
+
+# Do not handle authorization requests within the middleware,
+# but delegate the authorization decision to downstream WSGI
+# components (boolean value)
+#delay_auth_decision=false
+
+# Request timeout value for communicating with Identity API
+# server. (integer value)
+#http_connect_timeout=<None>
+
+# How many times to retry reconnecting when communicating
+# with the Identity API Server. (integer value)
+#http_request_max_retries=3
+
+# This option is deprecated and may be removed in a future
+# release. Single shared secret with the Keystone
+# configuration used for bootstrapping a Keystone
+# installation, or otherwise bypassing the normal
+# authentication process. This option should not be used, use
+# `admin_user` and `admin_password` instead. (string value)
+#admin_token=<None>
+
+# Keystone account username (string value)
+admin_user={{ CINDER_SERVICE_USER }}
+
+# Keystone account password (string value)
+admin_password={{ CINDER_SERVICE_PASSWORD }}
+
+# Keystone service account tenant name to validate user tokens
+# (string value)
+admin_tenant_name=service
+
+# Env key for the swift cache (string value)
+#cache=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#certfile=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#keyfile=<None>
+
+# A PEM encoded Certificate Authority to use when verifying
+# HTTPs connections. Defaults to system CAs. (string value)
+#cafile=<None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure=false
+
+# Directory used to cache files related to PKI tokens (string
+# value)
+#signing_dir=<None>
+
+# Optionally specify a list of memcached server(s) to use for
+# caching. If left undefined, tokens will instead be cached
+# in-process. (list value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers=<None>
+
+# In order to prevent excessive effort spent validating
+# tokens, the middleware caches previously-seen tokens for a
+# configurable duration (in seconds). Set to -1 to disable
+# caching completely. (integer value)
+#token_cache_time=300
+
+# Determines the frequency at which the list of revoked tokens
+# is retrieved from the Identity service (in seconds). A high
+# number of revocation events combined with a low cache
+# duration may significantly reduce performance. (integer
+# value)
+#revocation_cache_time=10
+
+# (optional) if defined, indicate whether token data should be
+# authenticated or authenticated and encrypted. Acceptable
+# values are MAC or ENCRYPT. If MAC, token data is
+# authenticated (with HMAC) in the cache. If ENCRYPT, token
+# data is encrypted and authenticated in the cache. If the
+# value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+#memcache_security_strategy=<None>
+
+# (optional, mandatory if memcache_security_strategy is
+# defined) this string is used for key derivation. (string
+# value)
+#memcache_secret_key=<None>
+
+# (optional) number of seconds memcached server is considered
+# dead before it is tried again. (integer value)
+#memcache_pool_dead_retry=300
+
+# (optional) max total number of open connections to every
+# memcached server. (integer value)
+#memcache_pool_maxsize=10
+
+# (optional) socket timeout in seconds for communicating with
+# a memcache server. (integer value)
+#memcache_pool_socket_timeout=3
+
+# (optional) number of seconds a connection to memcached is
+# held unused in the pool before it is closed. (integer value)
+#memcache_pool_unused_timeout=60
+
+# (optional) number of seconds that an operation will wait to
+# get a memcache client connection from the pool. (integer
+# value)
+#memcache_pool_conn_get_timeout=10
+
+# (optional) use the advanced (eventlet safe) memcache client
+# pool. The advanced pool will only work under python 2.x.
+# (boolean value)
+#memcache_use_advanced_pool=false
+
+# (optional) indicate whether to set the X-Service-Catalog
+# header. If False, middleware will not ask for service
+# catalog on token validation and will not set the X-Service-
+# Catalog header. (boolean value)
+#include_service_catalog=true
+
+# Used to control the use and type of token binding. Can be
+# set to: "disabled" to not check token binding. "permissive"
+# (default) to validate binding information if the bind type
+# is of a form known to the server and ignore it if not.
+# "strict" like "permissive" but if the bind type is unknown
+# the token will be rejected. "required" any form of token
+# binding is needed to be allowed. Finally the name of a
+# binding method that must be present in tokens. (string
+# value)
+#enforce_token_bind=permissive
+
+# If true, the revocation list will be checked for cached
+# tokens. This requires that PKI tokens are configured on the
+# Keystone server. (boolean value)
+#check_revocations_for_cached=false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a
+# single algorithm or multiple. The algorithms are those
+# supported by Python standard hashlib.new(). The hashes will
+# be tried in the order given, so put the preferred one first
+# for performance. The result of the first hash will be stored
+# in the cache. This will typically be set to multiple values
+# only while migrating from a less secure algorithm to a more
+# secure one. Once all the old tokens are expired this option
+# should be set to a single value for better performance.
+# (list value)
+#hash_algorithms=md5
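+# Example (illustrative): while migrating PKI token hashing from md5
+# to a stronger algorithm, list the preferred algorithm first.
+#hash_algorithms=sha256,md5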
+
+
+[matchmaker_redis]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[oslo_messaging_amqp]
+
+#
+# Options defined in oslo.messaging
+#
+# NOTE: Options in this group are supported when using oslo.messaging >=1.5.0.
+
+# address prefix used when sending to a specific server
+# (string value)
+#server_request_prefix=exclusive
+
+# address prefix used when broadcasting to all servers (string
+# value)
+#broadcast_prefix=broadcast
+
+# address prefix when sending to any server in group (string
+# value)
+#group_request_prefix=unicast
+
+# Name for the AMQP container (string value)
+#container_name=<None>
+
+# Timeout for inactive connections (in seconds) (integer
+# value)
+#idle_timeout=0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+#trace=false
+
+# CA certificate PEM file for verifying server certificate
+# (string value)
+#ssl_ca_file=
+
+# Identifying certificate PEM file to present to clients
+# (string value)
+#ssl_cert_file=
+
+# Private key PEM file used to sign cert_file certificate
+# (string value)
+#ssl_key_file=
+
+# Password for decrypting ssl_key_file (if encrypted) (string
+# value)
+#ssl_key_password=<None>
+
+# Accept clients using either SSL or plain TCP (boolean value)
+#allow_insecure_clients=false
+
+
+[profiler]
+
+#
+# Options defined in cinder.service
+#
+
+# If False, fully disable the profiling feature. (boolean value)
+#profiler_enabled=false
+
+# If False, do not trace SQL requests. (boolean value)
+#trace_sqlalchemy=false
+
+
+[ssl]
+
+#
+# Options defined in cinder.openstack.common.sslutils
+#
+
+# CA certificate file to use to verify connecting clients
+# (string value)
+#ca_file=<None>
+
+# Certificate file to use when starting the server securely
+# (string value)
+#cert_file=<None>
+
+# Private key file to use when starting the server securely
+# (string value)
+#key_file=<None>
+
+
diff --git a/install-files/openstack/usr/share/openstack/cinder/policy.json b/install-files/openstack/usr/share/openstack/cinder/policy.json
new file mode 100644
index 00000000..8f3a7b2f
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/cinder/policy.json
@@ -0,0 +1,80 @@
+{
+ "context_is_admin": "role:admin",
+ "admin_or_owner": "is_admin:True or project_id:%(project_id)s",
+ "default": "rule:admin_or_owner",
+
+ "admin_api": "is_admin:True",
+
+ "volume:create": "",
+ "volume:get_all": "",
+ "volume:get_volume_metadata": "",
+ "volume:get_volume_admin_metadata": "rule:admin_api",
+ "volume:delete_volume_admin_metadata": "rule:admin_api",
+ "volume:update_volume_admin_metadata": "rule:admin_api",
+ "volume:get_snapshot": "",
+ "volume:get_all_snapshots": "",
+ "volume:extend": "",
+ "volume:update_readonly_flag": "",
+ "volume:retype": "",
+
+ "volume_extension:types_manage": "rule:admin_api",
+ "volume_extension:types_extra_specs": "rule:admin_api",
+ "volume_extension:volume_type_encryption": "rule:admin_api",
+ "volume_extension:volume_encryption_metadata": "rule:admin_or_owner",
+ "volume_extension:extended_snapshot_attributes": "",
+ "volume_extension:volume_image_metadata": "",
+
+ "volume_extension:quotas:show": "",
+ "volume_extension:quotas:update": "rule:admin_api",
+ "volume_extension:quota_classes": "",
+
+ "volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
+ "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
+ "volume_extension:backup_admin_actions:reset_status": "rule:admin_api",
+ "volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
+ "volume_extension:volume_admin_actions:force_detach": "rule:admin_api",
+ "volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api",
+ "volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api",
+ "volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api",
+
+ "volume_extension:volume_host_attribute": "rule:admin_api",
+ "volume_extension:volume_tenant_attribute": "rule:admin_or_owner",
+ "volume_extension:volume_mig_status_attribute": "rule:admin_api",
+ "volume_extension:hosts": "rule:admin_api",
+ "volume_extension:services": "rule:admin_api",
+
+ "volume_extension:volume_manage": "rule:admin_api",
+ "volume_extension:volume_unmanage": "rule:admin_api",
+
+ "volume:services": "rule:admin_api",
+
+ "volume:create_transfer": "",
+ "volume:accept_transfer": "",
+ "volume:delete_transfer": "",
+ "volume:get_all_transfers": "",
+
+ "volume_extension:replication:promote": "rule:admin_api",
+ "volume_extension:replication:reenable": "rule:admin_api",
+
+ "backup:create" : "",
+ "backup:delete": "",
+ "backup:get": "",
+ "backup:get_all": "",
+ "backup:restore": "",
+ "backup:backup-import": "rule:admin_api",
+ "backup:backup-export": "rule:admin_api",
+
+ "snapshot_extension:snapshot_actions:update_snapshot_status": "",
+
+ "consistencygroup:create" : "group:nobody",
+ "consistencygroup:delete": "group:nobody",
+ "consistencygroup:get": "group:nobody",
+ "consistencygroup:get_all": "group:nobody",
+
+ "consistencygroup:create_cgsnapshot" : "",
+ "consistencygroup:delete_cgsnapshot": "",
+ "consistencygroup:get_cgsnapshot": "",
+ "consistencygroup:get_all_cgsnapshots": "",
+
+ "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api"
+}
diff --git a/install-files/openstack/usr/share/openstack/extras/00-disable-device.network b/install-files/openstack/usr/share/openstack/extras/00-disable-device.network
new file mode 100644
index 00000000..8e2532d0
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/extras/00-disable-device.network
@@ -0,0 +1,2 @@
+[Match]
+Name={{ item }}
diff --git a/install-files/openstack/usr/share/openstack/extras/60-device-dhcp.network b/install-files/openstack/usr/share/openstack/extras/60-device-dhcp.network
new file mode 100644
index 00000000..6fdbfd8d
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/extras/60-device-dhcp.network
@@ -0,0 +1,5 @@
+[Match]
+Name={{ item }}
+
+[Network]
+DHCP=yes
diff --git a/install-files/openstack/usr/share/openstack/glance.yml b/install-files/openstack/usr/share/openstack/glance.yml
new file mode 100644
index 00000000..aa7e4c78
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/glance.yml
@@ -0,0 +1,93 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/glance.conf"
+ tasks:
+ - name: Create the glance user.
+ user:
+ name: glance
+ comment: Openstack Glance Daemons
+ shell: /sbin/nologin
+ home: /var/lib/glance
+
+ - name: Create the /var folders for glance
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: glance
+ group: glance
+ with_items:
+ - /var/run/glance
+ - /var/lock/glance
+ - /var/log/glance
+ - /var/lib/glance
+ - /var/lib/glance/images
+ - /var/lib/glance/image-cache
+
+ - name: Create /etc/glance directory
+ file:
+ path: /etc/glance
+ state: directory
+
+ - name: Add the configuration needed for glance in /etc/glance using templates
+ template:
+ src: /usr/share/openstack/glance/{{ item }}
+ dest: /etc/glance/{{ item }}
+ with_lines:
+ - cd /usr/share/openstack/glance && find -type f
+
+ - name: Create glance service user in service tenant
+ keystone_user:
+ user: "{{ GLANCE_SERVICE_USER }}"
+ password: "{{ GLANCE_SERVICE_PASSWORD }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Assign admin role to the glance service user in the service tenant
+ keystone_user:
+ role: admin
+ user: "{{ GLANCE_SERVICE_USER }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Add glance endpoint
+ keystone_service:
+ name: glance
+ type: image
+ description: Openstack Image Service
+ publicurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9292
+ internalurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9292
+ adminurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9292
+ region: regionOne
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Create postgresql user for glance
+ postgresql_user:
+ name: "{{ GLANCE_DB_USER }}"
+ login_host: "{{ CONTROLLER_HOST_ADDRESS }}"
+ password: "{{ GLANCE_DB_PASSWORD }}"
+ sudo: yes
+ sudo_user: glance
+
+ - name: Create database for glance services
+ postgresql_db:
+ name: glance
+ owner: "{{ GLANCE_DB_USER }}"
+ login_host: "{{ CONTROLLER_HOST_ADDRESS }}"
+ sudo: yes
+ sudo_user: glance
+
+ - name: Initiate glance database
+ glance_manage:
+ action: dbsync
+ sudo: yes
+ sudo_user: glance
+
+ - name: Enable and start openstack-glance services
+ service:
+ name: "{{ item }}"
+ enabled: yes
+ state: started
+ with_items:
+ - openstack-glance-api.service
+ - openstack-glance-registry.service
diff --git a/install-files/openstack/usr/share/openstack/glance/glance-api-paste.ini b/install-files/openstack/usr/share/openstack/glance/glance-api-paste.ini
new file mode 100644
index 00000000..86a4cdb1
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/glance/glance-api-paste.ini
@@ -0,0 +1,77 @@
+# Use this pipeline for no auth or image caching - DEFAULT
+[pipeline:glance-api]
+pipeline = versionnegotiation osprofiler unauthenticated-context rootapp
+
+# Use this pipeline for image caching and no auth
+[pipeline:glance-api-caching]
+pipeline = versionnegotiation osprofiler unauthenticated-context cache rootapp
+
+# Use this pipeline for caching w/ management interface but no auth
+[pipeline:glance-api-cachemanagement]
+pipeline = versionnegotiation osprofiler unauthenticated-context cache cachemanage rootapp
+
+# Use this pipeline for keystone auth
+[pipeline:glance-api-keystone]
+pipeline = versionnegotiation osprofiler authtoken context rootapp
+
+# Use this pipeline for keystone auth with image caching
+[pipeline:glance-api-keystone+caching]
+pipeline = versionnegotiation osprofiler authtoken context cache rootapp
+
+# Use this pipeline for keystone auth with caching and cache management
+[pipeline:glance-api-keystone+cachemanagement]
+pipeline = versionnegotiation osprofiler authtoken context cache cachemanage rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user.
+[pipeline:glance-api-trusted-auth]
+pipeline = versionnegotiation osprofiler context rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user and uses cache management
+[pipeline:glance-api-trusted-auth+cachemanagement]
+pipeline = versionnegotiation osprofiler context cache cachemanage rootapp
+
+[composite:rootapp]
+paste.composite_factory = glance.api:root_app_factory
+/: apiversions
+/v1: apiv1app
+/v2: apiv2app
+
+[app:apiversions]
+paste.app_factory = glance.api.versions:create_resource
+
+[app:apiv1app]
+paste.app_factory = glance.api.v1.router:API.factory
+
+[app:apiv2app]
+paste.app_factory = glance.api.v2.router:API.factory
+
+[filter:versionnegotiation]
+paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
+
+[filter:cache]
+paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
+
+[filter:cachemanage]
+paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory
+
+[filter:context]
+paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+delay_auth_decision = true
+
+[filter:gzip]
+paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
diff --git a/install-files/openstack/usr/share/openstack/glance/glance-api.conf b/install-files/openstack/usr/share/openstack/glance/glance-api.conf
new file mode 100644
index 00000000..39257a6d
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/glance/glance-api.conf
@@ -0,0 +1,699 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Which backend scheme should Glance use by default if not specified
+# in a request to add a new image to Glance? Known schemes are determined
+# by the known_stores option below.
+# Default: 'file'
+default_store = file
+
+# Maximum image size (in bytes) that may be uploaded through the
+# Glance API server. Defaults to 1 TB.
+# WARNING: this value should only be increased after careful consideration
+# and must be set to a value under 8 EB (9223372036854775808).
+#image_size_cap = 1099511627776
+
+# Address to bind the API server
+bind_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# Port to bind the API server to
+bind_port = 9292
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+# log_file = /var/log/glance/api.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package, it is also possible to use: glance.db.registry.api
+# data_api = glance.db.sqlalchemy.api
+
+# The number of child process workers that will be
+# created to service API requests. The default will be
+# equal to the number of CPUs available. (integer value)
+#workers = 4
+
+# Maximum line size of message headers to be accepted.
+# max_header_line may need to be increased when using large tokens
+# (typically those generated by the Keystone v3 API with big service
+# catalogs)
+# max_header_line = 16384
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Allow unauthenticated users to access the API with read-only
+# privileges. This only applies when using ContextMiddleware.
+#allow_anonymous_access = False
+
+# Allow access to version 1 of glance api
+#enable_v1_api = True
+
+# Allow access to version 2 of glance api
+#enable_v2_api = True
+
+# Return the URL that references where the data is stored on
+# the backend storage system. For example, if using the
+# file system store a URL of 'file:///path/to/image' will
+# be returned to the user in the 'direct_url' meta-data field.
+# The default value is false.
+#show_image_direct_url = False
+
+# Send headers containing user and tenant information when making requests to
+# the v1 glance registry. This allows the registry to function as if a user is
+# authenticated without the need to authenticate a user itself using the
+# auth_token middleware.
+# The default value is false.
+#send_identity_headers = False
+
+# Supported values for the 'container_format' image attribute
+#container_formats=ami,ari,aki,bare,ovf,ova
+
+# Supported values for the 'disk_format' image attribute
+#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso
+
+# Directory to use for lock files. Defaults to a temp directory
+# (string value). This setting needs to be the same for both
+# glance-scrubber and glance-api.
+#lock_path=<None>
+
+# Property Protections config file
+# This file contains the rules for property protections and the roles/policies
+# associated with it.
+# If this config value is not specified, by default, property protections
+# won't be enforced.
+# If a value is specified and the file is not found, then the glance-api
+# service will not start.
+#property_protection_file =
+
+# Specify whether 'roles' or 'policies' are used in the
+# property_protection_file.
+# The default value for property_protection_rule_format is 'roles'.
+#property_protection_rule_format = roles
+
+# This value sets what strategy will be used to determine the image location
+# order. Currently two strategies are packaged with Glance 'location_order'
+# and 'store_type'.
+#location_strategy = location_order
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+use_syslog = True
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL0
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting API server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
+
+# ============ Registry Options ===============================
+
+# Address to find the registry server
+registry_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# What protocol to use when connecting to the registry server?
+# Set to https for secure HTTP communication
+registry_client_protocol = http
+
+# The path to the key file to use in SSL connections to the
+# registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file
+#registry_client_key_file = /path/to/key/file
+
+# The path to the cert file to use in SSL connections to the
+# registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file
+#registry_client_cert_file = /path/to/cert/file
+
+# The path to the certifying authority cert file to use in SSL connections
+# to the registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file
+#registry_client_ca_file = /path/to/ca/file
+
+# When using SSL in connections to the registry server, do not require
+# validation via a certifying authority. This is the registry's equivalent of
+# specifying --insecure on the command line using glanceclient for the API
+# Default: False
+#registry_client_insecure = False
+
+# The period of time, in seconds, that the API server will wait for a registry
+# request to complete. A value of '0' implies no timeout.
+# Default: 600
+#registry_client_timeout = 600
+
+# Whether to automatically create the database tables.
+# Default: False
+#db_auto_create = False
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+#sqlalchemy_debug = True
+
+# Pass the user's token through for API requests to the registry.
+# Default: True
+#use_user_token = True
+
+# If 'use_user_token' is not in effect then admin credentials
+# can be specified. Requests to the registry on behalf of
+# the API will use these credentials.
+# Admin user name
+#admin_user = None
+# Admin password
+#admin_password = None
+# Admin tenant name
+#admin_tenant_name = None
+# Keystone endpoint
+#auth_url = None
+# Keystone region
+#auth_region = None
+# Auth strategy
+#auth_strategy = keystone
+
+# ============ Notification System Options =====================
+
+# Driver or drivers to handle sending notifications. Set to
+# 'messaging' to send notifications to a message queue.
+notification_driver = messagingv2
+
+# Default publisher_id for outgoing notifications.
+# default_publisher_id = image.localhost
+
+# Messaging driver used for 'messaging' notifications driver
+rpc_backend=rabbit
+
+# Configuration options if sending notifications via rabbitmq
+rabbit_host = {{ RABBITMQ_HOST }}
+rabbit_port = {{ RABBITMQ_PORT }}
+rabbit_use_ssl = false
+rabbit_userid = {{ RABBITMQ_USER }}
+rabbit_password = {{ RABBITMQ_PASSWORD }}
+rabbit_virtual_host = /
+rabbit_notification_exchange = glance
+rabbit_notification_topic = notifications
+rabbit_durable_queues = False
+
+# Configuration options if sending notifications via Qpid (these are
+# the defaults)
+#qpid_notification_exchange = glance
+#qpid_notification_topic = notifications
+#qpid_hostname = localhost
+#qpid_port = 5672
+#qpid_username =
+#qpid_password =
+#qpid_sasl_mechanisms =
+#qpid_reconnect_timeout = 0
+#qpid_reconnect_limit = 0
+#qpid_reconnect_interval_min = 0
+#qpid_reconnect_interval_max = 0
+#qpid_reconnect_interval = 0
+#qpid_heartbeat = 5
+# Set to 'ssl' to enable SSL
+#qpid_protocol = tcp
+#qpid_tcp_nodelay = True
+
+# ============ Delayed Delete Options =============================
+
+# Turn on/off delayed delete
+delayed_delete = False
+
+# Delayed delete time in seconds
+scrub_time = 43200
+
+# Directory that the scrubber will use to remind itself of what to delete
+# Make sure this is also set in glance-scrubber.conf
+scrubber_datadir = /var/lib/glance/scrubber
+
+# =============== Quota Options ==================================
+
+# The maximum number of image members allowed per image
+#image_member_quota = 128
+
+# The maximum number of image properties allowed per image
+#image_property_quota = 128
+
+# The maximum number of tags allowed per image
+#image_tag_quota = 128
+
+# The maximum number of locations allowed per image
+#image_location_quota = 10
+
+# Set a system wide quota for every user. This value is the total number
+# of bytes that a user can use across all storage systems. A value of
+# 0 means unlimited.
+#user_storage_quota = 0
+
+# =============== Image Cache Options =============================
+
+# Base directory that the Image Cache uses
+image_cache_dir = /var/lib/glance/image-cache/
+
+# =============== Database Options =================================
+
+[database]
+# The file name to use with SQLite (string value)
+#sqlite_db = oslo.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous = True
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+connection=postgresql://{{ GLANCE_DB_USER }}:{{ GLANCE_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/glance
+
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = <None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect = False
+
+# seconds between db connection retries (integer value)
+#db_retry_interval = 1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval = True
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval = 10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries = 20
+
+[keystone_authtoken]
+auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357
+admin_tenant_name = service
+admin_user = {{ GLANCE_SERVICE_USER }}
+admin_password = {{ GLANCE_SERVICE_PASSWORD }}
+revocation_cache_time = 10
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+#config_file = glance-api-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-api-keystone], you would configure the flavor below
+# as 'keystone'.
+flavor=keystone
+
+[store_type_location_strategy]
+# The scheme list to use to get store preference order. The scheme must be
+# registered by one of the stores defined by the 'known_stores' config option.
+# This option is applied when you use the 'store_type' option as the image
+# location strategy defined by the 'location_strategy' config option.
+#store_type_preference =
+
+[profiler]
+# If False, fully disable the profiling feature.
+#enabled = False
+
+# If False, do not trace SQL requests.
+#trace_sqlalchemy = False
+
+[task]
+# ================= Glance Tasks Options ============================
+
+# Specifies how long (in hours) a task is supposed to live in the tasks DB
+# after succeeding or failing before getting soft-deleted.
+# The default value for task_time_to_live is 48 hours.
+# task_time_to_live = 48
+
+# Specifies which task executor to be used to run the task scripts.
+# The default value for task_executor is eventlet.
+# task_executor = eventlet
+
+# Specifies the maximum number of eventlet threads which can be spun up by
+# the eventlet based task executor to perform execution of Glance tasks.
+# eventlet_executor_pool_size = 1000
+
+[glance_store]
+# List of which store classes and store class locations are
+# currently known to glance at startup.
+# Existing but disabled stores:
+# glance.store.rbd.Store,
+# glance.store.s3.Store,
+# glance.store.swift.Store,
+# glance.store.sheepdog.Store,
+# glance.store.cinder.Store,
+# glance.store.gridfs.Store,
+# glance.store.vmware_datastore.Store,
+#stores = glance.store.filesystem.Store,
+# glance.store.http.Store
+
+# ============ Filesystem Store Options ========================
+
+# Directory that the Filesystem backend store
+# writes image data to
+filesystem_store_datadir = /var/lib/glance/images/
+
+# A list of directories where image data can be stored.
+# This option may be specified multiple times for specifying multiple store
+# directories. Either one of filesystem_store_datadirs or
+# filesystem_store_datadir option is required. A priority number may be given
+# after each directory entry, separated by a ":".
+# When adding an image, the highest priority directory will be selected, unless
+# there is not enough space available in cases where the image size is already
+# known. If no priority is given, it is assumed to be zero and the directory
+# will be considered for selection last. If multiple directories have the same
+# priority, then the one with the most free space available is selected.
+# If the same store is specified multiple times then a BadStoreConfiguration
+# exception will be raised.
+#filesystem_store_datadirs = /var/lib/glance/images/:1
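+# Example (illustrative paths and priorities only): prefer the first,
+# higher-priority directory and fall back to the second one.
+#filesystem_store_datadirs = /srv/glance/fast/:200
+#filesystem_store_datadirs = /srv/glance/bulk/:100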
+
+# A path to a JSON file that contains metadata describing the storage
+# system. When show_multiple_locations is True the information in this
+# file will be returned with any location that is contained in this
+# store.
+#filesystem_store_metadata_file = None
+
+# ============ Swift Store Options =============================
+
+# Version of the authentication service to use
+# Valid versions are '2' for keystone and '1' for swauth and rackspace
+swift_store_auth_version = 2
+
+# Address where the Swift authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'https://'
+# For swauth, use something like '127.0.0.1:8080/v1.0/'
+swift_store_auth_address = 127.0.0.1:5000/v2.0/
+
+# User to authenticate against the Swift authentication service
+# If you use Swift authentication service, set it to 'account':'user'
+# where 'account' is a Swift storage account and 'user'
+# is a user in that account
+swift_store_user = jdoe:jdoe
+
+# Auth key for the user authenticating against the
+# Swift authentication service
+swift_store_key = a86850deb2742ec3cb41518e26aa2d89
+
+# Container within the account that the account should use
+# for storing images in Swift
+swift_store_container = glance
+
+# Do we create the container if it does not exist?
+swift_store_create_container_on_put = False
+
+# What size, in MB, should Glance start chunking image files
+# and do a large object manifest in Swift? By default, this is
+# the maximum object size in Swift, which is 5GB
+swift_store_large_object_size = 5120
+
+# swift_store_config_file = glance-swift.conf
+# This file contains references for each of the configured
+# Swift accounts/backing stores. If used, this option can prevent
+# credentials being stored in the database. Using Swift references
+# is disabled if this config is left blank.
+
+# The reference to the default Swift parameters to use for adding new images.
+# default_swift_reference = 'ref1'
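+#
+# For illustration only: a glance-swift.conf stanza is assumed to look roughly
+# like the following (all names and values are placeholders):
+# [ref1]
+# user = service:glance
+# key = <swift password>
+# auth_address = http://127.0.0.1:5000/v2.0/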
+
+# When doing a large object manifest, what size, in MB, should
+# Glance write chunks to Swift? This amount of data is written
+# to a temporary disk buffer during the process of chunking
+# the image file, and the default is 200MB
+swift_store_large_object_chunk_size = 200
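+#
+# Worked example with the two settings above: a 6144 MB (6 GB) image exceeds
+# the 5120 MB threshold, so it would be uploaded as roughly 31 segments of
+# 200 MB (6144 / 200, rounded up) plus a manifest object.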
+
+# Whether to use ServiceNET to communicate with the Swift storage servers.
+# (If you aren't RACKSPACE, leave this False!)
+#
+# To use ServiceNET for authentication, prefix hostname of
+# `swift_store_auth_address` with 'snet-'.
+# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
+swift_enable_snet = False
+
+# If set to True enables multi-tenant storage mode which causes Glance images
+# to be stored in tenant specific Swift accounts.
+#swift_store_multi_tenant = False
+
+# A list of swift ACL strings that will be applied as both read and
+# write ACLs to the containers created by Glance in multi-tenant
+# mode. This grants the specified tenants/users read and write access
+# to all newly created image objects. The standard swift ACL string
+# formats are allowed, including:
+# <tenant_id>:<username>
+# <tenant_name>:<username>
+# *:<username>
+# Multiple ACLs can be combined using a comma separated list, for
+# example: swift_store_admin_tenants = service:glance,*:admin
+#swift_store_admin_tenants =
+
+# The region of the swift endpoint to be used for single tenant. This setting
+# is only necessary if the tenant has multiple swift endpoints.
+#swift_store_region =
+
+# If set to False, disables SSL layer compression of https swift requests.
+# Setting to 'False' may improve performance for images which are already
+# in a compressed format, eg qcow2. If set to True, enables SSL layer
+# compression (provided it is supported by the target swift proxy).
+#swift_store_ssl_compression = True
+
+# The number of times a Swift download will be retried before the
+# request fails
+#swift_store_retry_get_count = 0
+
+# Bypass SSL verification for Swift
+#swift_store_auth_insecure = False
+
+# ============ S3 Store Options =============================
+
+# Address where the S3 authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'http://'
+s3_store_host = 127.0.0.1:8080/v1.0/
+
+# User to authenticate against the S3 authentication service
+s3_store_access_key = <20-char AWS access key>
+
+# Auth key for the user authenticating against the
+# S3 authentication service
+s3_store_secret_key = <40-char AWS secret key>
+
+# Container within the account that the account should use
+# for storing images in S3. Note that S3 has a flat namespace,
+# so you need a unique bucket name for your glance images. An
+# easy way to do this is to append your AWS access key to "glance".
+# S3 buckets in AWS *must* be lowercased, so remember to lowercase
+# your AWS access key if you use it in your bucket name below!
+s3_store_bucket = <lowercased 20-char aws access key>glance
+
+# Do we create the bucket if it does not exist?
+s3_store_create_bucket_on_put = False
+
+# When sending images to S3, the data will first be written to a
+# temporary buffer on disk. By default the platform's temporary directory
+# will be used. If required, an alternative directory can be specified here.
+#s3_store_object_buffer_dir = /path/to/dir
+
+# When forming a bucket url, boto will either set the bucket name as the
+# subdomain or as the first token of the path. Amazon's S3 service will
+# accept it as the subdomain, but Swift's S3 middleware requires it be
+# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'.
+#s3_store_bucket_url_format = subdomain
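+#
+# Illustration (hypothetical bucket and endpoint): with a bucket named
+# 'glancebucket' on s3.example.com,
+#   subdomain style -> http://glancebucket.s3.example.com/<object>
+#   path style      -> http://s3.example.com/glancebucket/<object>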
+
+# The size, in MB, at which image files are chunked and uploaded
+# to S3 as a multipart upload. The default is 100MB.
+#s3_store_large_object_size = 100
+
+# The part size, in MB, used for each part of a multipart upload
+# to S3. The size must be greater than or equal to
+# 5MB. The default is 10MB.
+#s3_store_large_object_chunk_size = 10
+
+# The number of thread pools to perform a multipart upload
+# in S3. The default is 10.
+#s3_store_thread_pools = 10
+
+# ============ RBD Store Options =============================
+
+# Ceph configuration file path
+# If using cephx authentication, this file should
+# include a reference to the right keyring
+# in a client.<USER> section
+#rbd_store_ceph_conf = /etc/ceph/ceph.conf
+
+# RADOS user to authenticate as (only applicable if using cephx)
+# If <None>, a default will be chosen based on the client. section
+# in rbd_store_ceph_conf
+#rbd_store_user = <None>
+
+# RADOS pool in which images are stored
+#rbd_store_pool = images
+
+# RADOS images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+#rbd_store_chunk_size = 8
+
+# ============ Sheepdog Store Options =============================
+
+sheepdog_store_address = localhost
+
+sheepdog_store_port = 7000
+
+# Images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+sheepdog_store_chunk_size = 64
+
+# ============ Cinder Store Options ===============================
+
+# Info to match when looking for cinder in the service catalog
+# Format is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v1/%(project_id)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node (string value)
+#os_region_name = <None>
+
+# Location of CA certificates file to use for cinder client requests
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = False
+
+# ============ VMware Datastore Store Options =====================
+
+# ESX/ESXi or vCenter Server target system.
+# The server value can be an IP address or a DNS name
+# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com
+#vmware_server_host = <None>
+
+# Server username (string value)
+#vmware_server_username = <None>
+
+# Server password (string value)
+#vmware_server_password = <None>
+
+# Inventory path to a datacenter (string value)
+# The value is optional when vmware_server_ip is an ESX/ESXi host; if
+# specified, it should be `ha-datacenter`.
+#vmware_datacenter_path = <None>
+
+# Datastore associated with the datacenter (string value)
+#vmware_datastore_name = <None>
+
+# The number of times we retry on failures
+# e.g., socket error, etc (integer value)
+#vmware_api_retry_count = 10
+
+# The interval used for polling remote tasks
+# invoked on VMware ESX/VC server in seconds (integer value)
+#vmware_task_poll_interval = 5
+
+# Absolute path of the folder containing the images in the datastore
+# (string value)
+#vmware_store_image_dir = /openstack_glance
+
+# Allow to perform insecure SSL requests to the target system (boolean value)
+#vmware_api_insecure = False
diff --git a/install-files/openstack/usr/share/openstack/glance/glance-cache.conf b/install-files/openstack/usr/share/openstack/glance/glance-cache.conf
new file mode 100644
index 00000000..3f2d4603
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/glance/glance-cache.conf
@@ -0,0 +1,200 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+# log_file = /var/log/glance/image-cache.log
+
+# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
+use_syslog = True
+
+# Directory that the Image Cache writes data to
+image_cache_dir = /var/lib/glance/image-cache/
+
+# Number of seconds after which we should consider an incomplete image to be
+# stalled and eligible for reaping
+image_cache_stall_time = 86400
+
+# Max cache size in bytes
+image_cache_max_size = 10737418240
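+# (10737418240 bytes = 10 * 1024^3, i.e. 10 GiB)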
+
+# Address to find the registry server
+registry_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# Auth settings if using Keystone
+# auth_url = http://127.0.0.1:5000/v2.0/
+# admin_tenant_name = %SERVICE_TENANT_NAME%
+# admin_user = %SERVICE_USER%
+# admin_password = %SERVICE_PASSWORD%
+
+# List of which store classes and store class locations are
+# currently known to glance at startup.
+# known_stores = glance.store.filesystem.Store,
+# glance.store.http.Store,
+# glance.store.rbd.Store,
+# glance.store.s3.Store,
+# glance.store.swift.Store,
+# glance.store.sheepdog.Store,
+# glance.store.cinder.Store,
+# glance.store.vmware_datastore.Store,
+
+# ============ Filesystem Store Options ========================
+
+# Directory that the Filesystem backend store
+# writes image data to
+filesystem_store_datadir = /var/lib/glance/images/
+
+# ============ Swift Store Options =============================
+
+# Version of the authentication service to use
+# Valid versions are '2' for keystone and '1' for swauth and rackspace
+swift_store_auth_version = 2
+
+# Address where the Swift authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'https://'
+# For swauth, use something like '127.0.0.1:8080/v1.0/'
+swift_store_auth_address = 127.0.0.1:5000/v2.0/
+
+# User to authenticate against the Swift authentication service
+# If you use Swift authentication service, set it to 'account':'user'
+# where 'account' is a Swift storage account and 'user'
+# is a user in that account
+swift_store_user = jdoe:jdoe
+
+# Auth key for the user authenticating against the
+# Swift authentication service
+swift_store_key = a86850deb2742ec3cb41518e26aa2d89
+
+# Container within the account that the account should use
+# for storing images in Swift
+swift_store_container = glance
+
+# Do we create the container if it does not exist?
+swift_store_create_container_on_put = False
+
+# What size, in MB, should Glance start chunking image files
+# and do a large object manifest in Swift? By default, this is
+# the maximum object size in Swift, which is 5GB
+swift_store_large_object_size = 5120
+
+# When doing a large object manifest, what size, in MB, should
+# Glance write chunks to Swift? This amount of data is written
+# to a temporary disk buffer during the process of chunking
+# the image file, and the default is 200MB
+swift_store_large_object_chunk_size = 200
+
+# Whether to use ServiceNET to communicate with the Swift storage servers.
+# (If you aren't RACKSPACE, leave this False!)
+#
+# To use ServiceNET for authentication, prefix hostname of
+# `swift_store_auth_address` with 'snet-'.
+# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
+swift_enable_snet = False
+
+# ============ S3 Store Options =============================
+
+# Address where the S3 authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'http://'
+s3_store_host = 127.0.0.1:8080/v1.0/
+
+# User to authenticate against the S3 authentication service
+s3_store_access_key = <20-char AWS access key>
+
+# Auth key for the user authenticating against the
+# S3 authentication service
+s3_store_secret_key = <40-char AWS secret key>
+
+# Container within the account that the account should use
+# for storing images in S3. Note that S3 has a flat namespace,
+# so you need a unique bucket name for your glance images. An
+# easy way to do this is to append your AWS access key to "glance".
+# S3 buckets in AWS *must* be lowercased, so remember to lowercase
+# your AWS access key if you use it in your bucket name below!
+s3_store_bucket = <lowercased 20-char aws access key>glance
+
+# Do we create the bucket if it does not exist?
+s3_store_create_bucket_on_put = False
+
+# When sending images to S3, the data will first be written to a
+# temporary buffer on disk. By default the platform's temporary directory
+# will be used. If required, an alternative directory can be specified here.
+# s3_store_object_buffer_dir = /path/to/dir
+
+# ============ Cinder Store Options ===========================
+
+# Info to match when looking for cinder in the service catalog
+# Format is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v1/%(project_id)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node (string value)
+#os_region_name = <None>
+
+# Location of CA certificates file to use for cinder client requests
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = False
+
+# ============ VMware Datastore Store Options =====================
+
+# ESX/ESXi or vCenter Server target system.
+# The server value can be an IP address or a DNS name
+# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com
+#vmware_server_host = <None>
+
+# Server username (string value)
+#vmware_server_username = <None>
+
+# Server password (string value)
+#vmware_server_password = <None>
+
+# Inventory path to a datacenter (string value)
+# The value is optional when vmware_server_ip is an ESX/ESXi host; if
+# specified, it should be `ha-datacenter`.
+#vmware_datacenter_path = <None>
+
+# Datastore associated with the datacenter (string value)
+#vmware_datastore_name = <None>
+
+# The number of times we retry on failures
+# e.g., socket error, etc (integer value)
+#vmware_api_retry_count = 10
+
+# The interval used for polling remote tasks
+# invoked on VMware ESX/VC server in seconds (integer value)
+#vmware_task_poll_interval = 5
+
+# Absolute path of the folder containing the images in the datastore
+# (string value)
+#vmware_store_image_dir = /openstack_glance
+
+# Allow to perform insecure SSL requests to the target system (boolean value)
+#vmware_api_insecure = False
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+# metadata_encryption_key = <16, 24 or 32 char registry metadata key>
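+#
+# One possible way to generate a 32-character key, assuming the openssl
+# command-line tool is available:
+#   openssl rand -hex 16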
diff --git a/install-files/openstack/usr/share/openstack/glance/glance-registry-paste.ini b/install-files/openstack/usr/share/openstack/glance/glance-registry-paste.ini
new file mode 100644
index 00000000..df403f6e
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/glance/glance-registry-paste.ini
@@ -0,0 +1,30 @@
+# Use this pipeline for no auth - DEFAULT
+[pipeline:glance-registry]
+pipeline = osprofiler unauthenticated-context registryapp
+
+# Use this pipeline for keystone auth
+[pipeline:glance-registry-keystone]
+pipeline = osprofiler authtoken context registryapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user.
+[pipeline:glance-registry-trusted-auth]
+pipeline = osprofiler context registryapp
+
+[app:registryapp]
+paste.app_factory = glance.registry.api:API.factory
+
+[filter:context]
+paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
diff --git a/install-files/openstack/usr/share/openstack/glance/glance-registry.conf b/install-files/openstack/usr/share/openstack/glance/glance-registry.conf
new file mode 100644
index 00000000..302f4138
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/glance/glance-registry.conf
@@ -0,0 +1,245 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Address to bind the registry server
+bind_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# Port to bind the registry server to
+bind_port = 9191
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+# log_file = /var/log/glance/registry.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package.
+#data_api = glance.db.sqlalchemy.api
+
+# The number of child process workers that will be
+# created to service Registry requests. The default will be
+# equal to the number of CPUs available. (integer value)
+#workers = None
+
+# Enable Registry API versions individually or simultaneously
+#enable_v1_registry = True
+#enable_v2_registry = True
+
+# Limit the api to return `param_limit_max` items in a call to a container. If
+# a larger `limit` query param is provided, it will be reduced to this value.
+api_limit_max = 1000
+
+# If a `limit` query param is not provided in an api request, it will
+# default to `limit_param_default`
+limit_param_default = 25
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Whether to automatically create the database tables.
+# Default: False
+#db_auto_create = False
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+#sqlalchemy_debug = True
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+use_syslog = True
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL1
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting registry server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting registry server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+# ============ Notification System Options =====================
+
+# Driver or drivers to handle sending notifications. Set to
+# 'messaging' to send notifications to a message queue.
+notification_driver = messagingv2
+
+# Default publisher_id for outgoing notifications.
+# default_publisher_id = image.localhost
+
+# Messaging driver used for 'messaging' notifications driver
+rpc_backend=rabbit
+
+# Configuration options if sending notifications via rabbitmq
+rabbit_host = {{ RABBITMQ_HOST }}
+rabbit_port = {{ RABBITMQ_PORT }}
+rabbit_use_ssl = false
+rabbit_userid = {{ RABBITMQ_USER }}
+rabbit_password = {{ RABBITMQ_PASSWORD }}
+rabbit_virtual_host = /
+rabbit_notification_exchange = glance
+rabbit_notification_topic = notifications
+rabbit_durable_queues = False
+
+# Configuration options if sending notifications via Qpid (these are
+# the defaults)
+qpid_notification_exchange = glance
+qpid_notification_topic = notifications
+qpid_hostname = localhost
+qpid_port = 5672
+qpid_username =
+qpid_password =
+qpid_sasl_mechanisms =
+qpid_reconnect_timeout = 0
+qpid_reconnect_limit = 0
+qpid_reconnect_interval_min = 0
+qpid_reconnect_interval_max = 0
+qpid_reconnect_interval = 0
+qpid_heartbeat = 5
+# Set to 'ssl' to enable SSL
+qpid_protocol = tcp
+qpid_tcp_nodelay = True
+
+
+# ================= Database Options ==========================
+
+[database]
+# The file name to use with SQLite (string value)
+#sqlite_db = glance.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous = True
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+connection=postgresql://{{ GLANCE_DB_USER }}:{{ GLANCE_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/glance
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = <None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect = False
+
+# seconds between db connection retries (integer value)
+#db_retry_interval = 1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval = True
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval = 10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries = 20
+
+[keystone_authtoken]
+auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357
+admin_tenant_name = service
+admin_user = {{ GLANCE_SERVICE_USER }}
+admin_password = {{ GLANCE_SERVICE_PASSWORD }}
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+#config_file = glance-registry-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-registry-keystone], you would configure the flavor below
+# as 'keystone'.
+flavor=keystone
+
+[profiler]
+# If False fully disable profiling feature.
+#enabled = False
+
+# If False doesn't trace SQL requests.
+#trace_sqlalchemy = False
diff --git a/install-files/openstack/usr/share/openstack/glance/glance-scrubber.conf b/install-files/openstack/usr/share/openstack/glance/glance-scrubber.conf
new file mode 100644
index 00000000..cdbfda71
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/glance/glance-scrubber.conf
@@ -0,0 +1,108 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+# log_file = /var/log/glance/scrubber.log
+
+# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
+use_syslog = True
+
+# Should we run our own loop or rely on cron/scheduler to run us
+daemon = False
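+# When daemon is False the scrubber makes a single pass and exits, so an
+# external scheduler must re-run it. An illustrative cron entry (the config
+# file path is assumed) could be:
+#   */30 * * * * glance-scrubber --config-file /etc/glance/glance-scrubber.conf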
+
+# Loop time between checking for new items to schedule for delete
+wakeup_time = 300
+
+# Directory that the scrubber will use to remind itself of what to delete
+# Make sure this is also set in glance-api.conf
+scrubber_datadir = /var/lib/glance/scrubber
+
+# Only one server in your deployment should be designated the cleanup host
+cleanup_scrubber = False
+
+# pending_delete items older than this time are candidates for cleanup
+cleanup_scrubber_time = 86400
+
+# Address to find the registry server for cleanups
+registry_host = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# Auth settings if using Keystone
+# auth_url = http://127.0.0.1:5000/v2.0/
+# admin_tenant_name = %SERVICE_TENANT_NAME%
+# admin_user = %SERVICE_USER%
+# admin_password = %SERVICE_PASSWORD%
+
+# Directory to use for lock files. Default to a temp directory
+# (string value). This setting needs to be the same for both
+# glance-scrubber and glance-api.
+#lock_path=<None>
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package, it is also possible to use: glance.db.registry.api
+#data_api = glance.db.sqlalchemy.api
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
+
+# ================= Database Options ==========================
+
+[database]
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+#connection=sqlite:////glance/openstack/common/db/$sqlite_db
+
+# The SQLAlchemy connection string used to connect to the
+# slave database (string value)
+#slave_connection=
+
+# timeout before idle sql connections are reaped (integer
+# value)
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+#max_pool_size=<None>
+
+# maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+#max_retries=10
+
+# interval between retries of opening a sql connection
+# (integer value)
+#retry_interval=10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+#connection_debug=0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+#connection_trace=false
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+#pool_timeout=<None>
diff --git a/install-files/openstack/usr/share/openstack/glance/logging.conf b/install-files/openstack/usr/share/openstack/glance/logging.conf
new file mode 100644
index 00000000..7e7f31f0
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/glance/logging.conf
@@ -0,0 +1,54 @@
+[loggers]
+keys=root,api,registry,combined
+
+[formatters]
+keys=normal,normal_with_name,debug
+
+[handlers]
+keys=production,file,devel
+
+[logger_root]
+level=NOTSET
+handlers=devel
+
+[logger_api]
+level=DEBUG
+handlers=devel
+qualname=glance-api
+
+[logger_registry]
+level=DEBUG
+handlers=devel
+qualname=glance-registry
+
+[logger_combined]
+level=DEBUG
+handlers=devel
+qualname=glance-combined
+
+[handler_production]
+class=handlers.SysLogHandler
+level=ERROR
+formatter=normal_with_name
+args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)
+
+[handler_file]
+class=FileHandler
+level=DEBUG
+formatter=normal_with_name
+args=('glance.log', 'w')
+
+[handler_devel]
+class=StreamHandler
+level=NOTSET
+formatter=debug
+args=(sys.stdout,)
+
+[formatter_normal]
+format=%(asctime)s %(levelname)s %(message)s
+
+[formatter_normal_with_name]
+format=(%(name)s): %(asctime)s %(levelname)s %(message)s
+
+[formatter_debug]
+format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s
diff --git a/install-files/openstack/usr/share/openstack/glance/policy.json b/install-files/openstack/usr/share/openstack/glance/policy.json
new file mode 100644
index 00000000..325f00b2
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/glance/policy.json
@@ -0,0 +1,52 @@
+{
+ "context_is_admin": "role:admin",
+ "default": "",
+
+ "add_image": "",
+ "delete_image": "",
+ "get_image": "",
+ "get_images": "",
+ "modify_image": "",
+ "publicize_image": "role:admin",
+ "copy_from": "",
+
+ "download_image": "",
+ "upload_image": "",
+
+ "delete_image_location": "",
+ "get_image_location": "",
+ "set_image_location": "",
+
+ "add_member": "",
+ "delete_member": "",
+ "get_member": "",
+ "get_members": "",
+ "modify_member": "",
+
+ "manage_image_cache": "role:admin",
+
+ "get_task": "",
+ "get_tasks": "",
+ "add_task": "",
+ "modify_task": "",
+
+ "get_metadef_namespace": "",
+ "get_metadef_namespaces":"",
+ "modify_metadef_namespace":"",
+ "add_metadef_namespace":"",
+
+ "get_metadef_object":"",
+ "get_metadef_objects":"",
+ "modify_metadef_object":"",
+ "add_metadef_object":"",
+
+ "list_metadef_resource_types":"",
+ "get_metadef_resource_type":"",
+ "add_metadef_resource_type_association":"",
+
+ "get_metadef_property":"",
+ "get_metadef_properties":"",
+ "modify_metadef_property":"",
+ "add_metadef_property":""
+
+}
diff --git a/install-files/openstack/usr/share/openstack/glance/schema-image.json b/install-files/openstack/usr/share/openstack/glance/schema-image.json
new file mode 100644
index 00000000..5aafd6b3
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/glance/schema-image.json
@@ -0,0 +1,28 @@
+{
+ "kernel_id": {
+ "type": "string",
+ "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+ "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image."
+ },
+ "ramdisk_id": {
+ "type": "string",
+ "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+ "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image."
+ },
+ "instance_uuid": {
+ "type": "string",
+ "description": "ID of instance used to create this image."
+ },
+ "architecture": {
+ "description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
+ "type": "string"
+ },
+ "os_distro": {
+ "description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
+ "type": "string"
+ },
+ "os_version": {
+ "description": "Operating system version as specified by the distributor",
+ "type": "string"
+ }
+}
diff --git a/install-files/openstack/usr/share/openstack/horizon.yml b/install-files/openstack/usr/share/openstack/horizon.yml
new file mode 100644
index 00000000..14cea5c5
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/horizon.yml
@@ -0,0 +1,47 @@
+---
+- hosts: localhost
+ tasks:
+
+# Setup apache, this may end up in apache.yml
+ - name: Create the apache user.
+ user:
+ name: apache
+ comment: Apache Server
+ shell: /sbin/nologin
+ home: /var/www
+
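+  # Mode 4750 below keeps the (normally root-owned) suexec helper setuid but
+  # strips world access, so only root and the apache group can execute it.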
+ - file:
+ path: /usr/sbin/suexec
+ group: apache
+ mode: 4750
+
+# Setup horizon
+ - name: Create the horizon user.
+ user:
+ name: horizon
+ comment: Openstack Horizon User
+ shell: /sbin/nologin
+ home: /var/lib/horizon
+
+ - name: Create the /var folders for horizon
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: horizon
+ group: horizon
+ with_items:
+ - /var/lib/horizon
+
+ - name: Link horizon apache configuration
+ file:
+ src: /etc/horizon/apache-horizon.conf
+ dest: /etc/httpd/conf.d/apache-horizon.conf
+ state: link
+
+ - name: Enable and start apache services needed by horizon
+ service:
+ name: "{{ item }}"
+ enabled: yes
+ state: started
+ with_items:
+ - apache-httpd.service
diff --git a/install-files/openstack/usr/share/openstack/hosts b/install-files/openstack/usr/share/openstack/hosts
new file mode 100644
index 00000000..5b97818d
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local
diff --git a/install-files/openstack/usr/share/openstack/ironic.yml b/install-files/openstack/usr/share/openstack/ironic.yml
new file mode 100644
index 00000000..db0a8aa8
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/ironic.yml
@@ -0,0 +1,104 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/ironic.conf"
+ tasks:
+ - name: Create the ironic user
+ user:
+ name: ironic
+ comment: Openstack Ironic Daemons
+ shell: /sbin/nologin
+ home: /var/lib/ironic
+
+ - name: Create the /var folders for Ironic
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: ironic
+ group: ironic
+ with_items:
+ - /var/run/ironic
+ - /var/lock/ironic
+ - /var/log/ironic
+ - /var/lib/ironic
+
+ - file: path=/etc/ironic state=directory
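+  # with_lines runs the shell command given below and loops over each line of
+  # its output, so every file found under /usr/share/openstack/ironic gets
+  # rendered as a Jinja2 template into /etc/ironic.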
+ - name: Add the configuration needed for ironic in /etc/ironic using templates
+ template:
+ src: /usr/share/openstack/ironic/{{ item }}
+ dest: /etc/ironic/{{ item }}
+ with_lines:
+ - cd /usr/share/openstack/ironic && find -type f
+
+ - name: Create Ironic service user in service tenant
+ keystone_user:
+ user: "{{ IRONIC_SERVICE_USER }}"
+ password: "{{ IRONIC_SERVICE_PASSWORD }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Assign admin role to Ironic service user in the service tenant
+ keystone_user:
+ role: admin
+ user: "{{ IRONIC_SERVICE_USER }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Add Ironic endpoint
+ keystone_service:
+ name: ironic
+ type: baremetal
+ description: Openstack Ironic Service
+ publicurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:6385'
+ internalurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:6385'
+ adminurl: 'http://{{ CONTROLLER_HOST_ADDRESS }}:6385'
+ region: 'regionOne'
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Create postgresql user for Ironic
+ postgresql_user:
+ name: "{{ IRONIC_DB_USER }}"
+ login_host: "{{ CONTROLLER_HOST_ADDRESS }}"
+ password: "{{ IRONIC_DB_PASSWORD }}"
+ sudo: yes
+ sudo_user: ironic
+
+ - name: Create database for Ironic services
+ postgresql_db:
+ name: ironic
+ owner: "{{ IRONIC_DB_USER }}"
+ login_host: "{{ CONTROLLER_HOST_ADDRESS }}"
+ sudo: yes
+ sudo_user: ironic
+
+ - name: Initiate Ironic database
+ # Use 'upgrade' instead of 'create_schema' to make the operation
+ # idempotent
+ shell: |
+ ironic-dbsync \
+ --config-file /etc/ironic/ironic.conf upgrade
+ sudo: yes
+ sudo_user: ironic
+
+ - name: Enable and start openstack-ironic services
+ service:
+ name: "{{ item }}"
+ enabled: yes
+ state: started
+ with_items:
+ - openstack-ironic-conductor.service
+ - openstack-ironic-api.service
+
+ - name: Set owner and group for the tftp root directory
+ file:
+ path: "/srv/tftp_root/"
+ state: directory
+ owner: ironic
+ group: ironic
+ recurse: yes
+
+ - name: Enable and start tftp-hpa
+ service:
+ name: tftp-hpa.socket
+ enabled: yes
+ state: started
diff --git a/install-files/openstack/usr/share/openstack/ironic/ironic.conf b/install-files/openstack/usr/share/openstack/ironic/ironic.conf
new file mode 100644
index 00000000..75c62b8e
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/ironic/ironic.conf
@@ -0,0 +1,1247 @@
+[DEFAULT]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The number of prefetched messages held by receiver. (integer
+# value)
+#qpid_receiver_capacity=1
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+rabbit_host={{ RABBITMQ_HOST }}
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+rabbit_port={{ RABBITMQ_PORT }}
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+rabbit_userid={{ RABBITMQ_USER }}
+
+# The RabbitMQ password. (string value)
+rabbit_password={{ RABBITMQ_PASSWORD }}
+
+# the RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# If passed, use a fake RabbitMQ provider. (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=ironic
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+#notification_driver=
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+#rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in ironic.netconf
+#
+
+# IP address of this host. (string value)
+my_ip={{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+
+# Use IPv6. (boolean value)
+#use_ipv6=false
+
+
+#
+# Options defined in ironic.api.app
+#
+
+# Method to use for authentication: noauth or keystone.
+# (string value)
+#auth_strategy=keystone
+
+
+#
+# Options defined in ironic.common.driver_factory
+#
+
+# Specify the list of drivers to load during service
+# initialization. Missing drivers, or drivers which fail to
+# initialize, will prevent the conductor service from
+# starting. The option default is a recommended set of
+# production-oriented drivers. A complete list of drivers
+# present on your system may be found by enumerating the
+# "ironic.drivers" entrypoint. An example may be found in the
+# developer documentation online. (list value)
+enabled_drivers=pxe_ipmitool,pxe_ssh
+
+
+#
+# Options defined in ironic.common.exception
+#
+
+# Make exception message format errors fatal. (boolean value)
+#fatal_exception_format_errors=false
+
+
+#
+# Options defined in ironic.common.hash_ring
+#
+
+# Exponent to determine number of hash partitions to use when
+# distributing load across conductors. Larger values will
+# result in more even distribution of load and less load when
+# rebalancing the ring, but more memory usage. Number of
+# partitions per conductor is (2^hash_partition_exponent).
+# This determines the granularity of rebalancing: given 10
+# hosts and an exponent of 2, there are 40 partitions in
+# the ring. A few thousand partitions should make rebalancing
+# smooth in most cases. The default is suitable for up to a
+# few hundred conductors. Too many partitions has a CPU
+# impact. (integer value)
+#hash_partition_exponent=5
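+#
+# Worked example: with the default exponent of 5, each conductor maps
+# 2^5 = 32 partitions, so a cluster of 10 conductors yields 320 partitions
+# in the ring.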
+
+# [Experimental Feature] Number of hosts to map onto each hash
+# partition. Setting this to more than one will cause
+# additional conductor services to prepare deployment
+# environments and potentially allow the Ironic cluster to
+# recover more quickly if a conductor instance is terminated.
+# (integer value)
+#hash_distribution_replicas=1
+
+
+#
+# Options defined in ironic.common.images
+#
+
+# Force backing images to raw format. (boolean value)
+#force_raw_images=true
+
+# Path to isolinux binary file. (string value)
+#isolinux_bin=/usr/lib/syslinux/isolinux.bin
+
+# Template file for isolinux configuration file. (string
+# value)
+#isolinux_config_template=$pybasedir/common/isolinux_config.template
+
+
+#
+# Options defined in ironic.common.paths
+#
+
+# Directory where the ironic python module is installed.
+# (string value)
+#pybasedir=/usr/lib/python/site-packages/ironic
+
+# Directory where ironic binaries are installed. (string
+# value)
+#bindir=$pybasedir/bin
+
+# Top-level directory for maintaining ironic's state. (string
+# value)
+#state_path=$pybasedir
+
+
+#
+# Options defined in ironic.common.policy
+#
+
+# JSON file representing policy. (string value)
+#policy_file=policy.json
+
+# Rule checked when requested rule is not found. (string
+# value)
+#policy_default_rule=default
+
+
+#
+# Options defined in ironic.common.service
+#
+
+# Seconds between running periodic tasks. (integer value)
+#periodic_interval=60
+
+# Name of this node. This can be an opaque identifier. It is
+# not necessarily a hostname, FQDN, or IP address. However,
+# the node name must be valid within an AMQP key, and if using
+# ZeroMQ, a valid hostname, FQDN, or IP address. (string
+# value)
+#host=ironic
+
+
+#
+# Options defined in ironic.common.utils
+#
+
+# Path to the rootwrap configuration file to use for running
+# commands as root. (string value)
+#rootwrap_config=/etc/ironic/rootwrap.conf
+
+# Explicitly specify the temporary working directory. (string
+# value)
+#tempdir=<None>
+
+
+#
+# Options defined in ironic.drivers.modules.image_cache
+#
+
+# Run image downloads and raw format conversions in parallel.
+# (boolean value)
+#parallel_image_downloads=false
+
+
+#
+# Options defined in ironic.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in ironic.openstack.common.lockutils
+#
+
+# Enables or disables inter-process locks. (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. (string value)
+#lock_path=<None>
+
+
+#
+# Options defined in ironic.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error. (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context. (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context.
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format.
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN
+
+# Enables or disables publication of error events. (boolean
+# value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log
+# message. (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log
+# message. (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of a logging configuration file. This file is
+# appended to any existing logging configuration files. For
+# details about logging configuration files, see the Python
+# logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s . (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=<None>
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and will change in J to honor RFC5424. (boolean
+# value)
+use_syslog=True
+
+# (Optional) Enables or disables syslog rfc5424 format for
+# logging. If enabled, prefixes the MSG part of the syslog
+# message with APP-NAME (RFC5424). The format without the APP-
+# NAME is deprecated in I, and will be removed in J. (boolean
+# value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in ironic.openstack.common.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+
+[agent]
+
+#
+# Options defined in ironic.drivers.modules.agent
+#
+
+# Additional append parameters for baremetal PXE boot. (string
+# value)
+#agent_pxe_append_params=nofb nomodeset vga=normal
+
+# Template file for PXE configuration. (string value)
+#agent_pxe_config_template=$pybasedir/drivers/modules/agent_config.template
+
+# Neutron bootfile DHCP parameter. (string value)
+#agent_pxe_bootfile_name=pxelinux.0
+
+# Maximum interval (in seconds) for agent heartbeats. (integer
+# value)
+#heartbeat_timeout=300
+
+
+#
+# Options defined in ironic.drivers.modules.agent_client
+#
+
+# API version to use for communicating with the ramdisk agent.
+# (string value)
+#agent_api_version=v1
+
+
+[api]
+
+#
+# Options defined in ironic.api
+#
+
+# The listen IP for the Ironic API server. (string value)
+#host_ip=0.0.0.0
+
+# The port for the Ironic API server. (integer value)
+#port=6385
+
+# The maximum number of items returned in a single response
+# from a collection resource. (integer value)
+#max_limit=1000
+
+
+[conductor]
+
+#
+# Options defined in ironic.conductor.manager
+#
+
+# URL of Ironic API service. If not set ironic can get the
+# current value from the keystone service catalog. (string
+# value)
+api_url=http://{{ MANAGEMENT_INTERFACE_IP_ADDRESS }}:6385
+
+# Seconds between conductor heart beats. (integer value)
+#heartbeat_interval=10
+
+# Maximum time (in seconds) since the last check-in of a
+# conductor. (integer value)
+#heartbeat_timeout=60
+
+# Interval between syncing the node power state to the
+# database, in seconds. (integer value)
+#sync_power_state_interval=60
+
+# Interval between checks of provision timeouts, in seconds.
+# (integer value)
+#check_provision_state_interval=60
+
+# Timeout (seconds) for waiting callback from deploy ramdisk.
+# 0 - unlimited. (integer value)
+#deploy_callback_timeout=1800
+
+# During sync_power_state, should the hardware power state be
+# set to the state recorded in the database (True) or should
+# the database be updated based on the hardware state (False).
+# (boolean value)
+#force_power_state_during_sync=true
+
+# During sync_power_state failures, limit the number of times
+# Ironic should try syncing the hardware node power state with
+# the node power state in DB (integer value)
+#power_state_sync_max_retries=3
+
+# Maximum number of worker threads that can be started
+# simultaneously by a periodic task. Should be less than RPC
+# thread pool size. (integer value)
+#periodic_max_workers=8
+
+# The size of the workers greenthread pool. (integer value)
+#workers_pool_size=100
+
+# Number of attempts to grab a node lock. (integer value)
+#node_locked_retry_attempts=3
+
+# Seconds to sleep between node lock attempts. (integer value)
+#node_locked_retry_interval=1
+
+# Enable sending sensor data message via the notification bus
+# (boolean value)
+#send_sensor_data=false
+
+# Seconds between conductor sending sensor data message to
+# ceilometer via the notification bus. (integer value)
+#send_sensor_data_interval=600
+
+# List of comma separated metric types which need to be sent
+# to Ceilometer. The default value, "ALL", is a special value
+# meaning send all the sensor data. (list value)
+#send_sensor_data_types=ALL
+
+# When conductors join or leave the cluster, existing
+# conductors may need to update any persistent local state as
+# nodes are moved around the cluster. This option controls how
+# often, in seconds, each conductor will check for nodes that
+# it should "take over". Set it to a negative value to disable
+# the check entirely. (integer value)
+#sync_local_state_interval=180
+
+
+[console]
+
+#
+# Options defined in ironic.drivers.modules.console_utils
+#
+
+# Path to serial console terminal program (string value)
+#terminal=shellinaboxd
+
+# Directory containing the terminal SSL cert(PEM) for serial
+# console access (string value)
+#terminal_cert_dir=<None>
+
+# Directory for holding terminal pid files. If not specified,
+# the temporary directory will be used. (string value)
+#terminal_pid_dir=<None>
+
+# Time interval (in seconds) for checking the status of
+# console subprocess. (integer value)
+#subprocess_checking_interval=1
+
+# Time (in seconds) to wait for the console subprocess to
+# start. (integer value)
+#subprocess_timeout=10
+
+
+[database]
+
+#
+# Options defined in oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+#sqlite_db=oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous=true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the
+# database. (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+connection=postgresql://{{ IRONIC_DB_USER }}:{{ IRONIC_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/ironic
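+# (Illustrative only: with hypothetical values ironic / SECRET for the
+# database user and password and 192.0.2.10 for the controller address,
+# the line above would render as
+# connection=postgresql://ironic:SECRET@192.0.2.10/ironic)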
+
+# The SQLAlchemy connection string to use to connect to the
+# slave database. (string value)
+#slave_connection=<None>
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum db connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a SQL connection.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information: 0=None,
+# 100=Everything. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add Python stack traces to SQL as comment strings. (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+# Enable the experimental use of database reconnect on
+# connection lost. (boolean value)
+#use_db_reconnect=false
+
+# Seconds between database connection retries. (integer value)
+#db_retry_interval=1
+
+# If True, increases the interval between database connection
+# retries up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval=true
+
+# If db_inc_retry_interval is set, the maximum seconds between
+# database connection retries. (integer value)
+#db_max_retry_interval=10
+
+# Maximum database connection retries before error is raised.
+# Set to -1 to specify an infinite retry count. (integer
+# value)
+#db_max_retries=20
+
+
+#
+# Options defined in ironic.db.sqlalchemy.models
+#
+
+# MySQL engine to use. (string value)
+#mysql_engine=InnoDB
+
+
+[dhcp]
+
+#
+# Options defined in ironic.common.dhcp_factory
+#
+
+# DHCP provider to use. "neutron" uses Neutron, and "none"
+# uses a no-op provider. (string value)
+#dhcp_provider=neutron
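+# (Illustrative only: to manage DHCP outside of Ironic, the no-op
+# provider described above could be selected with
+# dhcp_provider=none)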
+
+
+[disk_partitioner]
+
+#
+# Options defined in ironic.common.disk_partitioner
+#
+
+# After Ironic has completed creating the partition table, it
+# continues to check for activity on the attached iSCSI device
+# status at this interval prior to copying the image to the
+# node, in seconds (integer value)
+#check_device_interval=1
+
+# The maximum number of times to check that the device is not
+# accessed by another process. If the device is still busy
+# after that, the disk partitioning will be treated as having
+# failed. (integer value)
+#check_device_max_retries=20
+
+
+[glance]
+
+#
+# Options defined in ironic.common.glance_service.v2.image_service
+#
+
+# A list of URL schemes that can be downloaded directly via
+# the direct_url. Currently supported schemes: [file]. (list
+# value)
+#allowed_direct_url_schemes=
+
+# The secret token given to Swift to allow temporary URL
+# downloads. Required for temporary URLs. (string value)
+#swift_temp_url_key=<None>
+
+# The length of time in seconds that the temporary URL will be
+# valid for. Defaults to 20 minutes. If some deploys get a 401
+# response code when trying to download from the temporary
+# URL, try raising this duration. (integer value)
+#swift_temp_url_duration=1200
+
+# The "endpoint" (scheme, hostname, optional port) for the
+# Swift URL of the form
+# "endpoint_url/api_version/account/container/object_id". Do
+# not include trailing "/". For example, use
+# "https://swift.example.com". Required for temporary URLs.
+# (string value)
+#swift_endpoint_url=<None>
+
+# The Swift API version to create a temporary URL for.
+# Defaults to "v1". Swift temporary URL format:
+# "endpoint_url/api_version/account/container/object_id"
+# (string value)
+#swift_api_version=v1
+
+# The account that Glance uses to communicate with Swift. The
+# format is "AUTH_uuid". "uuid" is the UUID for the account
+# configured in the glance-api.conf. Required for temporary
+# URLs. For example:
+# "AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30". Swift temporary
+# URL format:
+# "endpoint_url/api_version/account/container/object_id"
+# (string value)
+#swift_account=<None>
+
+# The Swift container Glance is configured to store its images
+# in. Defaults to "glance", which is the default in glance-
+# api.conf. Swift temporary URL format:
+# "endpoint_url/api_version/account/container/object_id"
+# (string value)
+#swift_container=glance
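+# (Illustrative only: combining the example values from the comments
+# above, a temporary URL would take the form
+# https://swift.example.com/v1/AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30/glance/<object_id>)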
+
+
+#
+# Options defined in ironic.common.image_service
+#
+
+# Default glance hostname or IP address. (string value)
+glance_host={{ CONTROLLER_HOST_ADDRESS }}
+
+# Default glance port. (integer value)
+#glance_port=9292
+
+# Default protocol to use when connecting to glance. Set to
+# https for SSL. (string value)
+#glance_protocol=http
+
+# A list of the glance api servers available to ironic. Prefix
+# with https:// for SSL-based glance API servers. Format is
+# [hostname|IP]:port. (string value)
+#glance_api_servers=<None>
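+# (Illustrative only: a hypothetical value following the documented
+# format could be
+# glance_api_servers=192.0.2.10:9292)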
+
+# Allow to perform insecure SSL (https) requests to glance.
+# (boolean value)
+#glance_api_insecure=false
+
+# Number of retries when downloading an image from glance.
+# (integer value)
+#glance_num_retries=0
+
+# Authentication strategy to use when connecting to glance.
+# (string value)
+#auth_strategy=keystone
+
+
+[ilo]
+
+#
+# Options defined in ironic.drivers.modules.ilo.common
+#
+
+# Timeout (in seconds) for iLO operations (integer value)
+#client_timeout=60
+
+# Port to be used for iLO operations (integer value)
+#client_port=443
+
+# The Swift iLO container to store data. (string value)
+#swift_ilo_container=ironic_ilo_container
+
+# Amount of time in seconds for Swift objects to auto-expire.
+# (integer value)
+#swift_object_expiry_timeout=900
+
+
+#
+# Options defined in ironic.drivers.modules.ilo.power
+#
+
+# Number of times a power operation needs to be retried
+# (integer value)
+#power_retry=6
+
+# Amount of time in seconds to wait in between power
+# operations (integer value)
+#power_wait=2
+
+
+[ipmi]
+
+#
+# Options defined in ironic.drivers.modules.ipminative
+#
+
+# Maximum time in seconds to retry IPMI operations. (integer
+# value)
+#retry_timeout=60
+
+# Minimum time, in seconds, between IPMI operations sent to a
+# server. There is a risk with some hardware that setting this
+# too low may cause the BMC to crash. Recommended setting is 5
+# seconds. (integer value)
+#min_command_interval=5
+
+
+[keystone_authtoken]
+
+#
+# Options defined in keystonemiddleware.auth_token
+#
+
+# Prefix to prepend at the beginning of the path. Deprecated,
+# use identity_uri. (string value)
+#auth_admin_prefix=
+
+# Host providing the admin Identity API endpoint. Deprecated,
+# use identity_uri. (string value)
+#auth_host=127.0.0.1
+
+# Port of the admin Identity API endpoint. Deprecated, use
+# identity_uri. (integer value)
+#auth_port=35357
+
+# Protocol of the admin Identity API endpoint (http or https).
+# Deprecated, use identity_uri. (string value)
+#auth_protocol=https
+
+# Complete public Identity API endpoint (string value)
+auth_uri=http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+
+# Complete admin Identity API endpoint. This should specify
+# the unversioned root endpoint e.g. https://localhost:35357/
+# (string value)
+identity_uri=http://{{ CONTROLLER_HOST_ADDRESS }}:35357
+
+# API version of the admin Identity API endpoint (string
+# value)
+#auth_version=<None>
+
+# Do not handle authorization requests within the middleware,
+# but delegate the authorization decision to downstream WSGI
+# components (boolean value)
+#delay_auth_decision=false
+
+# Request timeout value for communicating with Identity API
+# server. (boolean value)
+#http_connect_timeout=<None>
+
+# How many times are we trying to reconnect when communicating
+# with Identity API Server. (integer value)
+#http_request_max_retries=3
+
+# This option is deprecated and may be removed in a future
+# release. Single shared secret with the Keystone
+# configuration used for bootstrapping a Keystone
+# installation, or otherwise bypassing the normal
+# authentication process. This option should not be used, use
+# `admin_user` and `admin_password` instead. (string value)
+#admin_token=<None>
+
+# Keystone account username (string value)
+admin_user={{ IRONIC_SERVICE_USER }}
+
+# Keystone account password (string value)
+admin_password={{ IRONIC_SERVICE_PASSWORD }}
+
+# Keystone service account tenant name to validate user tokens
+# (string value)
+admin_tenant_name=service
+
+# Env key for the swift cache (string value)
+#cache=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#certfile=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#keyfile=<None>
+
+# A PEM encoded Certificate Authority to use when verifying
+# HTTPs connections. Defaults to system CAs. (string value)
+#cafile=<None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure=false
+
+# Directory used to cache files related to PKI tokens (string
+# value)
+#signing_dir=<None>
+
+# Optionally specify a list of memcached server(s) to use for
+# caching. If left undefined, tokens will instead be cached
+# in-process. (list value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers=<None>
+
+# In order to prevent excessive effort spent validating
+# tokens, the middleware caches previously-seen tokens for a
+# configurable duration (in seconds). Set to -1 to disable
+# caching completely. (integer value)
+#token_cache_time=300
+
+# Determines the frequency at which the list of revoked tokens
+# is retrieved from the Identity service (in seconds). A high
+# number of revocation events combined with a low cache
+# duration may significantly reduce performance. (integer
+# value)
+#revocation_cache_time=10
+
+# (optional) if defined, indicate whether token data should be
+# authenticated or authenticated and encrypted. Acceptable
+# values are MAC or ENCRYPT. If MAC, token data is
+# authenticated (with HMAC) in the cache. If ENCRYPT, token
+# data is encrypted and authenticated in the cache. If the
+# value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+#memcache_security_strategy=<None>
+
+# (optional, mandatory if memcache_security_strategy is
+# defined) this string is used for key derivation. (string
+# value)
+#memcache_secret_key=<None>
+
+# (optional) number of seconds memcached server is considered
+# dead before it is tried again. (integer value)
+#memcache_pool_dead_retry=300
+
+# (optional) max total number of open connections to every
+# memcached server. (integer value)
+#memcache_pool_maxsize=10
+
+# (optional) socket timeout in seconds for communicating with
+# a memcache server. (integer value)
+#memcache_pool_socket_timeout=3
+
+# (optional) number of seconds a connection to memcached is
+# held unused in the pool before it is closed. (integer value)
+#memcache_pool_unused_timeout=60
+
+# (optional) number of seconds that an operation will wait to
+# get a memcache client connection from the pool. (integer
+# value)
+#memcache_pool_conn_get_timeout=10
+
+# (optional) use the advanced (eventlet safe) memcache client
+# pool. The advanced pool will only work under python 2.x.
+# (boolean value)
+#memcache_use_advanced_pool=false
+
+# (optional) indicate whether to set the X-Service-Catalog
+# header. If False, middleware will not ask for service
+# catalog on token validation and will not set the X-Service-
+# Catalog header. (boolean value)
+#include_service_catalog=true
+
+# Used to control the use and type of token binding. Can be
+# set to: "disabled" to not check token binding. "permissive"
+# (default) to validate binding information if the bind type
+# is of a form known to the server and ignore it if not.
+# "strict" like "permissive" but if the bind type is unknown
+# the token will be rejected. "required" any form of token
+# binding is needed to be allowed. Finally the name of a
+# binding method that must be present in tokens. (string
+# value)
+#enforce_token_bind=permissive
+
+# If true, the revocation list will be checked for cached
+# tokens. This requires that PKI tokens are configured on the
+# Keystone server. (boolean value)
+#check_revocations_for_cached=false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a
+# single algorithm or multiple. The algorithms are those
+# supported by Python standard hashlib.new(). The hashes will
+# be tried in the order given, so put the preferred one first
+# for performance. The result of the first hash will be stored
+# in the cache. This will typically be set to multiple values
+# only while migrating from a less secure algorithm to a more
+# secure one. Once all the old tokens are expired this option
+# should be set to a single value for better performance.
+# (list value)
+#hash_algorithms=md5
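+# (Illustrative only: while migrating from MD5 to SHA-256 token hashes,
+# both could be listed with the preferred algorithm first, e.g.
+# hash_algorithms=sha256,md5)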
+
+
+[matchmaker_redis]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[neutron]
+
+#
+# Options defined in ironic.dhcp.neutron
+#
+
+# URL for connecting to neutron. (string value)
+url=http://{{ CONTROLLER_HOST_ADDRESS }}:9696
+
+# Timeout value for connecting to neutron in seconds. (integer
+# value)
+#url_timeout=30
+
+# Default authentication strategy to use when connecting to
+# neutron. Can be either "keystone" or "noauth". Running
+# neutron in noauth mode (related to but not affected by this
+# setting) is insecure and should only be used for testing.
+# (string value)
+#auth_strategy=keystone
+
+
+[pxe]
+
+#
+# Options defined in ironic.drivers.modules.iscsi_deploy
+#
+
+# Additional append parameters for baremetal PXE boot. (string
+# value)
+#pxe_append_params=nofb nomodeset vga=normal
+
+# Default file system format for ephemeral partition, if one
+# is created. (string value)
+#default_ephemeral_format=ext4
+
+# Directory where images are stored on disk. (string value)
+#images_path=/var/lib/ironic/images/
+
+# Directory where master instance images are stored on disk.
+# (string value)
+#instance_master_path=/var/lib/ironic/master_images
+
+# Maximum size (in MiB) of cache for master images, including
+# those in use. (integer value)
+#image_cache_size=20480
+
+# Maximum TTL (in minutes) for old master images in cache.
+# (integer value)
+#image_cache_ttl=10080
+
+# The disk devices to scan while doing the deploy. (string
+# value)
+#disk_devices=cciss/c0d0,sda,hda,vda
+
+
+#
+# Options defined in ironic.drivers.modules.pxe
+#
+
+# Template file for PXE configuration. (string value)
+#pxe_config_template=$pybasedir/drivers/modules/pxe_config.template
+
+# Template file for PXE configuration for UEFI boot loader.
+# (string value)
+#uefi_pxe_config_template=$pybasedir/drivers/modules/elilo_efi_pxe_config.template
+
+# IP address of Ironic compute node's tftp server. (string
+# value)
+#tftp_server=$my_ip
+
+# Ironic compute node's tftp root path. (string value)
+tftp_root=/srv/tftp_root/
+
+# Directory where master tftp images are stored on disk.
+# (string value)
+tftp_master_path=/srv/tftp_root/master_images
+
+# Bootfile DHCP parameter. (string value)
+#pxe_bootfile_name=pxelinux.0
+
+# Bootfile DHCP parameter for UEFI boot mode. (string value)
+#uefi_pxe_bootfile_name=elilo.efi
+
+# Ironic compute node's HTTP server URL. Example:
+# http://192.1.2.3:8080 (string value)
+#http_url=<None>
+
+# Ironic compute node's HTTP root path. (string value)
+#http_root=/httpboot
+
+# Enable iPXE boot. (boolean value)
+#ipxe_enabled=false
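+# (Illustrative only: enabling iPXE typically also relies on the HTTP
+# settings above, e.g. with hypothetical values
+# ipxe_enabled=true
+# http_url=http://192.1.2.3:8080
+# http_root=/httpboot)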
+
+# The path to the main iPXE script file. (string value)
+#ipxe_boot_script=$pybasedir/drivers/modules/boot.ipxe
+
+
+[seamicro]
+
+#
+# Options defined in ironic.drivers.modules.seamicro
+#
+
+# Maximum retries for SeaMicro operations (integer value)
+#max_retry=3
+
+# Seconds to wait for power action to be completed (integer
+# value)
+#action_timeout=10
+
+
+[snmp]
+
+#
+# Options defined in ironic.drivers.modules.snmp
+#
+
+# Seconds to wait for power action to be completed (integer
+# value)
+#power_timeout=10
+
+
+[ssh]
+
+#
+# Options defined in ironic.drivers.modules.ssh
+#
+
+# libvirt uri (string value)
+#libvirt_uri=qemu:///system
+
+
+[swift]
+
+#
+# Options defined in ironic.common.swift
+#
+
+# Maximum number of times to retry a Swift request, before
+# failing. (integer value)
+#swift_max_retries=2
+
+
diff --git a/install-files/openstack/usr/share/openstack/ironic/policy.json b/install-files/openstack/usr/share/openstack/ironic/policy.json
new file mode 100644
index 00000000..94ac3a5b
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/ironic/policy.json
@@ -0,0 +1,5 @@
+{
+ "admin": "role:admin or role:administrator",
+ "admin_api": "is_admin:True",
+ "default": "rule:admin_api"
+}
diff --git a/install-files/openstack/usr/share/openstack/iscsi.yml b/install-files/openstack/usr/share/openstack/iscsi.yml
new file mode 100644
index 00000000..b80377ae
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/iscsi.yml
@@ -0,0 +1,15 @@
+---
+- hosts: localhost
+ tasks:
+ - name: Update kernel module dependencies
+ command: depmod -a
+
+  - name: Generate InitiatorName for iSCSI
+ shell: iscsi-iname
+ register: initiator_name
+
+ - lineinfile:
+ dest: /etc/iscsi/initiatorname.iscsi
+ regexp: '^InitiatorName=$'
+ line: 'InitiatorName={{ initiator_name.stdout }}'
+ backrefs: yes
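+  # Illustrative result only: assuming the shipped file contains a bare
+  # "InitiatorName=" line and iscsi-iname prints something like
+  # "iqn.2005-03.org.open-iscsi:0123456789abc", the file would end up
+  # containing "InitiatorName=iqn.2005-03.org.open-iscsi:0123456789abc".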
diff --git a/install-files/openstack/usr/share/openstack/keystone.yml b/install-files/openstack/usr/share/openstack/keystone.yml
new file mode 100644
index 00000000..330d74d0
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/keystone.yml
@@ -0,0 +1,143 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/keystone.conf"
+ tasks:
+
+  # RabbitMQ configuration; this may end up in a different playbook
+ - name: Create rabbitmq user
+ user:
+ name: rabbitmq
+      comment: RabbitMQ server daemon
+ shell: /sbin/nologin
+ home: /var/lib/rabbitmq
+
+ - name: Create the rabbitmq directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: rabbitmq
+ group: rabbitmq
+ with_items:
+ - /var/run/rabbitmq
+ - /var/log/rabbitmq
+ - /etc/rabbitmq
+
+ - name: Add the configuration needed for rabbitmq in /etc/rabbitmq using templates
+ template:
+ src: /usr/share/openstack/rabbitmq/{{ item }}
+ dest: /etc/rabbitmq/{{ item }}
+ owner: rabbitmq
+ group: rabbitmq
+ mode: 0644
+ with_items:
+ - rabbitmq.config
+ - rabbitmq-env.conf
+
+ - name: Enable and start rabbitmq services
+ service:
+ name: "{{ item }}"
+ enabled: yes
+ state: started
+ with_items:
+ - rabbitmq-server
+
+ # Keystone configuration
+ - name: Create the keystone user.
+ user:
+ name: keystone
+      comment: OpenStack Keystone Daemons
+ shell: /sbin/nologin
+ home: /var/lib/keystone
+
+ - name: Create the /var folders for keystone
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: keystone
+ group: keystone
+ with_items:
+ - /var/run/keystone
+ - /var/lock/keystone
+ - /var/log/keystone
+ - /var/lib/keystone
+
+ - name: Create /etc/keystone directory
+ file:
+ path: /etc/keystone
+ state: directory
+
+ - name: Add the configuration needed for keystone in /etc using templates
+ template:
+ src: /usr/share/openstack/keystone/{{ item }}
+ dest: /etc/keystone/{{ item }}
+ with_lines:
+ - cd /usr/share/openstack/keystone && find -type f
+
+ - name: Create postgresql user for keystone
+ postgresql_user:
+ name: "{{ KEYSTONE_DB_USER }}"
+ password: "{{ KEYSTONE_DB_PASSWORD }}"
+ sudo: yes
+ sudo_user: keystone
+
+ - name: Create database for keystone services
+ postgresql_db:
+ name: keystone
+ owner: "{{ KEYSTONE_DB_USER }}"
+ sudo: yes
+ sudo_user: keystone
+
+  - name: Initiate keystone database
+ keystone_manage:
+ action: dbsync
+ sudo: yes
+ sudo_user: keystone
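+    # (This presumably wraps the "keystone-manage db_sync" command, run
+    # as the keystone user.)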
+
+ - name: Enable and start openstack-keystone service
+ service:
+ name: openstack-keystone.service
+ enabled: yes
+ state: started
+
+ - name: Create admin tenant
+ keystone_user:
+ tenant: admin
+ tenant_description: Admin Tenant
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+ endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+ - name: Create admin user for the admin tenant
+ keystone_user:
+ user: admin
+ tenant: admin
+ password: "{{ KEYSTONE_ADMIN_PASSWORD }}"
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+ endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+ - name: Create admin role for admin user in the admin tenant
+ keystone_user:
+ role: admin
+ user: admin
+ tenant: admin
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+ endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+ - name: Create service tenant
+ keystone_user:
+ tenant: service
+ tenant_description: Service Tenant
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+ endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+ - name: Add keystone endpoint
+ keystone_service:
+ name: keystone
+ type: identity
+ description: Keystone Identity Service
+ publicurl: http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+ internalurl: http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+ adminurl: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+ region: regionOne
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+ endpoint: http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
diff --git a/install-files/openstack/usr/share/openstack/keystone/keystone-paste.ini b/install-files/openstack/usr/share/openstack/keystone/keystone-paste.ini
new file mode 100644
index 00000000..46f994c3
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/keystone/keystone-paste.ini
@@ -0,0 +1,121 @@
+# Keystone PasteDeploy configuration file.
+
+[filter:debug]
+paste.filter_factory = keystone.common.wsgi:Debug.factory
+
+[filter:build_auth_context]
+paste.filter_factory = keystone.middleware:AuthContextMiddleware.factory
+
+[filter:token_auth]
+paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
+
+[filter:admin_token_auth]
+paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
+
+[filter:xml_body]
+paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
+
+[filter:xml_body_v2]
+paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV2.factory
+
+[filter:xml_body_v3]
+paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV3.factory
+
+[filter:json_body]
+paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
+
+[filter:user_crud_extension]
+paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
+
+[filter:crud_extension]
+paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
+
+[filter:ec2_extension]
+paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
+
+[filter:ec2_extension_v3]
+paste.filter_factory = keystone.contrib.ec2:Ec2ExtensionV3.factory
+
+[filter:federation_extension]
+paste.filter_factory = keystone.contrib.federation.routers:FederationExtension.factory
+
+[filter:oauth1_extension]
+paste.filter_factory = keystone.contrib.oauth1.routers:OAuth1Extension.factory
+
+[filter:s3_extension]
+paste.filter_factory = keystone.contrib.s3:S3Extension.factory
+
+[filter:endpoint_filter_extension]
+paste.filter_factory = keystone.contrib.endpoint_filter.routers:EndpointFilterExtension.factory
+
+[filter:endpoint_policy_extension]
+paste.filter_factory = keystone.contrib.endpoint_policy.routers:EndpointPolicyExtension.factory
+
+[filter:simple_cert_extension]
+paste.filter_factory = keystone.contrib.simple_cert:SimpleCertExtension.factory
+
+[filter:revoke_extension]
+paste.filter_factory = keystone.contrib.revoke.routers:RevokeExtension.factory
+
+[filter:url_normalize]
+paste.filter_factory = keystone.middleware:NormalizingFilter.factory
+
+[filter:sizelimit]
+paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory
+
+[filter:stats_monitoring]
+paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory
+
+[filter:stats_reporting]
+paste.filter_factory = keystone.contrib.stats:StatsExtension.factory
+
+[filter:access_log]
+paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory
+
+[app:public_service]
+paste.app_factory = keystone.service:public_app_factory
+
+[app:service_v3]
+paste.app_factory = keystone.service:v3_app_factory
+
+[app:admin_service]
+paste.app_factory = keystone.service:admin_app_factory
+
+[pipeline:public_api]
+# The last item in this pipeline must be public_service or an equivalent
+# application. It cannot be a filter.
+pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension user_crud_extension public_service
+
+[pipeline:admin_api]
+# The last item in this pipeline must be admin_service or an equivalent
+# application. It cannot be a filter.
+pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension s3_extension crud_extension admin_service
+
+[pipeline:api_v3]
+# The last item in this pipeline must be service_v3 or an equivalent
+# application. It cannot be a filter.
+pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v3 json_body ec2_extension_v3 s3_extension simple_cert_extension revoke_extension service_v3
+
+[app:public_version_service]
+paste.app_factory = keystone.service:public_version_app_factory
+
+[app:admin_version_service]
+paste.app_factory = keystone.service:admin_version_app_factory
+
+[pipeline:public_version_api]
+pipeline = sizelimit url_normalize xml_body public_version_service
+
+[pipeline:admin_version_api]
+pipeline = sizelimit url_normalize xml_body admin_version_service
+
+[composite:main]
+use = egg:Paste#urlmap
+/v2.0 = public_api
+/v3 = api_v3
+/ = public_version_api
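+# (For example, with this urlmap a request to /v2.0/tokens is dispatched
+# to the public_api pipeline and a request under /v3 to api_v3.)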
+
+[composite:admin]
+use = egg:Paste#urlmap
+/v2.0 = admin_api
+/v3 = api_v3
+/ = admin_version_api
diff --git a/install-files/openstack/usr/share/openstack/keystone/keystone.conf b/install-files/openstack/usr/share/openstack/keystone/keystone.conf
new file mode 100644
index 00000000..4e04c81b
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/keystone/keystone.conf
@@ -0,0 +1,1588 @@
+[DEFAULT]
+
+#
+# Options defined in keystone
+#
+
+# A "shared secret" that can be used to bootstrap Keystone.
+# This "token" does not represent a user, and carries no
+# explicit authorization. To disable in production (highly
+# recommended), remove AdminTokenAuthMiddleware from your
+# paste application pipelines (for example, in keystone-
+# paste.ini). (string value)
+admin_token={{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}
+
+# The IP address of the network interface for the public
+# service to listen on. (string value)
+# Deprecated group/name - [DEFAULT]/bind_host
+#public_bind_host=0.0.0.0
+
+# The IP address of the network interface for the admin
+# service to listen on. (string value)
+# Deprecated group/name - [DEFAULT]/bind_host
+#admin_bind_host=0.0.0.0
+
+# (Deprecated) The port which the OpenStack Compute service
+# listens on. This option was only used for string replacement
+# in the templated catalog backend. Templated catalogs should
+# replace the "$(compute_port)s" substitution with the static
+# port of the compute service. As of Juno, this option is
+# deprecated and will be removed in the L release. (integer
+# value)
+#compute_port=8774
+
+# The port number which the admin service listens on. (integer
+# value)
+admin_port=35357
+
+# The port number which the public service listens on.
+# (integer value)
+public_port=5000
+
+# The base public endpoint URL for Keystone that is advertised
+# to clients (NOTE: this does NOT affect how Keystone listens
+# for connections). Defaults to the base host URL of the
+# request. E.g. a request to http://server:5000/v2.0/users
+# will default to http://server:5000. You should only need to
+# set this value if the base URL contains a path (e.g.
+# /prefix/v2.0) or the endpoint should be found on a different
+# server. (string value)
+#public_endpoint=<None>
+
+# The base admin endpoint URL for Keystone that is advertised
+# to clients (NOTE: this does NOT affect how Keystone listens
+# for connections). Defaults to the base host URL of the
+# request. E.g. a request to http://server:35357/v2.0/users
+# will default to http://server:35357. You should only need to
+# set this value if the base URL contains a path (e.g.
+# /prefix/v2.0) or the endpoint should be found on a different
+# server. (string value)
+#admin_endpoint=<None>
+
+# The number of worker processes to serve the public WSGI
+# application. Defaults to number of CPUs (minimum of 2).
+# (integer value)
+#public_workers=<None>
+
+# The number of worker processes to serve the admin WSGI
+# application. Defaults to number of CPUs (minimum of 2).
+# (integer value)
+#admin_workers=<None>
+
+# Enforced by optional sizelimit middleware
+# (keystone.middleware:RequestBodySizeLimiter). (integer
+# value)
+#max_request_body_size=114688
+
+# Limit the sizes of user & project ID/names. (integer value)
+#max_param_size=64
+
+# Similar to max_param_size, but provides an exception for
+# token values. (integer value)
+#max_token_size=8192
+
+# During a SQL upgrade member_role_id will be used to create a
+# new role that will replace records in the assignment table
+# with explicit role grants. After migration, the
+# member_role_id will be used in the API add_user_to_project.
+# (string value)
+#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab
+
+# During a SQL upgrade member_role_name will be used to create
+# a new role that will replace records in the assignment table
+# with explicit role grants. After migration, member_role_name
+# will be ignored. (string value)
+#member_role_name=_member_
+
+# The value passed as the keyword "rounds" to passlib's
+# encrypt method. (integer value)
+#crypt_strength=40000
+
+# Set this to true if you want to enable TCP_KEEPALIVE on
+# server sockets, i.e. sockets used by the Keystone wsgi
+# server for client connections. (boolean value)
+#tcp_keepalive=false
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server
+# socket. Only applies if tcp_keepalive is true. Not supported
+# on OS X. (integer value)
+#tcp_keepidle=600
+
+# The maximum number of entities that will be returned in a
+# collection, with no limit set by default. This global limit
+# may be then overridden for a specific driver, by specifying
+# a list_limit in the appropriate section (e.g. [assignment]).
+# (integer value)
+#list_limit=<None>
+
+# Set this to false if you want to enable the ability for
+# user, group and project entities to be moved between domains
+# by updating their domain_id. Allowing such movement is not
+# recommended if the scope of a domain admin is being
+# restricted by use of an appropriate policy file (see
+# policy.v3cloudsample as an example). (boolean value)
+#domain_id_immutable=true
+
+# If set to true, strict password length checking is performed
+# for password manipulation. If a password exceeds the maximum
+# length, the operation will fail with an HTTP 403 Forbidden
+# error. If set to false, passwords are automatically
+# truncated to the maximum length. (boolean value)
+#strict_password_check=false
+
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The number of prefetched messages held by receiver. (integer
+# value)
+#qpid_receiver_capacity=1
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+rabbit_host={{ RABBITMQ_HOST }}
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+rabbit_port={{ RABBITMQ_PORT }}
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+rabbit_hosts=$rabbit_host:$rabbit_port
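+# (Illustrative only: for a RabbitMQ HA cluster this could instead list
+# hypothetical nodes, e.g. rabbit_hosts=rabbit1:5672,rabbit2:5672)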
+
+# Connect over SSL for RabbitMQ. (boolean value)
+rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+rabbit_userid={{ RABBITMQ_USER }}
+
+# The RabbitMQ password. (string value)
+rabbit_password={{ RABBITMQ_PASSWORD }}
+
+# the RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# If passed, use a fake RabbitMQ provider. (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=keystone
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+#notification_driver=
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=keystone
+
+
+#
+# Options defined in keystone.notifications
+#
+
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=<None>
+
+
+#
+# Options defined in keystone.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
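+# (Illustrative only: for example, backdoor_port=3000:3100 would listen
+# on the smallest unused port in that range, per the description above.)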
+
+
+#
+# Options defined in keystone.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error. (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context. (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context.
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format.
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
+
+# Enables or disables publication of error events. (boolean
+# value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log
+# message. (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log
+# message. (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of a logging configuration file. This file is
+# appended to any existing logging configuration files. For
+# details about logging configuration files, see the Python
+# logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s . (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=<None>
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and will change in J to honor RFC5424. (boolean
+# value)
+use_syslog=True
+
+# (Optional) Enables or disables syslog rfc5424 format for
+# logging. If enabled, prefixes the MSG part of the syslog
+# message with APP-NAME (RFC5424). The format without the APP-
+# NAME is deprecated in I, and will be removed in J. (boolean
+# value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in keystone.openstack.common.policy
+#
+
+# The JSON file that defines policies. (string value)
+#policy_file=policy.json
+
+# Default rule. Enforced when a requested rule is not found.
+# (string value)
+#policy_default_rule=default
+
+
+[assignment]
+
+#
+# Options defined in keystone
+#
+
+# Assignment backend driver. (string value)
+#driver=<None>
+
+# Toggle for assignment caching. This has no effect unless
+# global caching is enabled. (boolean value)
+#caching=true
+
+# TTL (in seconds) to cache assignment data. This has no
+# effect unless global caching is enabled. (integer value)
+#cache_time=<None>
+
+# Maximum number of entities that will be returned in an
+# assignment collection. (integer value)
+#list_limit=<None>
+
+
+[auth]
+
+#
+# Options defined in keystone
+#
+
+# Default auth methods. (list value)
+#methods=external,password,token
+
+# The password auth plugin module. (string value)
+#password=keystone.auth.plugins.password.Password
+
+# The token auth plugin module. (string value)
+#token=keystone.auth.plugins.token.Token
+
+# The external (REMOTE_USER) auth plugin module. (string
+# value)
+#external=keystone.auth.plugins.external.DefaultDomain
+
+
+[cache]
+
+#
+# Options defined in keystone
+#
+
+# Prefix for building the configuration dictionary for the
+# cache region. This should not need to be changed unless
+# there is another dogpile.cache region with the same
+# configuration name. (string value)
+#config_prefix=cache.keystone
+
+# Default TTL, in seconds, for any cached item in the
+# dogpile.cache region. This applies to any cached method that
+# doesn't have an explicit cache expiration time defined for
+# it. (integer value)
+#expiration_time=600
+
+# Dogpile.cache backend module. It is recommended that
+# Memcache with pooling (keystone.cache.memcache_pool) or
+# Redis (dogpile.cache.redis) be used in production
+# deployments. Small workloads (single process) like devstack
+# can use the dogpile.cache.memory backend. (string value)
+#backend=keystone.common.cache.noop
+
+# Arguments supplied to the backend module. Specify this
+# option once per argument to be passed to the dogpile.cache
+# backend. Example format: "<argname>:<value>". (multi valued)
+#backend_argument=
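+# (Illustrative only: a hypothetical argument for a memcached backend
+# could be passed as
+# backend_argument=url:127.0.0.1:11211)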
+
+# Proxy classes to import that will affect the way the
+# dogpile.cache backend functions. See the dogpile.cache
+# documentation on changing-backend-behavior. (list value)
+#proxies=
+
+# Global toggle for all caching using the should_cache_fn
+# mechanism. (boolean value)
+#enabled=false
+
+# Extra debugging from the cache backend (cache keys,
+# get/set/delete/etc calls). This is only really useful if you
+# need to see the specific cache-backend get/set/delete calls
+# with the keys/values. Typically this should be left set to
+# false. (boolean value)
+#debug_cache_backend=false
+
+# Memcache servers in the format of "host:port".
+# (dogpile.cache.memcache and keystone.cache.memcache_pool
+# backends only) (list value)
+#memcache_servers=localhost:11211
+
+# Number of seconds memcached server is considered dead before
+# it is tried again. (dogpile.cache.memcache and
+# keystone.cache.memcache_pool backends only) (integer value)
+#memcache_dead_retry=300
+
+# Timeout in seconds for every call to a server.
+# (dogpile.cache.memcache and keystone.cache.memcache_pool
+# backends only) (integer value)
+#memcache_socket_timeout=3
+
+# Max total number of open connections to every memcached
+# server. (keystone.cache.memcache_pool backend only) (integer
+# value)
+#memcache_pool_maxsize=10
+
+# Number of seconds a connection to memcached is held unused
+# in the pool before it is closed.
+# (keystone.cache.memcache_pool backend only) (integer value)
+#memcache_pool_unused_timeout=60
+
+# Number of seconds that an operation will wait to get a
+# memcache client connection. (integer value)
+#memcache_pool_connection_get_timeout=10
+
+
+[catalog]
+
+#
+# Options defined in keystone
+#
+
+# Catalog template file name for use with the template catalog
+# backend. (string value)
+#template_file=default_catalog.templates
+
+# Catalog backend driver. (string value)
+#driver=keystone.catalog.backends.sql.Catalog
+
+# Toggle for catalog caching. This has no effect unless global
+# caching is enabled. (boolean value)
+#caching=true
+
+# Time to cache catalog data (in seconds). This has no effect
+# unless global and catalog caching are enabled. (integer
+# value)
+#cache_time=<None>
+
+# Maximum number of entities that will be returned in a
+# catalog collection. (integer value)
+#list_limit=<None>
+
+# (Deprecated) List of possible substitutions for use in
+# formatting endpoints. Use caution when modifying this list.
+# It will give users with permission to create endpoints the
+# ability to see those values in your configuration file. This
+# option will be removed in Juno. (list value)
+#endpoint_substitution_whitelist=tenant_id,user_id,public_bind_host,admin_bind_host,compute_host,compute_port,admin_port,public_port,public_endpoint,admin_endpoint
+
+
+[credential]
+
+#
+# Options defined in keystone
+#
+
+# Credential backend driver. (string value)
+#driver=keystone.credential.backends.sql.Credential
+
+
+[database]
+
+#
+# Options defined in oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+#sqlite_db=oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous=true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the
+# database. (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection=<None>
+connection=postgresql://{{ KEYSTONE_DB_USER }}:{{ KEYSTONE_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/keystone
+
+# The SQLAlchemy connection string to use to connect to the
+# slave database. (string value)
+#slave_connection=<None>
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum db connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a SQL connection.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information: 0=None,
+# 100=Everything. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add Python stack traces to SQL as comment strings. (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+# Enable the experimental use of database reconnect on
+# connection lost. (boolean value)
+#use_db_reconnect=false
+
+# Seconds between database connection retries. (integer value)
+#db_retry_interval=1
+
+# If True, increases the interval between database connection
+# retries up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval=true
+
+# If db_inc_retry_interval is set, the maximum seconds between
+# database connection retries. (integer value)
+#db_max_retry_interval=10
+
+# Maximum database connection retries before error is raised.
+# Set to -1 to specify an infinite retry count. (integer
+# value)
+#db_max_retries=20
+
+
+[ec2]
+
+#
+# Options defined in keystone
+#
+
+# EC2Credential backend driver. (string value)
+#driver=keystone.contrib.ec2.backends.kvs.Ec2
+
+
+[endpoint_filter]
+
+#
+# Options defined in keystone
+#
+
+# Endpoint Filter backend driver (string value)
+#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter
+
+# Toggle to return all active endpoints if no filter exists.
+# (boolean value)
+#return_all_endpoints_if_no_filter=true
+
+
+[endpoint_policy]
+
+#
+# Options defined in keystone
+#
+
+# Endpoint policy backend driver (string value)
+#driver=keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy
+
+
+[federation]
+
+#
+# Options defined in keystone
+#
+
+# Federation backend driver. (string value)
+#driver=keystone.contrib.federation.backends.sql.Federation
+
+# Value to be used when filtering assertion parameters from
+# the environment. (string value)
+#assertion_prefix=
+
+
+[identity]
+
+#
+# Options defined in keystone
+#
+
+# This references the domain to use for all Identity API v2
+# requests (which are not aware of domains). A domain with
+# this ID will be created for you by keystone-manage db_sync
+# in migration 008. The domain referenced by this ID cannot be
+# deleted on the v3 API, to prevent accidentally breaking the
+# v2 API. There is nothing special about this domain, other
+# than the fact that it must exist in order to maintain
+# support for your v2 clients. (string value)
+#default_domain_id=default
+
+# A subset (or all) of domains can have their own identity
+# driver, each with their own partial configuration file in a
+# domain configuration directory. Only values specific to the
+# domain need to be placed in the domain specific
+# configuration file. This feature is disabled by default; set
+# to true to enable. (boolean value)
+#domain_specific_drivers_enabled=false
+
+# Path for Keystone to locate the domain specific identity
+# configuration files if domain_specific_drivers_enabled is
+# set to true. (string value)
+#domain_config_dir=/etc/keystone/domains
+
+# Identity backend driver. (string value)
+#driver=keystone.identity.backends.sql.Identity
+
+# Maximum supported length for user passwords; decrease to
+# improve performance. (integer value)
+#max_password_length=4096
+
+# Maximum number of entities that will be returned in an
+# identity collection. (integer value)
+#list_limit=<None>
+
+
+[identity_mapping]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Identity Mapping backend driver. (string value)
+#driver=keystone.identity.mapping_backends.sql.Mapping
+
+# Public ID generator for user and group entities. The
+# Keystone identity mapper only supports generators that
+# produce no more than 64 characters. (string value)
+#generator=keystone.identity.id_generators.sha256.Generator
+
+# The format of user and group IDs changed in Juno for
+# backends that do not generate UUIDs (e.g. LDAP), with
+# keystone providing a hash mapping to the underlying
+# attribute in LDAP. By default this mapping is disabled,
+# which ensures that existing IDs will not change. Even when
+# the mapping is enabled by using domain specific drivers, any
+# users and groups from the default domain being handled by
+# LDAP will still not be mapped to ensure their IDs remain
+# backward compatible. Setting this value to False will enable
+# the mapping for even the default LDAP driver. It is only
+# safe to do this if you do not already have assignments for
+# users and groups from the default LDAP domain, and it is
+# acceptable for Keystone to provide the different IDs to
+# clients than it did previously. Typically this means that
+# the only time you can set this value to False is when
+# configuring a fresh installation. (boolean value)
+#backward_compatible_ids=true
+
+
+[kvs]
+
+#
+# Options defined in keystone
+#
+
+# Extra dogpile.cache backend modules to register with the
+# dogpile.cache library. (list value)
+#backends=
+
+# Prefix for building the configuration dictionary for the KVS
+# region. This should not need to be changed unless there is
+# another dogpile.cache region with the same configuration
+# name. (string value)
+#config_prefix=keystone.kvs
+
+# Toggle to disable using a key-mangling function to ensure
+# fixed length keys. This is toggle-able for debugging
+# purposes, it is highly recommended to always leave this set
+# to true. (boolean value)
+#enable_key_mangler=true
+
+# Default lock timeout for distributed locking. (integer
+# value)
+#default_lock_timeout=5
+
+
+[ldap]
+
+#
+# Options defined in keystone
+#
+
+# URL for connecting to the LDAP server. (string value)
+#url=ldap://localhost
+
+# User BindDN to query the LDAP server. (string value)
+#user=<None>
+
+# Password for the BindDN to query the LDAP server. (string
+# value)
+#password=<None>
+
+# LDAP server suffix (string value)
+#suffix=cn=example,cn=com
+
+# If true, will add a dummy member to groups. This is required
+# if the objectclass for groups requires the "member"
+# attribute. (boolean value)
+#use_dumb_member=false
+
+# DN of the "dummy member" to use when "use_dumb_member" is
+# enabled. (string value)
+#dumb_member=cn=dumb,dc=nonexistent
+
+# Delete subtrees using the subtree delete control. Only
+# enable this option if your LDAP server supports subtree
+# deletion. (boolean value)
+#allow_subtree_delete=false
+
+# The LDAP scope for queries; this can be either "one"
+# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree).
+# (string value)
+#query_scope=one
+
+# Maximum results per page; a value of zero ("0") disables
+# paging. (integer value)
+#page_size=0
+
+# The LDAP dereferencing option for queries. This can be
+# either "never", "searching", "always", "finding" or
+# "default". The "default" option falls back to using default
+# dereferencing configured by your ldap.conf. (string value)
+#alias_dereferencing=default
+
+# Sets the LDAP debugging level for LDAP calls. A value of 0
+# means that debugging is not enabled. This value is a
+# bitmask, consult your LDAP documentation for possible
+# values. (integer value)
+#debug_level=<None>
+
+# Override the system's default referral chasing behavior for
+# queries. (boolean value)
+#chase_referrals=<None>
+
+# Search base for users. (string value)
+#user_tree_dn=<None>
+
+# LDAP search filter for users. (string value)
+#user_filter=<None>
+
+# LDAP objectclass for users. (string value)
+#user_objectclass=inetOrgPerson
+
+# LDAP attribute mapped to user id. WARNING: must not be a
+# multivalued attribute. (string value)
+#user_id_attribute=cn
+
+# LDAP attribute mapped to user name. (string value)
+#user_name_attribute=sn
+
+# LDAP attribute mapped to user email. (string value)
+#user_mail_attribute=mail
+
+# LDAP attribute mapped to password. (string value)
+#user_pass_attribute=userPassword
+
+# LDAP attribute mapped to user enabled flag. (string value)
+#user_enabled_attribute=enabled
+
+# Invert the meaning of the boolean enabled values. Some LDAP
+# servers use a boolean lock attribute where "true" means an
+# account is disabled. Setting "user_enabled_invert = true"
+# will allow these lock attributes to be used. This setting
+# will have no effect if "user_enabled_mask" or
+# "user_enabled_emulation" settings are in use. (boolean
+# value)
+#user_enabled_invert=false
+
+# Bitmask integer to indicate the bit that the enabled value
+# is stored in if the LDAP server represents "enabled" as a
+# bit on an integer rather than a boolean. A value of "0"
+# indicates the mask is not used. If this is not set to "0"
+# the typical value is "2". This is typically used when
+# "user_enabled_attribute = userAccountControl". (integer
+# value)
+#user_enabled_mask=0
+
+# Default value to enable users. This should match an
+# appropriate int value if the LDAP server uses non-boolean
+# (bitmask) values to indicate if a user is enabled or
+# disabled. If this is not set to "True" the typical value is
+# "512". This is typically used when "user_enabled_attribute =
+# userAccountControl". (string value)
+#user_enabled_default=True
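Editor's note: the user_enabled_mask / user_enabled_default pair above describes a bitmask scheme such as Active Directory's userAccountControl. A small sketch of how such a mask is applied (the constants mirror the typical values quoted in the comments; this is not keystone source code):

    ACCOUNT_DISABLE = 0x2      # typical user_enabled_mask value
    NORMAL_ACCOUNT = 0x200     # 512, the typical user_enabled_default

    def is_enabled(user_account_control, mask=ACCOUNT_DISABLE):
        # The account is enabled when the "disabled" bit is not set.
        return (user_account_control & mask) == 0

    print(is_enabled(NORMAL_ACCOUNT))                     # True
    print(is_enabled(NORMAL_ACCOUNT | ACCOUNT_DISABLE))   # False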
+
+# List of attributes stripped off the user on update. (list
+# value)
+#user_attribute_ignore=default_project_id,tenants
+
+# LDAP attribute mapped to default_project_id for users.
+# (string value)
+#user_default_project_id_attribute=<None>
+
+# Allow user creation in LDAP backend. (boolean value)
+#user_allow_create=true
+
+# Allow user updates in LDAP backend. (boolean value)
+#user_allow_update=true
+
+# Allow user deletion in LDAP backend. (boolean value)
+#user_allow_delete=true
+
+# If true, Keystone uses an alternative method to determine if
+# a user is enabled or not by checking if they are a member of
+# the "user_enabled_emulation_dn" group. (boolean value)
+#user_enabled_emulation=false
+
+# DN of the group entry to hold enabled users when using
+# enabled emulation. (string value)
+#user_enabled_emulation_dn=<None>
+
+# Additional attribute mappings for users. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#user_additional_attribute_mapping=
+
+# Search base for projects (string value)
+# Deprecated group/name - [ldap]/tenant_tree_dn
+#project_tree_dn=<None>
+
+# LDAP search filter for projects. (string value)
+# Deprecated group/name - [ldap]/tenant_filter
+#project_filter=<None>
+
+# LDAP objectclass for projects. (string value)
+# Deprecated group/name - [ldap]/tenant_objectclass
+#project_objectclass=groupOfNames
+
+# LDAP attribute mapped to project id. (string value)
+# Deprecated group/name - [ldap]/tenant_id_attribute
+#project_id_attribute=cn
+
+# LDAP attribute mapped to project membership for user.
+# (string value)
+# Deprecated group/name - [ldap]/tenant_member_attribute
+#project_member_attribute=member
+
+# LDAP attribute mapped to project name. (string value)
+# Deprecated group/name - [ldap]/tenant_name_attribute
+#project_name_attribute=ou
+
+# LDAP attribute mapped to project description. (string value)
+# Deprecated group/name - [ldap]/tenant_desc_attribute
+#project_desc_attribute=description
+
+# LDAP attribute mapped to project enabled. (string value)
+# Deprecated group/name - [ldap]/tenant_enabled_attribute
+#project_enabled_attribute=enabled
+
+# LDAP attribute mapped to project domain_id. (string value)
+# Deprecated group/name - [ldap]/tenant_domain_id_attribute
+#project_domain_id_attribute=businessCategory
+
+# List of attributes stripped off the project on update. (list
+# value)
+# Deprecated group/name - [ldap]/tenant_attribute_ignore
+#project_attribute_ignore=
+
+# Allow project creation in LDAP backend. (boolean value)
+# Deprecated group/name - [ldap]/tenant_allow_create
+#project_allow_create=true
+
+# Allow project update in LDAP backend. (boolean value)
+# Deprecated group/name - [ldap]/tenant_allow_update
+#project_allow_update=true
+
+# Allow project deletion in LDAP backend. (boolean value)
+# Deprecated group/name - [ldap]/tenant_allow_delete
+#project_allow_delete=true
+
+# If true, Keystone uses an alternative method to determine if
+# a project is enabled or not by checking if it is a member
+# of the "project_enabled_emulation_dn" group. (boolean value)
+# Deprecated group/name - [ldap]/tenant_enabled_emulation
+#project_enabled_emulation=false
+
+# DN of the group entry to hold enabled projects when using
+# enabled emulation. (string value)
+# Deprecated group/name - [ldap]/tenant_enabled_emulation_dn
+#project_enabled_emulation_dn=<None>
+
+# Additional attribute mappings for projects. Attribute
+# mapping format is <ldap_attr>:<user_attr>, where ldap_attr
+# is the attribute in the LDAP entry and user_attr is the
+# Identity API attribute. (list value)
+# Deprecated group/name - [ldap]/tenant_additional_attribute_mapping
+#project_additional_attribute_mapping=
+
+# Search base for roles. (string value)
+#role_tree_dn=<None>
+
+# LDAP search filter for roles. (string value)
+#role_filter=<None>
+
+# LDAP objectclass for roles. (string value)
+#role_objectclass=organizationalRole
+
+# LDAP attribute mapped to role id. (string value)
+#role_id_attribute=cn
+
+# LDAP attribute mapped to role name. (string value)
+#role_name_attribute=ou
+
+# LDAP attribute mapped to role membership. (string value)
+#role_member_attribute=roleOccupant
+
+# List of attributes stripped off the role on update. (list
+# value)
+#role_attribute_ignore=
+
+# Allow role creation in LDAP backend. (boolean value)
+#role_allow_create=true
+
+# Allow role update in LDAP backend. (boolean value)
+#role_allow_update=true
+
+# Allow role deletion in LDAP backend. (boolean value)
+#role_allow_delete=true
+
+# Additional attribute mappings for roles. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#role_additional_attribute_mapping=
+
+# Search base for groups. (string value)
+#group_tree_dn=<None>
+
+# LDAP search filter for groups. (string value)
+#group_filter=<None>
+
+# LDAP objectclass for groups. (string value)
+#group_objectclass=groupOfNames
+
+# LDAP attribute mapped to group id. (string value)
+#group_id_attribute=cn
+
+# LDAP attribute mapped to group name. (string value)
+#group_name_attribute=ou
+
+# LDAP attribute mapped to show group membership. (string
+# value)
+#group_member_attribute=member
+
+# LDAP attribute mapped to group description. (string value)
+#group_desc_attribute=description
+
+# List of attributes stripped off the group on update. (list
+# value)
+#group_attribute_ignore=
+
+# Allow group creation in LDAP backend. (boolean value)
+#group_allow_create=true
+
+# Allow group update in LDAP backend. (boolean value)
+#group_allow_update=true
+
+# Allow group deletion in LDAP backend. (boolean value)
+#group_allow_delete=true
+
+# Additional attribute mappings for groups. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#group_additional_attribute_mapping=
+
+# CA certificate file path for communicating with LDAP
+# servers. (string value)
+#tls_cacertfile=<None>
+
+# CA certificate directory path for communicating with LDAP
+# servers. (string value)
+#tls_cacertdir=<None>
+
+# Enable TLS for communicating with LDAP servers. (boolean
+# value)
+#use_tls=false
+
+# Valid options for tls_req_cert are demand, never, and allow.
+# (string value)
+#tls_req_cert=demand
+
+# Enable LDAP connection pooling. (boolean value)
+#use_pool=false
+
+# Connection pool size. (integer value)
+#pool_size=10
+
+# Maximum count of reconnect trials. (integer value)
+#pool_retry_max=3
+
+# Time span in seconds to wait between two reconnect trials.
+# (floating point value)
+#pool_retry_delay=0.1
+
+# Connector timeout in seconds. Value -1 indicates indefinite
+# wait for response. (integer value)
+#pool_connection_timeout=-1
+
+# Connection lifetime in seconds. (integer value)
+#pool_connection_lifetime=600
+
+# Enable LDAP connection pooling for end user authentication.
+# If use_pool is disabled, then this setting is meaningless
+# and is not used at all. (boolean value)
+#use_auth_pool=false
+
+# End user auth connection pool size. (integer value)
+#auth_pool_size=100
+
+# End user auth connection lifetime in seconds. (integer
+# value)
+#auth_pool_connection_lifetime=60
+
+
+[matchmaker_redis]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[memcache]
+
+#
+# Options defined in keystone
+#
+
+# Memcache servers in the format of "host:port". (list value)
+#servers=localhost:11211
+
+# Number of seconds memcached server is considered dead before
+# it is tried again. This is used by the key value store
+# system (e.g. token pooled memcached persistence backend).
+# (integer value)
+#dead_retry=300
+
+# Timeout in seconds for every call to a server. This is used
+# by the key value store system (e.g. token pooled memcached
+# persistence backend). (integer value)
+#socket_timeout=3
+
+# Max total number of open connections to every memcached
+# server. This is used by the key value store system (e.g.
+# token pooled memcached persistence backend). (integer value)
+#pool_maxsize=10
+
+# Number of seconds a connection to memcached is held unused
+# in the pool before it is closed. This is used by the key
+# value store system (e.g. token pooled memcached persistence
+# backend). (integer value)
+#pool_unused_timeout=60
+
+# Number of seconds that an operation will wait to get a
+# memcache client connection. This is used by the key value
+# store system (e.g. token pooled memcached persistence
+# backend). (integer value)
+#pool_connection_get_timeout=10
+
+
+[oauth1]
+
+#
+# Options defined in keystone
+#
+
+# Credential backend driver. (string value)
+#driver=keystone.contrib.oauth1.backends.sql.OAuth1
+
+# Duration (in seconds) for the OAuth Request Token. (integer
+# value)
+#request_token_duration=28800
+
+# Duration (in seconds) for the OAuth Access Token. (integer
+# value)
+#access_token_duration=86400
+
+
+[os_inherit]
+
+#
+# Options defined in keystone
+#
+
+# Role-assignment inheritance to projects from the owning
+# domain can be optionally enabled. (boolean value)
+#enabled=false
+
+
+[paste_deploy]
+
+#
+# Options defined in keystone
+#
+
+# Name of the paste configuration file that defines the
+# available pipelines. (string value)
+#config_file=keystone-paste.ini
+
+
+[policy]
+
+#
+# Options defined in keystone
+#
+
+# Policy backend driver. (string value)
+#driver=keystone.policy.backends.sql.Policy
+
+# Maximum number of entities that will be returned in a policy
+# collection. (integer value)
+#list_limit=<None>
+
+
+[revoke]
+
+#
+# Options defined in keystone
+#
+
+# An implementation of the backend for persisting revocation
+# events. (string value)
+#driver=keystone.contrib.revoke.backends.kvs.Revoke
+
+# This value (calculated in seconds) is added to token
+# expiration before a revocation event may be removed from the
+# backend. (integer value)
+#expiration_buffer=1800
+
+# Toggle for revocation event caching. This has no effect
+# unless global caching is enabled. (boolean value)
+#caching=true
+
+
+[saml]
+
+#
+# Options defined in keystone
+#
+
+# Default TTL, in seconds, for any generated SAML assertion
+# created by Keystone. (integer value)
+#assertion_expiration_time=3600
+
+# Binary to be called for XML signing. Install the appropriate
+# package, specify absolute path or adjust your PATH
+# environment variable if the binary cannot be found. (string
+# value)
+#xmlsec1_binary=xmlsec1
+
+# Path of the certfile for SAML signing. For non-production
+# environments, you may be interested in using `keystone-
+# manage pki_setup` to generate self-signed certificates.
+# Note, the path cannot contain a comma. (string value)
+#certfile=/etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for SAML signing. Note, the path cannot
+# contain a comma. (string value)
+#keyfile=/etc/keystone/ssl/private/signing_key.pem
+
+# Entity ID value for unique Identity Provider identification.
+# Usually FQDN is set with a suffix. A value is required to
+# generate IDP Metadata. For example:
+# https://keystone.example.com/v3/OS-FEDERATION/saml2/idp
+# (string value)
+#idp_entity_id=<None>
+
+# Identity Provider Single-Sign-On service value, required in
+# the Identity Provider's metadata. A value is required to
+# generate IDP Metadata. For example:
+# https://keystone.example.com/v3/OS-FEDERATION/saml2/sso
+# (string value)
+#idp_sso_endpoint=<None>
+
+# Language used by the organization. (string value)
+#idp_lang=en
+
+# Organization name the installation belongs to. (string
+# value)
+#idp_organization_name=<None>
+
+# Organization name to be displayed. (string value)
+#idp_organization_display_name=<None>
+
+# URL of the organization. (string value)
+#idp_organization_url=<None>
+
+# Company of contact person. (string value)
+#idp_contact_company=<None>
+
+# Given name of contact person (string value)
+#idp_contact_name=<None>
+
+# Surname of contact person. (string value)
+#idp_contact_surname=<None>
+
+# Email address of contact person. (string value)
+#idp_contact_email=<None>
+
+# Telephone number of contact person. (string value)
+#idp_contact_telephone=<None>
+
+# Contact type. Allowed values are: technical, support,
+# administrative, billing, and other. (string value)
+#idp_contact_type=other
+
+# Path to the Identity Provider Metadata file. This file
+# should be generated with the keystone-manage
+# saml_idp_metadata command. (string value)
+#idp_metadata_path=/etc/keystone/saml2_idp_metadata.xml
+
+
+[signing]
+
+#
+# Options defined in keystone
+#
+
+# Deprecated in favor of provider in the [token] section.
+# (string value)
+#token_format=<None>
+
+# Path of the certfile for token signing. For non-production
+# environments, you may be interested in using `keystone-
+# manage pki_setup` to generate self-signed certificates.
+# (string value)
+#certfile=/etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for token signing. (string value)
+#keyfile=/etc/keystone/ssl/private/signing_key.pem
+
+# Path of the CA for token signing. (string value)
+#ca_certs=/etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA key for token signing. (string value)
+#ca_key=/etc/keystone/ssl/private/cakey.pem
+
+# Key size (in bits) for token signing cert (auto generated
+# certificate). (integer value)
+#key_size=2048
+
+# Days the token signing cert is valid for (auto generated
+# certificate). (integer value)
+#valid_days=3650
+
+# Certificate subject (auto generated certificate) for token
+# signing. (string value)
+#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com
+
+
+[ssl]
+
+#
+# Options defined in keystone
+#
+
+# Toggle for SSL support on the Keystone eventlet servers.
+# (boolean value)
+#enable=false
+
+# Path of the certfile for SSL. For non-production
+# environments, you may be interested in using `keystone-
+# manage ssl_setup` to generate self-signed certificates.
+# (string value)
+#certfile=/etc/keystone/ssl/certs/keystone.pem
+
+# Path of the keyfile for SSL. (string value)
+#keyfile=/etc/keystone/ssl/private/keystonekey.pem
+
+# Path of the ca cert file for SSL. (string value)
+#ca_certs=/etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA key file for SSL. (string value)
+#ca_key=/etc/keystone/ssl/private/cakey.pem
+
+# Require client certificate. (boolean value)
+#cert_required=false
+
+# SSL key length (in bits) (auto generated certificate).
+# (integer value)
+#key_size=1024
+
+# Days the certificate is valid for once signed (auto
+# generated certificate). (integer value)
+#valid_days=3650
+
+# SSL certificate subject (auto generated certificate).
+# (string value)
+#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost
+
+
+[stats]
+
+#
+# Options defined in keystone
+#
+
+# Stats backend driver. (string value)
+#driver=keystone.contrib.stats.backends.kvs.Stats
+
+
+[token]
+
+#
+# Options defined in keystone
+#
+
+# External auth mechanisms that should add bind information to
+# token, e.g., kerberos,x509. (list value)
+#bind=
+
+# Enforcement policy on tokens presented to Keystone with bind
+# information. One of disabled, permissive, strict, required
+# or a specifically required bind mode, e.g., kerberos or x509
+# to require binding to that authentication. (string value)
+#enforce_token_bind=permissive
+
+# Amount of time a token should remain valid (in seconds).
+# (integer value)
+#expiration=3600
+
+# Controls the token construction, validation, and revocation
+# operations. Core providers are
+# "keystone.token.providers.[pkiz|pki|uuid].Provider". The
+# default provider is pkiz. (string value)
+provider=keystone.token.providers.uuid.Provider
+
+# Token persistence backend driver. (string value)
+driver=keystone.token.backends.sql.Token
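Editor's note: unlike the pkiz/pki providers, the uuid provider configured here issues opaque token IDs that carry no payload, so every validation is a lookup in the persistence backend selected by driver. A one-line illustration of what such an ID looks like:

    import uuid

    print(uuid.uuid4().hex)   # 32 hex characters, nothing to decode client-side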
+
+# Toggle for token system caching. This has no effect unless
+# global caching is enabled. (boolean value)
+#caching=true
+
+# Time to cache the revocation list and the revocation events
+# if revoke extension is enabled (in seconds). This has no
+# effect unless global and token caching are enabled. (integer
+# value)
+#revocation_cache_time=3600
+
+# Time to cache tokens (in seconds). This has no effect unless
+# global and token caching are enabled. (integer value)
+#cache_time=<None>
+
+# Revoke token by token identifier. Setting revoke_by_id to
+# true enables various forms of enumerating tokens, e.g. `list
+# tokens for user`. These enumerations are processed to
+# determine the list of tokens to revoke. Only disable if you
+# are switching to using the Revoke extension with a backend
+# other than KVS, which stores events in memory. (boolean
+# value)
+#revoke_by_id=true
+
+# The hash algorithm to use for PKI tokens. This can be set to
+# any algorithm that hashlib supports. WARNING: Before
+# changing this value, the auth_token middleware must be
+# configured with the hash_algorithms, otherwise token
+# revocation will not be processed correctly. (string value)
+#hash_algorithm=md5
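Editor's note: hash_algorithm only matters for the large PKI/PKIZ token bodies, which are shortened to a fixed-length ID for storage and revocation lookups. A minimal sketch of that hashing step using the standard hashlib module (illustrative; the actual call sites are in keystone and keystonemiddleware):

    import hashlib

    def token_hash(token_body, algorithm="md5"):
        return hashlib.new(algorithm, token_body.encode("utf-8")).hexdigest()

    print(token_hash("<very long PKI token blob>"))
    print(token_hash("<very long PKI token blob>", "sha256"))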
+
+
+[trust]
+
+#
+# Options defined in keystone
+#
+
+# Delegation and impersonation features can be optionally
+# disabled. (boolean value)
+#enabled=true
+
+# Trust backend driver. (string value)
+#driver=keystone.trust.backends.sql.Trust
+
+
diff --git a/install-files/openstack/usr/share/openstack/keystone/logging.conf b/install-files/openstack/usr/share/openstack/keystone/logging.conf
new file mode 100644
index 00000000..6cb8c425
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/keystone/logging.conf
@@ -0,0 +1,65 @@
+[loggers]
+keys=root,access
+
+[handlers]
+keys=production,file,access_file,devel
+
+[formatters]
+keys=minimal,normal,debug
+
+
+###########
+# Loggers #
+###########
+
+[logger_root]
+level=WARNING
+handlers=file
+
+[logger_access]
+level=INFO
+qualname=access
+handlers=access_file
+
+
+################
+# Log Handlers #
+################
+
+[handler_production]
+class=handlers.SysLogHandler
+level=ERROR
+formatter=normal
+args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)
+
+[handler_file]
+class=handlers.WatchedFileHandler
+level=WARNING
+formatter=normal
+args=('error.log',)
+
+[handler_access_file]
+class=handlers.WatchedFileHandler
+level=INFO
+formatter=minimal
+args=('access.log',)
+
+[handler_devel]
+class=StreamHandler
+level=NOTSET
+formatter=debug
+args=(sys.stdout,)
+
+
+##################
+# Log Formatters #
+##################
+
+[formatter_minimal]
+format=%(message)s
+
+[formatter_normal]
+format=(%(name)s): %(asctime)s %(levelname)s %(message)s
+
+[formatter_debug]
+format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s
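Editor's note: this file uses the stock Python logging fileConfig format, so it can be exercised outside keystone as well. A quick sketch, assuming the file is saved as logging.conf in the working directory (error.log and access.log are then created relative to that directory):

    import logging
    import logging.config

    logging.config.fileConfig("logging.conf")

    logging.getLogger("access").info("GET /v3/auth/tokens 200")   # -> access.log
    logging.getLogger(__name__).warning("something went wrong")   # -> error.log via root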
diff --git a/install-files/openstack/usr/share/openstack/keystone/policy.json b/install-files/openstack/usr/share/openstack/keystone/policy.json
new file mode 100644
index 00000000..af65205e
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/keystone/policy.json
@@ -0,0 +1,171 @@
+{
+ "admin_required": "role:admin or is_admin:1",
+ "service_role": "role:service",
+ "service_or_admin": "rule:admin_required or rule:service_role",
+ "owner" : "user_id:%(user_id)s",
+ "admin_or_owner": "rule:admin_required or rule:owner",
+
+ "default": "rule:admin_required",
+
+ "identity:get_region": "",
+ "identity:list_regions": "",
+ "identity:create_region": "rule:admin_required",
+ "identity:update_region": "rule:admin_required",
+ "identity:delete_region": "rule:admin_required",
+
+ "identity:get_service": "rule:admin_required",
+ "identity:list_services": "rule:admin_required",
+ "identity:create_service": "rule:admin_required",
+ "identity:update_service": "rule:admin_required",
+ "identity:delete_service": "rule:admin_required",
+
+ "identity:get_endpoint": "rule:admin_required",
+ "identity:list_endpoints": "rule:admin_required",
+ "identity:create_endpoint": "rule:admin_required",
+ "identity:update_endpoint": "rule:admin_required",
+ "identity:delete_endpoint": "rule:admin_required",
+
+ "identity:get_domain": "rule:admin_required",
+ "identity:list_domains": "rule:admin_required",
+ "identity:create_domain": "rule:admin_required",
+ "identity:update_domain": "rule:admin_required",
+ "identity:delete_domain": "rule:admin_required",
+
+ "identity:get_project": "rule:admin_required",
+ "identity:list_projects": "rule:admin_required",
+ "identity:list_user_projects": "rule:admin_or_owner",
+ "identity:create_project": "rule:admin_required",
+ "identity:update_project": "rule:admin_required",
+ "identity:delete_project": "rule:admin_required",
+
+ "identity:get_user": "rule:admin_required",
+ "identity:list_users": "rule:admin_required",
+ "identity:create_user": "rule:admin_required",
+ "identity:update_user": "rule:admin_required",
+ "identity:delete_user": "rule:admin_required",
+ "identity:change_password": "rule:admin_or_owner",
+
+ "identity:get_group": "rule:admin_required",
+ "identity:list_groups": "rule:admin_required",
+ "identity:list_groups_for_user": "rule:admin_or_owner",
+ "identity:create_group": "rule:admin_required",
+ "identity:update_group": "rule:admin_required",
+ "identity:delete_group": "rule:admin_required",
+ "identity:list_users_in_group": "rule:admin_required",
+ "identity:remove_user_from_group": "rule:admin_required",
+ "identity:check_user_in_group": "rule:admin_required",
+ "identity:add_user_to_group": "rule:admin_required",
+
+ "identity:get_credential": "rule:admin_required",
+ "identity:list_credentials": "rule:admin_required",
+ "identity:create_credential": "rule:admin_required",
+ "identity:update_credential": "rule:admin_required",
+ "identity:delete_credential": "rule:admin_required",
+
+ "identity:ec2_get_credential": "rule:admin_or_owner",
+ "identity:ec2_list_credentials": "rule:admin_or_owner",
+ "identity:ec2_create_credential": "rule:admin_or_owner",
+ "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
+
+ "identity:get_role": "rule:admin_required",
+ "identity:list_roles": "rule:admin_required",
+ "identity:create_role": "rule:admin_required",
+ "identity:update_role": "rule:admin_required",
+ "identity:delete_role": "rule:admin_required",
+
+ "identity:check_grant": "rule:admin_required",
+ "identity:list_grants": "rule:admin_required",
+ "identity:create_grant": "rule:admin_required",
+ "identity:revoke_grant": "rule:admin_required",
+
+ "identity:list_role_assignments": "rule:admin_required",
+
+ "identity:get_policy": "rule:admin_required",
+ "identity:list_policies": "rule:admin_required",
+ "identity:create_policy": "rule:admin_required",
+ "identity:update_policy": "rule:admin_required",
+ "identity:delete_policy": "rule:admin_required",
+
+ "identity:check_token": "rule:admin_required",
+ "identity:validate_token": "rule:service_or_admin",
+ "identity:validate_token_head": "rule:service_or_admin",
+ "identity:revocation_list": "rule:service_or_admin",
+ "identity:revoke_token": "rule:admin_or_owner",
+
+ "identity:create_trust": "user_id:%(trust.trustor_user_id)s",
+ "identity:get_trust": "rule:admin_or_owner",
+ "identity:list_trusts": "",
+ "identity:list_roles_for_trust": "",
+ "identity:check_role_for_trust": "",
+ "identity:get_role_for_trust": "",
+ "identity:delete_trust": "",
+
+ "identity:create_consumer": "rule:admin_required",
+ "identity:get_consumer": "rule:admin_required",
+ "identity:list_consumers": "rule:admin_required",
+ "identity:delete_consumer": "rule:admin_required",
+ "identity:update_consumer": "rule:admin_required",
+
+ "identity:authorize_request_token": "rule:admin_required",
+ "identity:list_access_token_roles": "rule:admin_required",
+ "identity:get_access_token_role": "rule:admin_required",
+ "identity:list_access_tokens": "rule:admin_required",
+ "identity:get_access_token": "rule:admin_required",
+ "identity:delete_access_token": "rule:admin_required",
+
+ "identity:list_projects_for_endpoint": "rule:admin_required",
+ "identity:add_endpoint_to_project": "rule:admin_required",
+ "identity:check_endpoint_in_project": "rule:admin_required",
+ "identity:list_endpoints_for_project": "rule:admin_required",
+ "identity:remove_endpoint_from_project": "rule:admin_required",
+
+ "identity:create_endpoint_group": "rule:admin_required",
+ "identity:list_endpoint_groups": "rule:admin_required",
+ "identity:get_endpoint_group": "rule:admin_required",
+ "identity:update_endpoint_group": "rule:admin_required",
+ "identity:delete_endpoint_group": "rule:admin_required",
+ "identity:list_projects_associated_with_endpoint_group": "rule:admin_required",
+ "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required",
+ "identity:list_endpoint_groups_for_project": "rule:admin_required",
+ "identity:add_endpoint_group_to_project": "rule:admin_required",
+ "identity:remove_endpoint_group_from_project": "rule:admin_required",
+
+ "identity:create_identity_provider": "rule:admin_required",
+ "identity:list_identity_providers": "rule:admin_required",
+ "identity:get_identity_providers": "rule:admin_required",
+ "identity:update_identity_provider": "rule:admin_required",
+ "identity:delete_identity_provider": "rule:admin_required",
+
+ "identity:create_protocol": "rule:admin_required",
+ "identity:update_protocol": "rule:admin_required",
+ "identity:get_protocol": "rule:admin_required",
+ "identity:list_protocols": "rule:admin_required",
+ "identity:delete_protocol": "rule:admin_required",
+
+ "identity:create_mapping": "rule:admin_required",
+ "identity:get_mapping": "rule:admin_required",
+ "identity:list_mappings": "rule:admin_required",
+ "identity:delete_mapping": "rule:admin_required",
+ "identity:update_mapping": "rule:admin_required",
+
+ "identity:get_auth_catalog": "",
+ "identity:get_auth_projects": "",
+ "identity:get_auth_domains": "",
+
+ "identity:list_projects_for_groups": "",
+ "identity:list_domains_for_groups": "",
+
+ "identity:list_revoke_events": "",
+
+ "identity:create_policy_association_for_endpoint": "rule:admin_required",
+ "identity:check_policy_association_for_endpoint": "rule:admin_required",
+ "identity:delete_policy_association_for_endpoint": "rule:admin_required",
+ "identity:create_policy_association_for_service": "rule:admin_required",
+ "identity:check_policy_association_for_service": "rule:admin_required",
+ "identity:delete_policy_association_for_service": "rule:admin_required",
+ "identity:create_policy_association_for_region_and_service": "rule:admin_required",
+ "identity:check_policy_association_for_region_and_service": "rule:admin_required",
+ "identity:delete_policy_association_for_region_and_service": "rule:admin_required",
+ "identity:get_policy_for_endpoint": "rule:admin_required",
+ "identity:list_endpoints_for_policy": "rule:admin_required"
+}
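Editor's note: these rules are evaluated by oslo.policy. A rough sketch of how a rule such as identity:change_password resolves, assuming the file above is available as policy.json in the working directory; the credential and target dictionaries are made-up examples, not keystone's internal request context:

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF, use_conf=False)
    with open("policy.json") as f:
        enforcer.set_rules(policy.Rules.load(f.read()))

    creds = {"roles": ["member"], "user_id": "abc123"}   # assumed token data
    target = {"user_id": "abc123"}                       # the user being acted on

    # "rule:admin_or_owner" passes because the caller owns the target user.
    print(enforcer.enforce("identity:change_password", target, creds))

    # "rule:admin_required" fails for a caller without the admin role.
    print(enforcer.enforce("identity:create_user", target, creds))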
diff --git a/install-files/openstack/usr/share/openstack/network.yml b/install-files/openstack/usr/share/openstack/network.yml
new file mode 100644
index 00000000..f99f7f1a
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/network.yml
@@ -0,0 +1,67 @@
+---
+- hosts: localhost
+ vars_files:
+ - /etc/openstack/network.conf
+ tasks:
+# Create the bridges used for the mapped external network
+
+# Count number of network interfaces (interfaces starting with 'e')
+ - shell: ls /sys/class/net | grep ^e.* | wc -l
+ register: number_interfaces
+
+# Abort if the number of interfaces != 1
+  - fail:
+      msg: More than one, or no, network interfaces found.
+ when: EXTERNAL_INTERFACE is not defined and number_interfaces.stdout != "1"
+
+ - shell: ls /sys/class/net | grep ^e.*
+ register: interface_name
+ when: EXTERNAL_INTERFACE is not defined
+
+ - set_fact:
+ ETH_INTERFACE: "{{ interface_name.stdout }}"
+ when: EXTERNAL_INTERFACE is not defined
+
+ - set_fact:
+ ETH_INTERFACE: "{{ EXTERNAL_INTERFACE }}"
+ when: EXTERNAL_INTERFACE is defined
+
+ - set_fact:
+ ETH_MAC_ADDRESS: "{{ hostvars['localhost']['ansible_' + ETH_INTERFACE]['macaddress'] }}"
+
+ - name: Create the /run/systemd/network
+ file:
+ path: /run/systemd/network
+ state: directory
+
+ - name: Disable dhcp on the bound physical interface
+ template:
+ src: /usr/share/openstack/extras/00-disable-device.network
+ dest: /run/systemd/network/00-disable-{{ item }}-config.network
+ with_items:
+ - "{{ ETH_INTERFACE }}"
+
+ - name: Disable dhcp on all the internal interfaces
+ template:
+ src: /usr/share/openstack/extras/00-disable-device.network
+ dest: /run/systemd/network/00-disable-{{ item }}-config.network
+ with_items:
+ - ovs-system
+
+ - openvswitch_bridge:
+ bridge: br-ex
+ state: present
+
+ - openvswitch_port:
+ bridge: br-ex
+ port: "{{ ETH_INTERFACE }}"
+ state: present
+
+ - shell: ovs-vsctl set bridge br-ex other-config:hwaddr={{ ETH_MAC_ADDRESS }}
+
+ - name: Enable dhcp on the Open vSwitch device that replaces our external interface
+ template:
+ src: /usr/share/openstack/extras/60-device-dhcp.network
+ dest: /run/systemd/network/60-{{ item }}-dhcp.network
+ with_items:
+ - br-ex
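Editor's note: the first few tasks above implement a small piece of logic: use EXTERNAL_INTERFACE when it is defined, otherwise require exactly one "e*" interface under /sys/class/net. A stand-alone sketch of that selection step (the variable names come from the playbook; the helper itself is hypothetical):

    import os

    def pick_external_interface(override=None):
        if override:                       # EXTERNAL_INTERFACE, when defined
            return override
        candidates = [n for n in os.listdir("/sys/class/net") if n.startswith("e")]
        if len(candidates) != 1:
            raise RuntimeError("More than one, or no, network interfaces found.")
        return candidates[0]               # becomes ETH_INTERFACE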
diff --git a/install-files/openstack/usr/share/openstack/neutron-config.yml b/install-files/openstack/usr/share/openstack/neutron-config.yml
new file mode 100644
index 00000000..97f4c76e
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron-config.yml
@@ -0,0 +1,48 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/neutron.conf"
+ tasks:
+
+ - name: Create the neutron user.
+ user:
+ name: neutron
+ comment: Openstack Neutron Daemons
+ shell: /sbin/nologin
+ home: /var/lib/neutron
+
+ - name: Create the /var folders for neutron
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: neutron
+ group: neutron
+ with_items:
+ - /var/run/neutron
+ - /var/lock/neutron
+ - /var/log/neutron
+
+ - name: Get service tenant id needed in neutron.conf
+ shell: |
+ keystone \
+ --os-endpoint http://{{ CONTROLLER_HOST_ADDRESS|quote }}:35357/v2.0 \
+ --os-token {{ KEYSTONE_TEMPORARY_ADMIN_TOKEN|quote }} \
+ tenant-get service | grep id | tr -d " " | cut -d"|" -f3
+ register: tenant_service_id
+
+ - set_fact:
+ SERVICE_TENANT_ID: "{{ tenant_service_id.stdout }}"
+
+ - name: Create the directories needed for Neutron configuration files.
+ file:
+ path: /etc/{{ item }}
+ state: directory
+ with_lines:
+ - cd /usr/share/openstack && find neutron -type d
+
+ - name: Add configuration needed for neutron using templates
+ template:
+ src: /usr/share/openstack/{{ item }}
+ dest: /etc/{{ item }}
+ with_lines:
+ - cd /usr/share/openstack && find neutron -type f
diff --git a/install-files/openstack/usr/share/openstack/neutron-db.yml b/install-files/openstack/usr/share/openstack/neutron-db.yml
new file mode 100644
index 00000000..91dde6fe
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron-db.yml
@@ -0,0 +1,51 @@
+---
+- hosts: localhost
+ vars_files:
+ - "/etc/openstack/neutron.conf"
+ tasks:
+ - name: Create neutron service user in service tenant
+ keystone_user:
+ user: "{{ NEUTRON_SERVICE_USER }}"
+ password: "{{ NEUTRON_SERVICE_PASSWORD }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Add admin role to neutron service user in service tenant
+ keystone_user:
+ role: admin
+ user: "{{ NEUTRON_SERVICE_USER }}"
+ tenant: service
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - keystone_service:
+ name: neutron
+ type: network
+ description: Openstack Compute Networking
+ publicurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9696
+ internalurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9696
+ adminurl: http://{{ CONTROLLER_HOST_ADDRESS }}:9696
+ region: regionOne
+ token: "{{ KEYSTONE_TEMPORARY_ADMIN_TOKEN }}"
+
+ - name: Create postgresql user for neutron
+ postgresql_user:
+ name: "{{ NEUTRON_DB_USER }}"
+ password: "{{ NEUTRON_DB_PASSWORD }}"
+ sudo: yes
+ sudo_user: neutron
+
+ - name: Create database for neutron services
+ postgresql_db:
+ name: neutron
+ owner: "{{ NEUTRON_DB_USER }}"
+ sudo: yes
+ sudo_user: neutron
+
+ - name: Initiate neutron database
+ shell: |
+ neutron-db-manage \
+ --config-file /etc/neutron/neutron.conf \
+ --config-file /etc/neutron/plugins/ml2/ml2_conf.ini \
+ upgrade juno
+ sudo: yes
+ sudo_user: neutron
diff --git a/install-files/openstack/usr/share/openstack/neutron/api-paste.ini b/install-files/openstack/usr/share/openstack/neutron/api-paste.ini
new file mode 100644
index 00000000..bbcd4152
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/api-paste.ini
@@ -0,0 +1,30 @@
+[composite:neutron]
+use = egg:Paste#urlmap
+/: neutronversions
+/v2.0: neutronapi_v2_0
+
+[composite:neutronapi_v2_0]
+use = call:neutron.auth:pipeline_factory
+noauth = request_id catch_errors extensions neutronapiapp_v2_0
+keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0
+
+[filter:request_id]
+paste.filter_factory = neutron.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:catch_errors]
+paste.filter_factory = neutron.openstack.common.middleware.catch_errors:CatchErrorsMiddleware.factory
+
+[filter:keystonecontext]
+paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
+[filter:extensions]
+paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
+
+[app:neutronversions]
+paste.app_factory = neutron.api.versions:Versions.factory
+
+[app:neutronapiapp_v2_0]
+paste.app_factory = neutron.api.v2.router:APIRouter.factory
diff --git a/install-files/openstack/usr/share/openstack/neutron/dhcp_agent.ini b/install-files/openstack/usr/share/openstack/neutron/dhcp_agent.ini
new file mode 100644
index 00000000..c6c2b9a7
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/dhcp_agent.ini
@@ -0,0 +1,89 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+use_syslog = True
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+# resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+enable_isolated_metadata = True
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
+# is connected to a Neutron router from which the VMs send metadata
+# request. In this case DHCP Option 121 will not be injected in VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+# enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+# dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
+
+# Limit number of leases to prevent a denial-of-service.
+# dnsmasq_lease_max = 16777216
+
+# Location to DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/install-files/openstack/usr/share/openstack/neutron/fwaas_driver.ini b/install-files/openstack/usr/share/openstack/neutron/fwaas_driver.ini
new file mode 100644
index 00000000..41f761ab
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/fwaas_driver.ini
@@ -0,0 +1,3 @@
+[fwaas]
+#driver = neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
+#enabled = True
diff --git a/install-files/openstack/usr/share/openstack/neutron/l3_agent.ini b/install-files/openstack/usr/share/openstack/neutron/l3_agent.ini
new file mode 100644
index 00000000..000cd997
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/l3_agent.ini
@@ -0,0 +1,103 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+use_syslog = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set to False, then the agent can only configure one router.
+
+# This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow the L3 agent to support multiple external
+# networks, both the external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured. This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+# handle_internal_only_routers = True
+
+# Name of the bridge used for external network traffic. This should be set to
+# an empty value for the Linux bridge. When this parameter is set, each L3 agent
+# can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+# metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
+# to disable this feature.
+# send_arp_for_ha = 3
+
+# seconds between re-sync routers' data if needed
+# periodic_interval = 40
+
+# seconds to start to sync routers' data after
+# starting agent
+# periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
+
+# The working mode for the agent. Allowed values are:
+# - legacy: this preserves the existing behavior where the L3 agent is
+# deployed on a centralized networking node to provide L3 services
+# like DNAT, and SNAT. Use this mode if you do not want to adopt DVR.
+# - dvr: this mode enables DVR functionality, and must be used for an L3
+# agent that runs on a compute host.
+# - dvr_snat: this enables centralized SNAT support in conjunction with
+# DVR. This mode must be used for an L3 agent running on a centralized
+# node (or in single-host deployments, e.g. devstack).
+# agent_mode = legacy
+
+# Location to store keepalived and all HA configurations
+# ha_confs_path = $state_path/ha_confs
+
+# VRRP authentication type AH/PASS
+# ha_vrrp_auth_type = PASS
+
+# VRRP authentication password
+# ha_vrrp_auth_password =
+
+# The advertisement interval in seconds
+# ha_vrrp_advert_int = 2
diff --git a/install-files/openstack/usr/share/openstack/neutron/lbaas_agent.ini b/install-files/openstack/usr/share/openstack/neutron/lbaas_agent.ini
new file mode 100644
index 00000000..68a2759e
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/lbaas_agent.ini
@@ -0,0 +1,42 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output).
+# debug = False
+
+# The LBaaS agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+# periodic_interval = 10
+
+# LBaaS requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent requires drivers to manage the load balancer. HAProxy is the open-source version.
+# Multiple device drivers reflecting different service providers could be specified:
+# device_driver = path.to.provider1.driver.Driver
+# device_driver = path.to.provider2.driver.Driver
+# Default is:
+# device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver
+
+[haproxy]
+# Location to store config and state files
+# loadbalancer_state_path = $state_path/lbaas
+
+# The user group
+# user_group = nogroup
+
+# When deleting and re-adding the same VIP, send this many gratuitous ARPs to flush
+# the ARP cache in the router. Set it below or equal to 0 to disable this feature.
+# send_gratuitous_arp = 3
diff --git a/install-files/openstack/usr/share/openstack/neutron/metadata_agent.ini b/install-files/openstack/usr/share/openstack/neutron/metadata_agent.ini
new file mode 100644
index 00000000..ed238770
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/metadata_agent.ini
@@ -0,0 +1,60 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = True
+use_syslog = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+auth_region = regionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = service
+admin_user = {{ NEUTRON_SERVICE_USER }}
+admin_password = {{ NEUTRON_SERVICE_PASSWORD }}
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+nova_metadata_ip = {{ CONTROLLER_HOST_ADDRESS }}
+
+# TCP Port used by Nova metadata server
+# nova_metadata_port = 8775
+
+# Which protocol to use for requests to Nova metadata server, http or https
+# nova_metadata_protocol = http
+
+# Whether insecure SSL connection should be accepted for Nova metadata server
+# requests
+# nova_metadata_insecure = False
+
+# Client certificate for nova api, needed when nova api requires client
+# certificates
+# nova_client_cert =
+
+# Private key for nova client certificate
+# nova_client_priv_key =
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_PROXY_SHARED_SECRET }}
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server. Defaults to
+# half the number of CPU cores
+# metadata_workers =
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 4096
+
+# URL to connect to the cache backend.
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url = memory://?default_ttl=5
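Editor's note: the metadata_proxy_shared_secret protection described above works by attaching a keyed hash of the Instance-ID header that the Nova metadata server can recompute and compare. A hedged sketch of that signing step (HMAC-SHA256 is assumed here for illustration):

    import hashlib
    import hmac

    def sign_instance_id(shared_secret, instance_id):
        return hmac.new(shared_secret.encode("utf-8"),
                        instance_id.encode("utf-8"),
                        hashlib.sha256).hexdigest()

    print(sign_instance_id("s3cr3t", "b6f2c3d4-example-instance-uuid"))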
diff --git a/install-files/openstack/usr/share/openstack/neutron/metering_agent.ini b/install-files/openstack/usr/share/openstack/neutron/metering_agent.ini
new file mode 100644
index 00000000..88826ce7
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/metering_agent.ini
@@ -0,0 +1,18 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = True
+
+# Default driver:
+# driver = neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver
+# Example of non-default driver
+# driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver
+
+# Interval between two metering measures
+# measure_interval = 30
+
+# Interval between two metering reports
+# report_interval = 300
+
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# use_namespaces = True
diff --git a/install-files/openstack/usr/share/openstack/neutron/neutron.conf b/install-files/openstack/usr/share/openstack/neutron/neutron.conf
new file mode 100644
index 00000000..51de7464
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/neutron.conf
@@ -0,0 +1,640 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+# verbose = False
+
+# =========Start Global Config Option for Distributed L3 Router===============
+# Setting the "router_distributed" flag to "True" will default to the creation
+# of distributed tenant routers. The admin can override this flag by specifying
+# the type of the router on the create request (admin-only attribute). Default
+# value is "False" to support legacy mode (centralized) routers.
+#
+# router_distributed = False
+#
+# ===========End Global Config Option for Distributed L3 Router===============
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+# debug = False
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+use_syslog = True
+
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+# log_dir =
+
+# publish_errors = False
+
+# Address to bind the API server to
+# bind_host = 0.0.0.0
+
+# Port to bind the API server to
+# bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+service_plugins = router
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
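Editor's note: a rough illustration of the base_mac behaviour described above, i.e. keep the fixed leading octets and randomise the rest (a sketch, not neutron's actual generator):

    import random

    def generate_mac(base_mac="fa:16:3e:00:00:00"):
        octets = base_mac.split(":")
        fixed = 4 if octets[3] != "00" else 3      # 4th octet kept when non-zero
        tail = ["%02x" % random.randint(0, 255) for _ in range(6 - fixed)]
        return ":".join(octets[:fixed] + tail)

    print(generate_mac())                          # e.g. fa:16:3e:5d:91:07
    print(generate_mac("fa:16:3e:4f:00:00"))       # keeps the first four octets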
+
+# DVR Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be randomly
+# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
+# avoid mixing them up with MACs allocated for tenant ports.
+# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
+# The default is 3 octet
+# dvr_base_mac = fa:16:3f:00:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds). Use -1 to
+# tell dnsmasq to use infinite lease times.
+# dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet. For IPv6, validate only if
+# gateway is not a link local address. Deprecated, to be removed during the
+# K release, at which point the check will be mandatory.
+# force_gateway_on_subnet = True
+
+# Default maximum number of items returned in a single response;
+# value == infinite or a value < 0 means no max limit, otherwise the
+# value must be greater than 0. If the number of items requested is
+# greater than pagination_max_limit, the server will just return
+# pagination_max_limit items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# Maximum number of routes per router
+# max_routes = 30
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+# agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling of networks to the DHCP agent. It will schedule
+# non-hosted networks to the first DHCP agent which sends a
+# get_active_networks message to the neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling of routers to the L3 agent. It will schedule
+# non-hosted routers to the first L3 agent which sends a sync_routers
+# message to the neutron server
+# router_auto_schedule = True
+
+# Allow automatic rescheduling of routers from dead L3 agents with
+# admin_state_up set to True to alive agents.
+# allow_automatic_l3agent_failover = False
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== items for l3 extension ==============
+# Enable high availability for virtual routers.
+# l3_ha = False
+#
+# Maximum number of l3 agents which a HA router will be scheduled on. If it
+# is set to 0 the router will be scheduled on every agent.
+# max_l3_agents_per_router = 3
+#
+# Minimum number of l3 agents which a HA router will be scheduled on. The
+# default value is 2.
+# min_l3_agents_per_router = 2
+#
+# CIDR of the administrative network if HA mode is enabled
+# l3_ha_net_cidr = 169.254.192.0/18
+# =========== end of items for l3 extension =======
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+# api_workers = 0
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+# rpc_workers = 0
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ CONTROLLER_HOST_ADDRESS }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = regionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = {{ NOVA_SERVICE_USER }}
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ SERVICE_TENANT_ID }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_SERVICE_PASSWORD }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+
+# CA file for novaclient to verify server certificates
+# nova_ca_certificates_file =
+
+# Boolean to control ignoring SSL errors on the nova url
+# nova_api_insecure = False
+
+# Number of seconds between sending events to nova if there are any events to send
+# send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). Valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+rabbit_host={{ RABBITMQ_HOST }}
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+rabbit_port={{ RABBITMQ_PORT }}
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+rabbit_userid={{ RABBITMQ_USER }}
+
+# The RabbitMQ password. (string value)
+rabbit_password={{ RABBITMQ_PASSWORD }}
+
+# the RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# If passed, use a fake RabbitMQ provider. (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=oslo
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+notification_driver=neutron.openstack.common.notifier.rpc_notifier
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+[matchmaker_redis]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+[quotas]
+# Default driver to use for quota checks
+# quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+# quota_items = network,subnet,port
+
+# Default number of resources allowed per tenant. A negative value means
+# unlimited.
+# default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+# quota_network = 10
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+# quota_subnet = 10
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+# quota_port = 50
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+# quota_security_group = 10
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+# quota_security_group_rule = 100
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back-end a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back-end a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitor = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+# Number of firewalls allowed per tenant. A negative value means unlimited.
+# quota_firewall = 1
+
+# Number of firewall policies allowed per tenant. A negative value means
+# unlimited.
+# quota_firewall_policy = 1
+
+# Number of firewall rules allowed per tenant. A negative value means
+# unlimited.
+# quota_firewall_rule = 100
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+# root_helper = sudo
+root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
+
+# =========== items for agent management extension =============
+# Seconds between nodes reporting state to the server; should be less than
+# agent_down_time, ideally half of agent_down_time or less
+# report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
+identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357
+admin_tenant_name = service
+admin_user = {{ NEUTRON_SERVICE_USER }}
+admin_password = {{ NEUTRON_SERVICE_PASSWORD }}
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite://
+# NOTE: In deployment the [database] section and its connection attribute may
+# be set in the corresponding core plugin '.ini' file. However, it is suggested
+# to put the [database] section and its connection attribute in this
+# configuration file.
+#connection=sqlite:////var/lib/neutron/neutron.sqlite
+connection=postgresql://{{ NEUTRON_DB_USER }}:{{ NEUTRON_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/neutron
+
+# Database engine for which script will be generated when using offline
+# migration
+# engine =
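+# Example (illustrative value only, assuming a PostgreSQL target): engine = postgresql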
+
+# The SQLAlchemy connection string used to connect to the slave database
+# slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+# max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+# retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+# min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# max_pool_size = 10
+
+# Timeout in seconds before idle sql connections are reaped
+# idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+# max_overflow = 20
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+# connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+# connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option; example for the default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# Example of a non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise, comment out the HA Proxy line.
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# Uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
+# Uncomment the line below to use the A10 Networks LBaaS driver. Requires 'pip install a10-neutron-lbaas'.
+#service_provider = LOADBALANCER:A10Networks:neutron.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default
+# Uncomment the following line to test the LBaaS v2 API _WITHOUT_ a real backend
+# service_provider = LOADBALANCER:LoggingNoop:neutron.services.loadbalancer.drivers.logging_noop.driver.LoggingNoopLoadBalancerDriver:default
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini
new file mode 100644
index 00000000..256f7855
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini
@@ -0,0 +1,114 @@
+# Config file for neutron-proxy-plugin.
+
+[restproxy]
+# All configuration for this plugin is in section '[restproxy]'
+#
+# The following parameters are supported:
+# servers : <host:port>[,<host:port>]* (Error if not set)
+# server_auth : <username:password> (default: no auth)
+# server_ssl : True | False (default: True)
+# ssl_cert_directory : <path> (default: /etc/neutron/plugins/bigswitch/ssl)
+# no_ssl_validation : True | False (default: False)
+# ssl_sticky : True | False (default: True)
+# sync_data : True | False (default: False)
+# auto_sync_on_failure : True | False (default: True)
+# consistency_interval : <integer> (default: 60 seconds)
+# server_timeout : <integer> (default: 10 seconds)
+# neutron_id : <string> (default: neutron-<hostname>)
+# add_meta_server_route : True | False (default: True)
+# thread_pool_size : <int> (default: 4)
+
+# A comma separated list of BigSwitch or Floodlight servers and port numbers.
+# The plugin proxies the requests to the BigSwitch/Floodlight server, which
+# performs the networking configuration. Note that only one server is needed
+# per deployment, but you may wish to deploy multiple servers to support
+# failover.
+servers=localhost:8080
+
+# The username and password for authenticating against the BigSwitch or Floodlight controller.
+# server_auth=username:password
+
+# Use SSL when connecting to the BigSwitch or Floodlight controller.
+# server_ssl=True
+
+# Directory which contains the ca_certs and host_certs to be used to validate
+# controller certificates.
+# ssl_cert_directory=/etc/neutron/plugins/bigswitch/ssl/
+
+# If a certificate does not exist for a controller, trust and store the first
+# certificate received for that controller and use it to validate future
+# connections to that controller.
+# ssl_sticky=True
+
+# Do not validate the controller certificates for SSL
+# Warning: This will not provide protection against man-in-the-middle attacks
+# no_ssl_validation=False
+
+# Sync data on connect
+# sync_data=False
+
+# If neutron fails to create a resource because the backend controller
+# doesn't know of a dependency, automatically trigger a full data
+# synchronization to the controller.
+# auto_sync_on_failure=True
+
+# Time between verifications that the backend controller
+# database is consistent with Neutron. (0 to disable)
+# consistency_interval = 60
+
+# Maximum number of seconds to wait for proxy request to connect and complete.
+# server_timeout=10
+
+# User defined identifier for this Neutron deployment
+# neutron_id =
+
+# Flag to decide if a route to the metadata server should be injected into the VM
+# add_meta_server_route = True
+
+# Number of threads to use to handle large volumes of port creation requests
+# thread_pool_size = 4
+
+[nova]
+# Specify the VIF_TYPE that will be controlled on the Nova compute instances
+# options: ivs or ovs
+# default: ovs
+# vif_type = ovs
+
+# Overrides for vif types based on nova compute node host IDs
+# Comma separated list of host IDs to fix to a specific VIF type
+# The VIF type is taken from the end of the configuration item
+# node_override_vif_<vif_type>
+# For example, the following would set the VIF type to IVS for
+# host-id1 and host-id2
+# node_override_vif_ivs=host-id1,host-id2
+
+[router]
+# Specify the default router rules installed in newly created tenant routers
+# Specify multiple times for multiple rules
+# Format is <tenant>:<source>:<destination>:<action>
+# Optionally, a comma-separated list of nexthops may be included after <action>
+# Use an * to specify default for all tenants
+# Default is any any allow for all tenants
+# tenant_default_router_rule=*:any:any:permit
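+# Illustrative only: the option may be repeated for multiple rules; the
+# tenant name and CIDR below are placeholders, not defaults:
+# tenant_default_router_rule=demo-tenant:10.10.0.0/16:any:deny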
+
+# Maximum number of rules that a single router may have
+# Default is 200
+# max_router_rules=200
+
+[restproxyagent]
+
+# Specify the name of the bridge used on compute nodes
+# for attachment.
+# Default: br-int
+# integration_bridge=br-int
+
+# Change the frequency of polling by the restproxy agent.
+# Value is seconds
+# Default: 5
+# polling_interval=5
+
+# Virtual switch type on the compute node.
+# Options: ovs or ivs
+# Default: ovs
+# virtual_switch_type = ovs
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README b/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README
new file mode 100644
index 00000000..e7e47a27
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README
@@ -0,0 +1,3 @@
+Certificates in this folder will be used to
+verify signatures for any controllers the plugin
+connects to.
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README b/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README
new file mode 100644
index 00000000..8f5f5e77
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README
@@ -0,0 +1,6 @@
+Certificates in this folder must be named after
+the controller they are used to authenticate,
+with a .pem extension.
+
+For example, the certificate for the controller
+"192.168.0.1" should be named "192.168.0.1.pem".
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini
new file mode 100644
index 00000000..916e9e5d
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini
@@ -0,0 +1,29 @@
+[switch]
+# username = The SSH username to use
+# password = The SSH password to use
+# address = The address of the host to SSH to
+# ostype = Should be NOS, but is unused otherwise
+#
+# Example:
+# username = admin
+# password = password
+# address = 10.24.84.38
+# ostype = NOS
+
+[physical_interface]
+# physical_interface = The network interface to use when creating a port
+#
+# Example:
+# physical_interface = physnet1
+
+[vlans]
+# network_vlan_ranges = <physical network name>:nnnn:mmmm
+#
+# Example:
+# network_vlan_ranges = physnet1:1000:2999
+
+[linux_bridge]
+# physical_interface_mappings = <physical network name>:<local interface>
+#
+# Example:
+# physical_interface_mappings = physnet1:em1
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini
new file mode 100644
index 00000000..d99e8382
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini
@@ -0,0 +1,15 @@
+[cfg_agent]
+# (IntOpt) Interval in seconds for processing of service updates.
+# That is how often the config agent's process_services() loop executes,
+# letting each service helper process its service resources.
+# rpc_loop_interval = 10
+
+# (StrOpt) Period-separated module path to the routing service helper class.
+# routing_svc_helper_class = neutron.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper.RoutingServiceHelper
+
+# (IntOpt) Timeout value in seconds for connecting to a hosting device.
+# device_connection_timeout = 30
+
+# (IntOpt) The time in seconds until a backlogged hosting device is
+# presumed dead or booted to an error state.
+# hosting_device_dead_timeout = 300
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini
new file mode 100644
index 00000000..17eae737
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini
@@ -0,0 +1,100 @@
+[cisco]
+
+# (StrOpt) A short prefix to prepend to the VLAN number when creating a
+# VLAN interface. For example, if an interface is being created for
+# VLAN 2001 it will be named 'q-2001' using the default prefix.
+#
+# vlan_name_prefix = q-
+# Example: vlan_name_prefix = vnet-
+
+# (StrOpt) A short prefix to prepend to the VLAN number when creating a
+# provider VLAN interface. For example, if an interface is being created
+# for provider VLAN 3003 it will be named 'p-3003' using the default prefix.
+#
+# provider_vlan_name_prefix = p-
+# Example: provider_vlan_name_prefix = PV-
+
+# (BoolOpt) A flag indicating whether Openstack networking should manage the
+# creation and removal of VLAN interfaces for provider networks on the Nexus
+# switches. If the flag is set to False then Openstack will not create or
+# remove VLAN interfaces for provider networks, and the administrator needs
+# to manage these interfaces manually or by external orchestration.
+#
+# provider_vlan_auto_create = True
+
+# (BoolOpt) A flag indicating whether Openstack networking should manage
+# the adding and removing of provider VLANs from trunk ports on the Nexus
+# switches. If the flag is set to False then Openstack will not add or
+# remove provider VLANs from trunk ports, and the administrator needs to
+# manage these operations manually or by external orchestration.
+#
+# provider_vlan_auto_trunk = True
+
+# (StrOpt) Period-separated module path to the model class to use for
+# the Cisco neutron plugin.
+#
+# model_class = neutron.plugins.cisco.models.virt_phy_sw_v2.VirtualPhysicalSwitchModelV2
+
+# (BoolOpt) A flag to enable Layer 3 support on the Nexus switches.
+# Note: This feature is not supported on all models/versions of Cisco
+# Nexus switches. To use this feature, all of the Nexus switches in the
+# deployment must support it.
+# nexus_l3_enable = False
+
+# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
+# svi_round_robin = False
+
+# Cisco Nexus Switch configurations.
+# Each switch to be managed by Openstack Neutron must be configured here.
+#
+# N1KV Format.
+# [N1KV:<IP address of VSM>]
+# username=<credential username>
+# password=<credential password>
+#
+# Example:
+# [N1KV:2.2.2.2]
+# username=admin
+# password=mySecretPassword
+
+[cisco_n1k]
+
+# (StrOpt) Specify the name of the integration bridge to which the VIFs are
+# attached.
+# Default value: br-int
+# integration_bridge = br-int
+
+# (StrOpt) Name of the policy profile to be associated with a port when no
+# policy profile is specified during port creates.
+# Default value: service_profile
+# default_policy_profile = service_profile
+
+# (StrOpt) Name of the policy profile to be associated with a port owned by
+# network node (dhcp, router).
+# Default value: dhcp_pp
+# network_node_policy_profile = dhcp_pp
+
+# (StrOpt) Name of the network profile to be associated with a network when no
+# network profile is specified during network creates. Admin should pre-create
+# a network profile with this name.
+# Default value: default_network_profile
+# default_network_profile = network_pool
+
+# (IntOpt) Time in seconds for which the plugin polls the VSM for updates in
+# policy profiles.
+# Default value: 60
+# poll_duration = 60
+
+# (BoolOpt) Specify whether tenants are restricted from accessing all the
+# policy profiles.
+# Default value: False, indicating all tenants can access all policy profiles.
+#
+# restrict_policy_profiles = False
+
+# (IntOpt) Number of threads to use to make HTTP requests to the VSM.
+# Default value: 4
+# http_pool_size = 4
+
+# (IntOpt) Timeout duration in seconds for the http request
+# Default value: 15
+# http_timeout = 15
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini
new file mode 100644
index 00000000..3ef271d2
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini
@@ -0,0 +1,76 @@
+[general]
+#(IntOpt) Time in seconds between renewed scheduling attempts of non-scheduled routers
+# backlog_processing_interval = 10
+
+#(StrOpt) Name of the L3 admin tenant
+# l3_admin_tenant = L3AdminTenant
+
+#(StrOpt) Name of management network for hosting device configuration
+# management_network = osn_mgmt_nw
+
+#(StrOpt) Default security group applied on management port
+# default_security_group = mgmt_sec_grp
+
+#(IntOpt) Seconds of no status update until a cfg agent is considered down
+# cfg_agent_down_time = 60
+
+#(StrOpt) Path to templates for hosting devices
+# templates_path = /opt/stack/data/neutron/cisco/templates
+
+#(StrOpt) Path to config drive files for service VM instances
+# service_vm_config_path = /opt/stack/data/neutron/cisco/config_drive
+
+#(BoolOpt) Ensure that Nova is running before attempting to create any VM
+# ensure_nova_running = True
+
+[hosting_devices]
+# Settings coupled to CSR1kv VM devices
+# -------------------------------------
+#(StrOpt) Name of Glance image for CSR1kv
+# csr1kv_image = csr1kv_openstack_img
+
+#(StrOpt) UUID of Nova flavor for CSR1kv
+# csr1kv_flavor = 621
+
+#(StrOpt) Plugging driver for CSR1kv
+# csr1kv_plugging_driver = neutron.plugins.cisco.l3.plugging_drivers.n1kv_trunking_driver.N1kvTrunkingPlugDriver
+
+#(StrOpt) Hosting device driver for CSR1kv
+# csr1kv_device_driver = neutron.plugins.cisco.l3.hosting_device_drivers.csr1kv_hd_driver.CSR1kvHostingDeviceDriver
+
+#(StrOpt) Config agent router service driver for CSR1kv
+# csr1kv_cfgagent_router_driver = neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv.csr1kv_routing_driver.CSR1kvRoutingDriver
+
+#(StrOpt) Configdrive template file for CSR1kv
+# csr1kv_configdrive_template = csr1kv_cfg_template
+
+#(IntOpt) Booting time in seconds before a CSR1kv becomes operational
+# csr1kv_booting_time = 420
+
+#(StrOpt) Username to use for CSR1kv configurations
+# csr1kv_username = stack
+
+#(StrOpt) Password to use for CSR1kv configurations
+# csr1kv_password = cisco
+
+[n1kv]
+# Settings coupled to inter-working with N1kv plugin
+# --------------------------------------------------
+#(StrOpt) Name of N1kv port profile for management ports
+# management_port_profile = osn_mgmt_pp
+
+#(StrOpt) Name of N1kv port profile for T1 ports (i.e., ports carrying traffic
+# from VXLAN segmented networks).
+# t1_port_profile = osn_t1_pp
+
+#(StrOpt) Name of N1kv port profile for T2 ports (i.e., ports carrying traffic
+# from VLAN segmented networks).
+# t2_port_profile = osn_t2_pp
+
+#(StrOpt) Name of N1kv network profile for T1 networks (i.e., trunk networks
+# for VXLAN segmented traffic).
+# t1_network_profile = osn_t1_np
+
+#(StrOpt) Name of N1kv network profile for T2 networks (i.e., trunk networks
+# for VLAN segmented traffic).
+# t2_network_profile = osn_t2_np
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini
new file mode 100644
index 00000000..0aee17eb
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini
@@ -0,0 +1,26 @@
+[cisco_csr_ipsec]
+# Status check interval in seconds, for VPNaaS IPSec connections used on CSR
+# status_check_interval = 60
+
+# Cisco CSR management port information for REST access used by VPNaaS
+# TODO(pcm): Remove once CSR is integrated in as a Neutron router.
+#
+# Format is:
+# [cisco_csr_rest:<public IP>]
+# rest_mgmt = <mgmt port IP>
+# tunnel_ip = <tunnel IP>
+# username = <user>
+# password = <password>
+# timeout = <timeout>
+# host = <hostname>
+# tunnel_if = <tunnel I/F>
+#
+# where:
+# public IP ----- Public IP address of router used with a VPN service (1:1 with CSR)
+# tunnel IP ----- Public IP address of the CSR used for the IPSec tunnel
+# mgmt port IP -- IP address of CSR for REST API access
+# user ---------- Username for REST management port access to Cisco CSR
+# password ------ Password for REST management port access to Cisco CSR
+# timeout ------- REST request timeout to Cisco CSR (optional)
+# hostname ------ Name of host where CSR is running as a VM
+# tunnel I/F ---- CSR port name used for tunnels' IP address
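+#
+# Illustrative example with placeholder addresses and credentials
+# (not defaults; adjust to match the CSR in your deployment):
+# [cisco_csr_rest:192.0.2.10]
+# rest_mgmt = 192.0.2.20
+# tunnel_ip = 192.0.2.10
+# username = admin
+# password = secret
+# timeout = 30
+# host = csr-host.example.com
+# tunnel_if = GigabitEthernet3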
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini
new file mode 100644
index 00000000..0ca9b46f
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini
@@ -0,0 +1,41 @@
+[heleos]
+#configure the ESM management address
+#in the first version of this plugin, only one ESM can be specified
+#Example:
+#esm_mgmt=
+
+#configure admin username and password
+#admin_username=
+#admin_password=
+
+#router image id
+#Example:
+#router_image=932ce713-e210-3d54-a0a5-518b0b5ee1b0
+
+#mgmt shared security zone id
+#defines the shared management security zone. Each tenant can have a private one configured through the ESM
+#Example:
+#mgmt_id=c0bc9b6c-f110-46cf-bb01-733bfe4b5a1a
+
+#in-band shared security zone id
+#defines the shared in-band security zone. Each tenant can have a private one configured through the ESM
+#Example:
+#inband_id=a6b7999d-3806-4b04-81f6-e0c5c8271afc
+
+#oob-band shared security zone id
+#defines the shared out-of-band security zone. Each tenant can have a private one configured through the ESM
+#Example:
+#oob_id=e7eda5cc-b977-46cb-9c14-cab43c1b7871
+
+#dummy security zone id
+#defines the dummy security zone ID. this security zone will be used by the DVAs with no neutron interfaces
+#Example:
+#dummy_utif_id=d9911310-25fc-4733-a2e0-c0eda024ef08
+
+#resource pool id
+#define the shared resource pool. Each tenant can have a private one configured through the ESM
+#Example
+#resource_pool_id=
+
+#define if the requests have to be executed asynchronously by the plugin or not
+#async_requests=
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini
new file mode 100644
index 00000000..5eeec570
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini
@@ -0,0 +1,63 @@
+[hyperv]
+# (StrOpt) Type of network to allocate for tenant networks. The
+# default value 'local' is useful only for single-box testing and
+# provides no connectivity between hosts. You MUST either change this
+# to 'vlan' and configure network_vlan_ranges below, or change it to 'flat'.
+# Set to 'none' to disable creation of tenant networks.
+#
+# tenant_network_type = local
+# Example: tenant_network_type = vlan
+
+# (ListOpt) Comma-separated list of
+# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
+# of VLAN IDs on named physical networks that are available for
+# allocation. All physical networks listed are available for flat and
+# VLAN provider network creation. Specified ranges of VLAN IDs are
+# available for tenant network allocation if tenant_network_type is
+# 'vlan'. If empty, only gre and local networks may be created.
+#
+# network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999
+
+[agent]
+# Agent's polling interval in seconds
+# polling_interval = 2
+
+# (ListOpt) Comma separated list of <physical_network>:<vswitch>
+# where the physical networks can be expressed with wildcards,
+# e.g.: ."*:external".
+# The referred external virtual switches need to be already present on
+# the Hyper-V server.
+# If a given physical network name will not match any value in the list
+# the plugin will look for a virtual switch with the same name.
+#
+# physical_network_vswitch_mappings = *:external
+# Example: physical_network_vswitch_mappings = net1:external1,net2:external2
+
+# (StrOpt) Private virtual switch name used for local networking.
+#
+# local_network_vswitch = private
+# Example: local_network_vswitch = custom_vswitch
+
+# (BoolOpt) Enables metrics collection for switch ports by using Hyper-V's
+# metric APIs. Collected data can be retrieved by other apps and services,
+# e.g.: Ceilometer. Requires Hyper-V / Windows Server 2012 and above.
+#
+# enable_metrics_collection = False
+
+#-----------------------------------------------------------------------------
+# Sample Configurations.
+#-----------------------------------------------------------------------------
+#
+# Neutron server:
+#
+# [HYPERV]
+# tenant_network_type = vlan
+# network_vlan_ranges = default:2000:3999
+#
+# Agent running on Hyper-V node:
+#
+# [AGENT]
+# polling_interval = 2
+# physical_network_vswitch_mappings = *:external
+# local_network_vswitch = private
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini
new file mode 100644
index 00000000..0fab5070
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini
@@ -0,0 +1,50 @@
+[sdnve]
+# (ListOpt) The IP address of one (or more) SDN-VE controllers
+# Default value is: controller_ips = 127.0.0.1
+# Example: controller_ips = 127.0.0.1,127.0.0.2
+# (StrOpt) The integration bridge for OF based implementation
+# The default value for integration_bridge is None
+# Example: integration_bridge = br-int
+# (ListOpt) The interface mapping connecting the integration
+# bridge to external network as a list of physical network names and
+# interfaces: <physical_network_name>:<interface_name>
+# Example: interface_mappings = default:eth2
+# (BoolOpt) Used to reset the integration bridge, if exists
+# The default value for reset_bridge is True
+# Example: reset_bridge = False
+# (BoolOpt) Used to set the OVS controller as out-of-band
+# The default value for out_of_band is True
+# Example: out_of_band = False
+#
+# (BoolOpt) The fake controller for testing purposes
+# Default value is: use_fake_controller = False
+# (StrOpt) The port number for use with controller
+# The default value for the port is 8443
+# Example: port = 8443
+# (StrOpt) The userid for use with controller
+# The default value for the userid is admin
+# Example: userid = sdnve_user
+# (StrOpt) The password for use with controller
+# The default value for the password is admin
+# Example: password = sdnve_password
+#
+# (StrOpt) The default type of tenants (and associated resources)
+# Available choices are: OVERLAY or OF
+# The default value for tenant type is OVERLAY
+# Example: default_tenant_type = OVERLAY
+# (StrOpt) The string in the tenant description that indicates an OF tenant
+# Default value for OF tenants: of_signature = SDNVE-OF
+# (StrOpt) The string in the tenant description that indicates an OVERLAY tenant
+# Default value for OVERLAY tenants: overlay_signature = SDNVE-OVERLAY
+
+[sdnve_agent]
+# (IntOpt) Agent's polling interval in seconds
+# polling_interval = 2
+# (StrOpt) What to use for root helper
+# The default value: root_helper = 'sudo'
+# (BoolOpt) Whether to use rpc or not
+# The default value: rpc = True
+
+[securitygroup]
+# The security group is not supported:
+# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini
new file mode 100644
index 00000000..94fe9803
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini
@@ -0,0 +1,78 @@
+[vlans]
+# (StrOpt) Type of network to allocate for tenant networks. The
+# default value 'local' is useful only for single-box testing and
+# provides no connectivity between hosts. You MUST change this to
+# 'vlan' and configure network_vlan_ranges below in order for tenant
+# networks to provide connectivity between hosts. Set to 'none' to
+# disable creation of tenant networks.
+#
+# tenant_network_type = local
+# Example: tenant_network_type = vlan
+
+# (ListOpt) Comma-separated list of
+# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
+# of VLAN IDs on named physical networks that are available for
+# allocation. All physical networks listed are available for flat and
+# VLAN provider network creation. Specified ranges of VLAN IDs are
+# available for tenant network allocation if tenant_network_type is
+# 'vlan'. If empty, only local networks may be created.
+#
+# network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999
+
+[linux_bridge]
+# (ListOpt) Comma-separated list of
+# <physical_network>:<physical_interface> tuples mapping physical
+# network names to the agent's node-specific physical network
+# interfaces to be used for flat and VLAN networks. All physical
+# networks listed in network_vlan_ranges on the server should have
+# mappings to appropriate interfaces on each agent.
+#
+# physical_interface_mappings =
+# Example: physical_interface_mappings = physnet1:eth1
+
+[vxlan]
+# (BoolOpt) enable VXLAN on the agent
+# VXLAN support can be enabled when the agent is managed by the ml2 plugin
+# using the linuxbridge mechanism driver. It has no effect when using the
+# linuxbridge plugin.
+# enable_vxlan = False
+#
+# (IntOpt) use specific TTL for vxlan interface protocol packets
+# ttl =
+#
+# (IntOpt) use specific TOS for vxlan interface protocol packets
+# tos =
+#
+# (StrOpt) multicast group to use for broadcast emulation.
+# This group must be the same on all the agents.
+# vxlan_group = 224.0.0.1
+#
+# (StrOpt) Local IP address to use for VXLAN endpoints (required)
+# local_ip =
+#
+# (BoolOpt) Flag to enable l2population extension. This option should be used
+# in conjunction with ml2 plugin l2population mechanism driver (in that case,
+# both linuxbridge and l2population mechanism drivers should be loaded).
+# It enables the plugin to populate the VXLAN forwarding table, in order to
+# limit the use of broadcast emulation (multicast will be turned off if the
+# kernel and iproute2 support unicast flooding - requires kernel 3.11 and
+# iproute2 3.10)
+# l2_population = False
+
+[agent]
+# Agent's polling interval in seconds
+# polling_interval = 2
+
+# (BoolOpt) Enable server RPC compatibility with old (pre-havana)
+# agents.
+#
+# rpc_support_old_agents = False
+# Example: rpc_support_old_agents = True
+
+[securitygroup]
+# Firewall driver for realizing neutron security group function
+# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
+# Example: firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
+
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini
new file mode 100644
index 00000000..2b9bfa5e
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini
@@ -0,0 +1,31 @@
+# Config file for Metaplugin
+
+[meta]
+# Comma separated list of flavor:neutron_plugin for plugins to load.
+# Extension methods are searched in the list order and the first one found is used.
+plugin_list = 'ml2:neutron.plugins.ml2.plugin.Ml2Plugin,nvp:neutron.plugins.vmware.plugin.NsxPluginV2'
+
+# Comma separated list of flavor:neutron_plugin for L3 service plugins
+# to load.
+# This is intended for specifying L2 plugins which support L3 functions.
+# If you use a router service plugin, set this blank.
+l3_plugin_list =
+
+# Default flavor to use, when flavor:network is not specified at network
+# creation.
+default_flavor = 'nvp'
+
+# Default L3 flavor to use, when flavor:router is not specified at router
+# creation.
+# Ignored if 'l3_plugin_list' is blank.
+default_l3_flavor =
+
+# Comma separated list of supported extension aliases.
+supported_extension_aliases = 'provider,binding,agent,dhcp_agent_scheduler'
+
+# Comma separated list of method:flavor to select specific plugin for a method.
+# This has priority over method search order based on 'plugin_list'.
+extension_map = 'get_port_stats:nvp'
+
+# Specifies flavor for plugin to handle 'q-plugin' RPC requests.
+rpc_flavor = 'ml2'
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini
new file mode 100644
index 00000000..f2e94052
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini
@@ -0,0 +1,19 @@
+
+[midonet]
+# MidoNet API server URI
+# midonet_uri = http://localhost:8080/midonet-api
+
+# MidoNet admin username
+# username = admin
+
+# MidoNet admin password
+# password = passw0rd
+
+# ID of the project that MidoNet admin user belongs to
+# project_id = 77777777-7777-7777-7777-777777777777
+
+# Virtual provider router ID
+# provider_router_id = 00112233-0011-0011-0011-001122334455
+
+# Path to midonet host uuid file
+# midonet_host_uuid_path = /etc/midolman/host_uuid.properties
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini
new file mode 100644
index 00000000..b8097ce2
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini
@@ -0,0 +1,86 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = flat,gre
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = gre
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = openvswitch
+
+# (ListOpt) Ordered list of extension driver entrypoints
+# to be loaded from the neutron.ml2.extension_drivers namespace.
+# extension_drivers =
+# Example: extension_drivers = anewextensiondriver
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+# flat_networks =
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+flat_networks = External
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+# network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+#network_vlan_ranges = Physnet1:100:200
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+# vni_ranges =
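+# Example (illustrative range only): vni_ranges = 1001:2000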
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+# vxlan_group =
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+enable_security_group = True
+
+# Use ipset to speed-up the iptables security groups. Enabling ipset support
+# requires that ipset is installed on L2 agent node.
+enable_ipset = True
+
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+
+[ovs]
+local_ip = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
+enable_tunneling = True
+bridge_mappings=External:br-ex
+
+[agent]
+tunnel_types = gre
diff --git a/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini
new file mode 100644
index 00000000..abaf5bc7
--- /dev/null
+++ b/install-files/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini
@@ -0,0 +1,100 @@
+# Defines configuration options specific for Arista ML2 Mechanism driver
+
+[ml2_arista]
+# (StrOpt) EOS IP address. This is a required field. If not set, all
+# communications to Arista EOS will fail
+#
+# eapi_host =
+# Example: eapi_host = 192.168.0.1
+#
+# (StrOpt) EOS command API username. This is a required field.
+# If not set, all communications to Arista EOS will fail.
+#
+# eapi_username =
+# Example: arista_eapi_username = admin
+#
+# (StrOpt) EOS command API password. This is a required field.
+# If not set, all communications to Arista EOS will fail.
+#
+# eapi_password =
+# Example: eapi_password = my_password
+#
+# (StrOpt) Defines if hostnames are sent to Arista EOS as FQDNs
+# ("node1.domain.com") or as short names ("node1"). This is
+# optional. If not set, a value of "True" is assumed.
+#
+# use_fqdn =
+# Example: use_fqdn = True
+#
+# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
+# This field defines how often the synchronization is performed.
+# This is an optional field. If not set, a value of 180 seconds
+# is assumed.
+#
+# sync_interval =
+# Example: sync_interval = 60
+#
+# (StrOpt) Defines Region Name that is assigned to this OpenStack Controller.
+# This is useful when multiple OpenStack/Neutron controllers are
+# managing the same Arista HW clusters. Note that this name must
+# match the region name registered with (or known to) the keystone
+# service. Authentication with Keystone is performed by EOS.
+# This is optional. If not set, a value of "RegionOne" is assumed.
+#
+# region_name =
+# Example: region_name = RegionOne
+
+
+[l3_arista]
+
+# (StrOpt) Primary host IP address. This is a required field. If not set, all
+# communications to Arista EOS will fail. This is the host where the
+# primary router is created.
+#
+# primary_l3_host =
+# Example: primary_l3_host = 192.168.10.10
+#
+# (StrOpt) Primary host username. This is a required field.
+# If not set, all communications to Arista EOS will fail.
+#
+# primary_l3_host_username =