author    Mark Doffman <mark.doffman@codethink.co.uk>    2014-09-03 19:12:50 +0000
committer Mark Doffman <mark.doffman@codethink.co.uk>    2014-10-02 02:46:23 +0000
commit    0f0bb39104d35cfcf2a01eeb1866927945bcfa8a (patch)
tree      98338062b2ac0101c95c94a7299eedff538fc614
parent    f1da7ace54bed18c5c7eac5242ec84d4ca81c55d (diff)
Openstack services strata and configuration  (baserock/markdoffman/openstack)
Add a stratum for openstack services that includes all the core openstack service code and daemons. Add configuration files for keystone, glance, nova and nova-compute. Add a system and cluster definition that builds an image that runs keystone, glance and nova.
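As a rough sketch of how this commit is intended to be consumed (assuming the usual Baserock morph workflow, which is not itself part of this change), the system is built and the cluster deployed to a raw disk image along these lines:

    morph build systems/openstack-server.morph
    morph deploy clusters/openstack-cluster.morph
    # writes /src/release/baserock-xx-devel-system-x86_64-openstack.img (10G raw disk)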
-rw-r--r--clusters/openstack-cluster.morph32
-rw-r--r--openstack-glance.configure45
-rw-r--r--openstack-keystone.configure41
-rw-r--r--openstack-nova-compute.configure42
-rw-r--r--openstack-nova.configure44
-rw-r--r--openstack/etc/glance/glance-api-paste.ini72
-rw-r--r--openstack/etc/glance/glance-api.conf692
-rw-r--r--openstack/etc/glance/glance-cache.conf200
-rw-r--r--openstack/etc/glance/glance-registry-paste.ini25
-rw-r--r--openstack/etc/glance/glance-registry.conf197
-rw-r--r--openstack/etc/glance/glance-scrubber.conf56
-rw-r--r--openstack/etc/glance/logging.conf54
-rw-r--r--openstack/etc/glance/policy.json32
-rw-r--r--openstack/etc/glance/schema-image.json28
-rw-r--r--openstack/etc/keystone/keystone-paste.ini112
-rw-r--r--openstack/etc/keystone/keystone.conf1384
-rw-r--r--openstack/etc/keystone/logging.conf39
-rw-r--r--openstack/etc/keystone/policy.json144
-rw-r--r--openstack/etc/logrotate.d/openstack-glance-api7
-rw-r--r--openstack/etc/logrotate.d/openstack-glance-registry7
-rw-r--r--openstack/etc/logrotate.d/openstack-keystone8
-rw-r--r--openstack/etc/nova/api-paste.ini118
-rw-r--r--openstack/etc/nova/cells.json26
-rw-r--r--openstack/etc/nova/logging.conf81
-rw-r--r--openstack/etc/nova/nova.conf597
-rw-r--r--openstack/etc/nova/nova.conf.example3698
-rw-r--r--openstack/etc/nova/policy.json324
-rw-r--r--openstack/etc/nova/release.sample4
-rw-r--r--openstack/etc/nova/rootwrap.conf27
-rw-r--r--openstack/etc/nova/rootwrap.d/api-metadata.filters13
-rw-r--r--openstack/etc/nova/rootwrap.d/baremetal-compute-ipmi.filters9
-rw-r--r--openstack/etc/nova/rootwrap.d/baremetal-deploy-helper.filters11
-rw-r--r--openstack/etc/nova/rootwrap.d/compute.filters228
-rw-r--r--openstack/etc/nova/rootwrap.d/network.filters94
-rw-r--r--openstack/etc/systemd/system/openstack-glance-api.service12
-rw-r--r--openstack/etc/systemd/system/openstack-glance-registry.service12
-rw-r--r--openstack/etc/systemd/system/openstack-glance-setup.service12
-rw-r--r--openstack/etc/systemd/system/openstack-keystone-setup.service12
-rw-r--r--openstack/etc/systemd/system/openstack-keystone.service12
-rw-r--r--openstack/etc/systemd/system/openstack-nova-setup.service12
-rw-r--r--openstack/manifest51
-rw-r--r--openstack/usr/share/openstack/openstack-glance-setup57
-rw-r--r--openstack/usr/share/openstack/openstack-keystone-setup56
-rw-r--r--openstack/usr/share/openstack/openstack-nova-compute-setup56
-rw-r--r--openstack/usr/share/openstack/openstack-nova-setup64
-rw-r--r--strata/openstack-services.morph531
-rw-r--r--strata/openstack-services/decorator.morph6
-rw-r--r--strata/openstack-services/docutils.morph6
-rw-r--r--strata/openstack-services/ipaddr-py.morph6
-rw-r--r--strata/openstack-services/qpid-python.morph6
-rw-r--r--strata/openstack-services/rabbitmq-server.morph7
-rw-r--r--strata/openstack-services/thrift.morph9
-rw-r--r--strata/openstack-services/xattr.morph8
-rw-r--r--systems/openstack-server.morph36
54 files changed, 9462 insertions, 0 deletions
diff --git a/clusters/openstack-cluster.morph b/clusters/openstack-cluster.morph
new file mode 100644
index 00000000..a4dc4c2a
--- /dev/null
+++ b/clusters/openstack-cluster.morph
@@ -0,0 +1,32 @@
+name: devel-raw
+kind: cluster
+systems:
+- morph: systems/openstack-server.morph
+ deploy:
+ release:
+ type: rawdisk
+ location: /src/release/baserock-xx-devel-system-x86_64-openstack.img
+ DISK_SIZE: 10G
+ VERSION_LABEL: Br-openstack-1
+ KERNEL_ARGS: console=ttyS0,115200
+ INSTALL_FILES: openstack/manifest
+ HOSTNAME: Br-Openstack
+ KEYSTONE_TEMPORARY_ADMIN_TOKEN: 22f3aa1cf538e3f6d5e8
+ KEYSTONE_TEMPORARY_ADMIN_PASSWORD: veryinsecure
+ KEYSTONE_PUBLIC_URL: http:\/\/localhost:5000\/v2.0
+ KEYSTONE_INTERNAL_URL: http:\/\/localhost:5000\/v2.0
+ KEYSTONE_ADMIN_URL: http:\/\/localhost:35357\/v2.0
+ OPENSTACK_AUTH_HOST: localhost
+ GLANCE_SERVICE_USER: glance
+ GLANCE_SERVICE_PASSWORD: veryinsecure
+ GLANCE_PUBLIC_URL: http:\/\/localhost:9292
+ GLANCE_INTERNAL_URL: http:\/\/localhost:9292
+ GLANCE_ADMIN_URL: http:\/\/localhost:9292
+ OPENSTACK_AUTH_HOST: localhost
+ NOVA_HOST: localhost
+ GLANCE_HOST: localhost
+ NOVA_SERVICE_USER: glance
+ NOVA_SERVICE_PASSWORD: veryinsecure
+ NOVA_PUBLIC_URL: http:\/\/localhost:8774\/v2\/\%\(tenant_id\)s
+ NOVA_INTERNAL_URL: http:\/\/localhost:8774\/v2\/\%\(tenant_id\)s
+ NOVA_ADMIN_URL: http:\/\/localhost:8774\/v2\/\%\(tenant_id\)s
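The backslash-escaped URLs above are deliberate: the *.configure extensions below splice these values into the service configuration files with sed, using '/' as the delimiter, so every literal slash (and the %(tenant_id)s template) has to arrive pre-escaped. A minimal sketch of the mechanism, using a made-up 'auth_uri' placeholder line rather than one of the real shipped files:

    printf 'auth_uri = ##KEYSTONE_PUBLIC_URL##\n' > example.conf
    KEYSTONE_PUBLIC_URL='http:\/\/localhost:5000\/v2.0'
    sed -i "s/##KEYSTONE_PUBLIC_URL##/$KEYSTONE_PUBLIC_URL/g" example.conf
    cat example.conf    # -> auth_uri = http://localhost:5000/v2.0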
diff --git a/openstack-glance.configure b/openstack-glance.configure
new file mode 100644
index 00000000..7a84c5e3
--- /dev/null
+++ b/openstack-glance.configure
@@ -0,0 +1,45 @@
+#!/bin/sh
+
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+##########################################################################
+# Substitutions in configuration files
+##########################################################################
+
+cat <<EOF > "$ROOT"/etc/openstack-glance-setup.sed
+s/##KEYSTONE_TEMPORARY_ADMIN_TOKEN##/$KEYSTONE_TEMPORARY_ADMIN_TOKEN/g
+s/##GLANCE_SERVICE_USER##/$GLANCE_SERVICE_USER/g
+s/##GLANCE_SERVICE_PASSWORD##/$GLANCE_SERVICE_PASSWORD/g
+s/##GLANCE_PUBLIC_URL##/$GLANCE_PUBLIC_URL/g
+s/##GLANCE_INTERNAL_URL##/$GLANCE_INTERNAL_URL/g
+s/##GLANCE_ADMIN_URL##/$GLANCE_ADMIN_URL/g
+EOF
+
+sed -f "$ROOT"/etc/openstack-glance-setup.sed -i \
+ "$ROOT"/etc/glance/glance-api.conf \
+ "$ROOT"/etc/glance/glance-registry.conf \
+ "$ROOT"/etc/glance/glance-scrubber.conf \
+ "$ROOT"/etc/glance/glance-cache.conf \
+ "$ROOT"/usr/share/openstack/openstack-glance-setup
+
+##########################################################################
+
+ln -s "/etc/systemd/system/openstack-glance-setup.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-glance-setup.service"
diff --git a/openstack-keystone.configure b/openstack-keystone.configure
new file mode 100644
index 00000000..445c041e
--- /dev/null
+++ b/openstack-keystone.configure
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+##########################################################################
+# Substitutions in configuration files
+##########################################################################
+
+cat <<EOF > "$ROOT"/etc/openstack-keystone-setup.sed
+s/##KEYSTONE_TEMPORARY_ADMIN_TOKEN##/$KEYSTONE_TEMPORARY_ADMIN_TOKEN/g
+s/##KEYSTONE_TEMPORARY_ADMIN_PASSWORD##/$KEYSTONE_TEMPORARY_ADMIN_PASSWORD/g
+s/##KEYSTONE_PUBLIC_URL##/$KEYSTONE_PUBLIC_URL/g
+s/##KEYSTONE_INTERNAL_URL##/$KEYSTONE_INTERNAL_URL/g
+s/##KEYSTONE_ADMIN_URL##/$KEYSTONE_ADMIN_URL/g
+EOF
+
+sed -f "$ROOT"/etc/openstack-keystone-setup.sed -i \
+ "$ROOT"/etc/keystone/keystone.conf \
+ "$ROOT"/usr/share/openstack/openstack-keystone-setup
+
+##########################################################################
+
+ln -s "/etc/systemd/system/openstack-keystone-setup.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-keystone-setup.service"
diff --git a/openstack-nova-compute.configure b/openstack-nova-compute.configure
new file mode 100644
index 00000000..4fd9bb1e
--- /dev/null
+++ b/openstack-nova-compute.configure
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+##########################################################################
+# Substitutions in configuration files
+##########################################################################
+
+cat <<EOF > "$ROOT"/etc/openstack-nova-compute-setup.sed
+s/##KEYSTONE_TEMPORARY_ADMIN_TOKEN##/$KEYSTONE_TEMPORARY_ADMIN_TOKEN/g
+s/##NOVA_SERVICE_USER##/$NOVA_SERVICE_USER/g
+s/##NOVA_SERVICE_PASSWORD##/$NOVA_SERVICE_PASSWORD/g
+s/##NOVA_PUBLIC_URL##/$NOVA_PUBLIC_URL/g
+s/##NOVA_INTERNAL_URL##/$NOVA_INTERNAL_URL/g
+s/##NOVA_ADMIN_URL##/$NOVA_ADMIN_URL/g
+EOF
+
+sed -f "$ROOT"/etc/openstack-nova-compute-setup.sed -i \
+ "$ROOT"/etc/nova/nova.conf \
+ "$ROOT"/usr/share/openstack/openstack-nova-compute-setup
+
+##########################################################################
+
+ln -s "/etc/systemd/system/openstack-nova-compute-setup.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-nova-compute-setup.service"
diff --git a/openstack-nova.configure b/openstack-nova.configure
new file mode 100644
index 00000000..c1f365fd
--- /dev/null
+++ b/openstack-nova.configure
@@ -0,0 +1,44 @@
+#!/bin/sh
+
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+ROOT="$1"
+
+##########################################################################
+# Substitutions in configuration files
+##########################################################################
+
+cat <<EOF > "$ROOT"/etc/openstack-nova-setup.sed
+s/##KEYSTONE_TEMPORARY_ADMIN_TOKEN##/$KEYSTONE_TEMPORARY_ADMIN_TOKEN/g
+s/##NOVA_SERVICE_USER##/$NOVA_SERVICE_USER/g
+s/##NOVA_SERVICE_PASSWORD##/$NOVA_SERVICE_PASSWORD/g
+s/##NOVA_PUBLIC_URL##/$NOVA_PUBLIC_URL/g
+s/##NOVA_INTERNAL_URL##/$NOVA_INTERNAL_URL/g
+s/##NOVA_ADMIN_URL##/$NOVA_ADMIN_URL/g
+s/##NOVA_HOST##/$NOVA_HOST/g
+s/##GLANCE_HOST##/$GLANCE_HOST/g
+EOF
+
+sed -f "$ROOT"/etc/openstack-nova-setup.sed -i \
+ "$ROOT"/etc/nova/nova.conf \
+ "$ROOT"/usr/share/openstack/openstack-nova-setup
+
+##########################################################################
+
+ln -s "/etc/systemd/system/openstack-nova-setup.service" \
+ "$ROOT/etc/systemd/system/multi-user.target.wants/openstack-nova-setup.service"
diff --git a/openstack/etc/glance/glance-api-paste.ini b/openstack/etc/glance/glance-api-paste.ini
new file mode 100644
index 00000000..4f8f6599
--- /dev/null
+++ b/openstack/etc/glance/glance-api-paste.ini
@@ -0,0 +1,72 @@
+# Use this pipeline for no auth or image caching - DEFAULT
+[pipeline:glance-api]
+pipeline = versionnegotiation unauthenticated-context rootapp
+
+# Use this pipeline for image caching and no auth
+[pipeline:glance-api-caching]
+pipeline = versionnegotiation unauthenticated-context cache rootapp
+
+# Use this pipeline for caching w/ management interface but no auth
+[pipeline:glance-api-cachemanagement]
+pipeline = versionnegotiation unauthenticated-context cache cachemanage rootapp
+
+# Use this pipeline for keystone auth
+[pipeline:glance-api-keystone]
+pipeline = versionnegotiation authtoken context rootapp
+
+# Use this pipeline for keystone auth with image caching
+[pipeline:glance-api-keystone+caching]
+pipeline = versionnegotiation authtoken context cache rootapp
+
+# Use this pipeline for keystone auth with caching and cache management
+[pipeline:glance-api-keystone+cachemanagement]
+pipeline = versionnegotiation authtoken context cache cachemanage rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user.
+[pipeline:glance-api-trusted-auth]
+pipeline = versionnegotiation context rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user and uses cache management
+[pipeline:glance-api-trusted-auth+cachemanagement]
+pipeline = versionnegotiation context cache cachemanage rootapp
+
+[composite:rootapp]
+paste.composite_factory = glance.api:root_app_factory
+/: apiversions
+/v1: apiv1app
+/v2: apiv2app
+
+[app:apiversions]
+paste.app_factory = glance.api.versions:create_resource
+
+[app:apiv1app]
+paste.app_factory = glance.api.v1.router:API.factory
+
+[app:apiv2app]
+paste.app_factory = glance.api.v2.router:API.factory
+
+[filter:versionnegotiation]
+paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
+
+[filter:cache]
+paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
+
+[filter:cachemanage]
+paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory
+
+[filter:context]
+paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:authtoken]
+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
+delay_auth_decision = true
+
+[filter:gzip]
+paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory
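Which of these pipelines glance-api actually runs is selected by the flavor option in the [paste_deploy] section of glance-api.conf (added further down in this commit). Left unset, the unauthenticated default pipeline applies; if keystone authentication is wanted, the credentials in [keystone_authtoken] only take effect with something like:

    [paste_deploy]
    flavor = keystone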
diff --git a/openstack/etc/glance/glance-api.conf b/openstack/etc/glance/glance-api.conf
new file mode 100644
index 00000000..90215d36
--- /dev/null
+++ b/openstack/etc/glance/glance-api.conf
@@ -0,0 +1,692 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Which backend scheme should Glance use by default when one is not specified
+# in a request to add a new image to Glance? Known schemes are determined
+# by the known_stores option below.
+# Default: 'file'
+default_store = file
+
+# List of which store classes and store class locations are
+# currently known to glance at startup.
+# Existing but disabled stores:
+# glance.store.rbd.Store,
+# glance.store.s3.Store,
+# glance.store.swift.Store,
+# glance.store.sheepdog.Store,
+# glance.store.cinder.Store,
+# glance.store.gridfs.Store,
+# glance.store.vmware_datastore.Store,
+#known_stores = glance.store.filesystem.Store,
+# glance.store.http.Store
+
+
+# Maximum image size (in bytes) that may be uploaded through the
+# Glance API server. Defaults to 1 TB.
+# WARNING: this value should only be increased after careful consideration
+# and must be set to a value under 8 EB (9223372036854775808).
+#image_size_cap = 1099511627776
+
+# Address to bind the API server
+bind_host = 0.0.0.0
+
+# Port to bind the API server to
+bind_port = 9292
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+log_file = /var/log/glance/api.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package, it is also possible to use: glance.db.registry.api
+# data_api = glance.db.sqlalchemy.api
+
+# Number of Glance API worker processes to start.
+# On machines with more than one CPU increasing this value
+# may improve performance (especially if using SSL with
+# compression turned on). It is typically recommended to set
+# this value to the number of CPUs present on your machine.
+workers = 1
+
+# Maximum line size of message headers to be accepted.
+# max_header_line may need to be increased when using large tokens
+# (typically those generated by the Keystone v3 API with big service
+# catalogs)
+# max_header_line = 16384
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Allow unauthenticated users to access the API with read-only
+# privileges. This only applies when using ContextMiddleware.
+#allow_anonymous_access = False
+
+# Allow access to version 1 of glance api
+#enable_v1_api = True
+
+# Allow access to version 2 of glance api
+#enable_v2_api = True
+
+# Return the URL that references where the data is stored on
+# the backend storage system. For example, if using the
+# file system store a URL of 'file:///path/to/image' will
+# be returned to the user in the 'direct_url' meta-data field.
+# The default value is false.
+#show_image_direct_url = False
+
+# Send headers containing user and tenant information when making requests to
+# the v1 glance registry. This allows the registry to function as if a user is
+# authenticated without the need to authenticate a user itself using the
+# auth_token middleware.
+# The default value is false.
+#send_identity_headers = False
+
+# Supported values for the 'container_format' image attribute
+#container_formats=ami,ari,aki,bare,ovf,ova
+
+# Supported values for the 'disk_format' image attribute
+#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso
+
+# Directory to use for lock files. Default to a temp directory
+# (string value). This setting needs to be the same for both
+# glance-scrubber and glance-api.
+#lock_path=<None>
+
+# Property Protections config file
+# This file contains the rules for property protections and the roles/policies
+# associated with it.
+# If this config value is not specified, by default, property protections
+# won't be enforced.
+# If a value is specified and the file is not found, then the glance-api
+# service will not start.
+#property_protection_file =
+
+# Specify whether 'roles' or 'policies' are used in the
+# property_protection_file.
+# The default value for property_protection_rule_format is 'roles'.
+#property_protection_rule_format = roles
+
+# Specifies how long (in hours) a task is supposed to live in the tasks DB
+# after succeeding or failing before getting soft-deleted.
+# The default value for task_time_to_live is 48 hours.
+# task_time_to_live = 48
+
+# This value sets what strategy will be used to determine the image location
+# order. Currently two strategies are packaged with Glance 'location_order'
+# and 'store_type'.
+#location_strategy = location_order
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+#use_syslog = False
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL0
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting API server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
+
+# ============ Registry Options ===============================
+
+# Address to find the registry server
+registry_host = 0.0.0.0
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# What protocol to use when connecting to the registry server?
+# Set to https for secure HTTP communication
+registry_client_protocol = http
+
+# The path to the key file to use in SSL connections to the
+# registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file
+#registry_client_key_file = /path/to/key/file
+
+# The path to the cert file to use in SSL connections to the
+# registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file
+#registry_client_cert_file = /path/to/cert/file
+
+# The path to the certifying authority cert file to use in SSL connections
+# to the registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file
+#registry_client_ca_file = /path/to/ca/file
+
+# When using SSL in connections to the registry server, do not require
+# validation via a certifying authority. This is the registry's equivalent of
+# specifying --insecure on the command line using glanceclient for the API
+# Default: False
+#registry_client_insecure = False
+
+# The period of time, in seconds, that the API server will wait for a registry
+# request to complete. A value of '0' implies no timeout.
+# Default: 600
+#registry_client_timeout = 600
+
+# Whether to automatically create the database tables.
+# Default: False
+#db_auto_create = False
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+#sqlalchemy_debug = True
+
+# Pass the user's token through for API requests to the registry.
+# Default: True
+#use_user_token = True
+
+# If 'use_user_token' is not in effect then admin credentials
+# can be specified. Requests to the registry on behalf of
+# the API will use these credentials.
+# Admin user name
+#admin_user = None
+# Admin password
+#admin_password = None
+# Admin tenant name
+#admin_tenant_name = None
+# Keystone endpoint
+#auth_url = None
+# Keystone region
+#auth_region = None
+# Auth strategy
+#auth_strategy = keystone
+
+# ============ Notification System Options =====================
+
+# Driver or drivers to handle sending notifications. Set to
+# 'messaging' to send notifications to a message queue.
+# notification_driver = noop
+
+# Default publisher_id for outgoing notifications.
+# default_publisher_id = image.localhost
+
+# Messaging driver used for 'messaging' notifications driver
+# rpc_backend = 'rabbit'
+
+# Configuration options if sending notifications via rabbitmq (these are
+# the defaults)
+rabbit_host = localhost
+rabbit_port = 5672
+rabbit_use_ssl = false
+rabbit_userid = guest
+rabbit_password = guest
+rabbit_virtual_host = /
+rabbit_notification_exchange = glance
+rabbit_notification_topic = notifications
+rabbit_durable_queues = False
+
+# Configuration options if sending notifications via Qpid (these are
+# the defaults)
+qpid_notification_exchange = glance
+qpid_notification_topic = notifications
+qpid_hostname = localhost
+qpid_port = 5672
+qpid_username =
+qpid_password =
+qpid_sasl_mechanisms =
+qpid_reconnect_timeout = 0
+qpid_reconnect_limit = 0
+qpid_reconnect_interval_min = 0
+qpid_reconnect_interval_max = 0
+qpid_reconnect_interval = 0
+qpid_heartbeat = 5
+# Set to 'ssl' to enable SSL
+qpid_protocol = tcp
+qpid_tcp_nodelay = True
+
+# ============ Filesystem Store Options ========================
+
+# Directory that the Filesystem backend store
+# writes image data to
+filesystem_store_datadir = /var/lib/glance/images/
+
+# A list of directories where image data can be stored.
+# This option may be specified multiple times for specifying multiple store
+# directories. Either one of filesystem_store_datadirs or
+# filesystem_store_datadir option is required. A priority number may be given
+# after each directory entry, separated by a ":".
+# When adding an image, the highest priority directory will be selected, unless
+# there is not enough space available in cases where the image size is already
+# known. If no priority is given, it is assumed to be zero and the directory
+# will be considered for selection last. If multiple directories have the same
+# priority, then the one with the most free space available is selected.
+# If same store is specified multiple times then BadStoreConfiguration
+# exception will be raised.
+#filesystem_store_datadirs = /var/lib/glance/images/:1
+
+# A path to a JSON file that contains metadata describing the storage
+# system. When show_multiple_locations is True the information in this
+# file will be returned with any location that is contained in this
+# store.
+#filesystem_store_metadata_file = None
+
+# ============ Swift Store Options =============================
+
+# Version of the authentication service to use
+# Valid versions are '2' for keystone and '1' for swauth and rackspace
+swift_store_auth_version = 2
+
+# Address where the Swift authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'https://'
+# For swauth, use something like '127.0.0.1:8080/v1.0/'
+swift_store_auth_address = 127.0.0.1:5000/v2.0/
+
+# User to authenticate against the Swift authentication service
+# If you use Swift authentication service, set it to 'account':'user'
+# where 'account' is a Swift storage account and 'user'
+# is a user in that account
+swift_store_user = jdoe:jdoe
+
+# Auth key for the user authenticating against the
+# Swift authentication service
+swift_store_key = a86850deb2742ec3cb41518e26aa2d89
+
+# Container within the account that the account should use
+# for storing images in Swift
+swift_store_container = glance
+
+# Do we create the container if it does not exist?
+swift_store_create_container_on_put = False
+
+# What size, in MB, should Glance start chunking image files
+# and do a large object manifest in Swift? By default, this is
+# the maximum object size in Swift, which is 5GB
+swift_store_large_object_size = 5120
+
+# swift_store_config_file = glance-swift.conf
+# This file contains references for each of the configured
+# Swift accounts/backing stores. If used, this option can prevent
+# credentials being stored in the database. Using Swift references
+# is disabled if this config is left blank.
+
+# The reference to the default Swift parameters to use for adding new images.
+# default_swift_reference = 'ref1'
+
+# When doing a large object manifest, what size, in MB, should
+# Glance write chunks to Swift? This amount of data is written
+# to a temporary disk buffer during the process of chunking
+# the image file, and the default is 200MB
+swift_store_large_object_chunk_size = 200
+
+# Whether to use ServiceNET to communicate with the Swift storage servers.
+# (If you aren't RACKSPACE, leave this False!)
+#
+# To use ServiceNET for authentication, prefix hostname of
+# `swift_store_auth_address` with 'snet-'.
+# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
+swift_enable_snet = False
+
+# If set to True enables multi-tenant storage mode which causes Glance images
+# to be stored in tenant specific Swift accounts.
+#swift_store_multi_tenant = False
+
+# A list of swift ACL strings that will be applied as both read and
+# write ACLs to the containers created by Glance in multi-tenant
+# mode. This grants the specified tenants/users read and write access
+# to all newly created image objects. The standard swift ACL string
+# formats are allowed, including:
+# <tenant_id>:<username>
+# <tenant_name>:<username>
+# *:<username>
+# Multiple ACLs can be combined using a comma separated list, for
+# example: swift_store_admin_tenants = service:glance,*:admin
+#swift_store_admin_tenants =
+
+# The region of the swift endpoint to be used for single tenant. This setting
+# is only necessary if the tenant has multiple swift endpoints.
+#swift_store_region =
+
+# If set to False, disables SSL layer compression of https swift requests.
+# Setting to 'False' may improve performance for images which are already
+# in a compressed format, eg qcow2. If set to True, enables SSL layer
+# compression (provided it is supported by the target swift proxy).
+#swift_store_ssl_compression = True
+
+# The number of times a Swift download will be retried before the
+# request fails
+#swift_store_retry_get_count = 0
+
+# Bypass SSL verification for Swift
+#swift_store_auth_insecure = False
+
+# ============ S3 Store Options =============================
+
+# Address where the S3 authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'http://'
+s3_store_host = 127.0.0.1:8080/v1.0/
+
+# User to authenticate against the S3 authentication service
+s3_store_access_key = <20-char AWS access key>
+
+# Auth key for the user authenticating against the
+# S3 authentication service
+s3_store_secret_key = <40-char AWS secret key>
+
+# Container within the account that the account should use
+# for storing images in S3. Note that S3 has a flat namespace,
+# so you need a unique bucket name for your glance images. An
+# easy way to do this is append your AWS access key to "glance".
+# S3 buckets in AWS *must* be lowercased, so remember to lowercase
+# your AWS access key if you use it in your bucket name below!
+s3_store_bucket = <lowercased 20-char aws access key>glance
+
+# Do we create the bucket if it does not exist?
+s3_store_create_bucket_on_put = False
+
+# When sending images to S3, the data will first be written to a
+# temporary buffer on disk. By default the platform's temporary directory
+# will be used. If required, an alternative directory can be specified here.
+#s3_store_object_buffer_dir = /path/to/dir
+
+# When forming a bucket url, boto will either set the bucket name as the
+# subdomain or as the first token of the path. Amazon's S3 service will
+# accept it as the subdomain, but Swift's S3 middleware requires it be
+# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'.
+#s3_store_bucket_url_format = subdomain
+
+# Size, in MB, at which S3 should start chunking image files
+# and do a multipart upload in S3. The default is 100MB.
+#s3_store_large_object_size = 100
+
+# Multipart upload part size, in MB, that S3 should use when uploading
+# parts. The size must be greater than or equal to
+# 5MB. The default is 10MB.
+#s3_store_large_object_chunk_size = 10
+
+# The number of thread pools to perform a multipart upload
+# in S3. The default is 10.
+#s3_store_thread_pools = 10
+
+# ============ RBD Store Options =============================
+
+# Ceph configuration file path
+# If using cephx authentication, this file should
+# include a reference to the right keyring
+# in a client.<USER> section
+#rbd_store_ceph_conf = /etc/ceph/ceph.conf
+
+# RADOS user to authenticate as (only applicable if using cephx)
+# If <None>, a default will be chosen based on the client. section
+# in rbd_store_ceph_conf
+#rbd_store_user = <None>
+
+# RADOS pool in which images are stored
+#rbd_store_pool = images
+
+# RADOS images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+#rbd_store_chunk_size = 8
+
+# ============ Sheepdog Store Options =============================
+
+sheepdog_store_address = localhost
+
+sheepdog_store_port = 7000
+
+# Images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+sheepdog_store_chunk_size = 64
+
+# ============ Cinder Store Options ===============================
+
+# Info to match when looking for cinder in the service catalog
+# Format is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v1/%(project_id)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node (string value)
+#os_region_name = <None>
+
+# Location of ca certificates file to use for cinder client requests
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = False
+
+# ============ VMware Datastore Store Options =====================
+
+# ESX/ESXi or vCenter Server target system.
+# The server value can be an IP address or a DNS name
+# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com
+#vmware_server_host = <None>
+
+# Server username (string value)
+#vmware_server_username = <None>
+
+# Server password (string value)
+#vmware_server_password = <None>
+
+# Inventory path to a datacenter (string value)
+# Value optional when vmware_server_ip is an ESX/ESXi host: if specified
+# should be `ha-datacenter`.
+#vmware_datacenter_path = <None>
+
+# Datastore associated with the datacenter (string value)
+#vmware_datastore_name = <None>
+
+# The number of times we retry on failures
+# e.g., socket error, etc (integer value)
+#vmware_api_retry_count = 10
+
+# The interval used for polling remote tasks
+# invoked on VMware ESX/VC server in seconds (integer value)
+#vmware_task_poll_interval = 5
+
+# Absolute path of the folder containing the images in the datastore
+# (string value)
+#vmware_store_image_dir = /openstack_glance
+
+# Allow to perform insecure SSL requests to the target system (boolean value)
+#vmware_api_insecure = False
+
+# ============ Delayed Delete Options =============================
+
+# Turn on/off delayed delete
+delayed_delete = False
+
+# Delayed delete time in seconds
+scrub_time = 43200
+
+# Directory that the scrubber will use to remind itself of what to delete
+# Make sure this is also set in glance-scrubber.conf
+scrubber_datadir = /var/lib/glance/scrubber
+
+# =============== Quota Options ==================================
+
+# The maximum number of image members allowed per image
+#image_member_quota = 128
+
+# The maximum number of image properties allowed per image
+#image_property_quota = 128
+
+# The maximum number of tags allowed per image
+#image_tag_quota = 128
+
+# The maximum number of locations allowed per image
+#image_location_quota = 10
+
+# Set a system wide quota for every user. This value is the total number
+# of bytes that a user can use across all storage systems. A value of
+# 0 means unlimited.
+#user_storage_quota = 0
+
+# =============== Image Cache Options =============================
+
+# Base directory that the Image Cache uses
+image_cache_dir = /var/lib/glance/image-cache/
+
+# =============== Manager Options =================================
+
+# DEPRECATED. TO BE REMOVED IN THE JUNO RELEASE.
+# Whether or not to enforce that all DB tables have charset utf8.
+# If your database tables do not have charset utf8 you will
+# need to convert before this option is removed. This option is
+# only relevant if your database engine is MySQL.
+#db_enforce_mysql_charset = True
+
+# =============== Database Options =================================
+
+[database]
+# The file name to use with SQLite (string value)
+#sqlite_db = glance.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous = True
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+connection=sqlite:////var/lib/glance/glance.sqlite
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = <None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect = False
+
+# seconds between db connection retries (integer value)
+#db_retry_interval = 1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval = True
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval = 10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries = 20
+
+[keystone_authtoken]
+auth_host = ##OPENSTACK_AUTH_HOST##
+auth_port = 35357
+auth_protocol = http
+admin_tenant_name = service
+admin_user = ##GLANCE_SERVICE_USER##
+admin_password = ##GLANCE_SERVICE_PASSWORD##
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+#config_file = glance-api-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-api-keystone], you would configure the flavor below
+# as 'keystone'.
+#flavor=
+
+[store_type_location_strategy]
+# The scheme list to use to get store preference order. The scheme must be
+# registered by one of the stores defined by the 'known_stores' config option.
+# This option will be applied when you using 'store_type' option as image
+# location strategy defined by the 'location_strategy' config option.
+#store_type_preference =
diff --git a/openstack/etc/glance/glance-cache.conf b/openstack/etc/glance/glance-cache.conf
new file mode 100644
index 00000000..0246b672
--- /dev/null
+++ b/openstack/etc/glance/glance-cache.conf
@@ -0,0 +1,200 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+log_file = /var/log/glance/image-cache.log
+
+# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
+#use_syslog = False
+
+# Directory that the Image Cache writes data to
+image_cache_dir = /var/lib/glance/image-cache/
+
+# Number of seconds after which we should consider an incomplete image to be
+# stalled and eligible for reaping
+image_cache_stall_time = 86400
+
+# Max cache size in bytes
+image_cache_max_size = 10737418240
+
+# Address to find the registry server
+registry_host = 0.0.0.0
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# Auth settings if using Keystone
+# auth_url = http://127.0.0.1:5000/v2.0/
+# admin_tenant_name = %SERVICE_TENANT_NAME%
+# admin_user = %SERVICE_USER%
+# admin_password = %SERVICE_PASSWORD%
+
+# List of which store classes and store class locations are
+# currently known to glance at startup.
+# known_stores = glance.store.filesystem.Store,
+# glance.store.http.Store,
+# glance.store.rbd.Store,
+# glance.store.s3.Store,
+# glance.store.swift.Store,
+# glance.store.sheepdog.Store,
+# glance.store.cinder.Store,
+# glance.store.vmware_datastore.Store,
+
+# ============ Filesystem Store Options ========================
+
+# Directory that the Filesystem backend store
+# writes image data to
+filesystem_store_datadir = /var/lib/glance/images/
+
+# ============ Swift Store Options =============================
+
+# Version of the authentication service to use
+# Valid versions are '2' for keystone and '1' for swauth and rackspace
+swift_store_auth_version = 2
+
+# Address where the Swift authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'https://'
+# For swauth, use something like '127.0.0.1:8080/v1.0/'
+swift_store_auth_address = 127.0.0.1:5000/v2.0/
+
+# User to authenticate against the Swift authentication service
+# If you use Swift authentication service, set it to 'account':'user'
+# where 'account' is a Swift storage account and 'user'
+# is a user in that account
+swift_store_user = jdoe:jdoe
+
+# Auth key for the user authenticating against the
+# Swift authentication service
+swift_store_key = a86850deb2742ec3cb41518e26aa2d89
+
+# Container within the account that the account should use
+# for storing images in Swift
+swift_store_container = glance
+
+# Do we create the container if it does not exist?
+swift_store_create_container_on_put = False
+
+# What size, in MB, should Glance start chunking image files
+# and do a large object manifest in Swift? By default, this is
+# the maximum object size in Swift, which is 5GB
+swift_store_large_object_size = 5120
+
+# When doing a large object manifest, what size, in MB, should
+# Glance write chunks to Swift? This amount of data is written
+# to a temporary disk buffer during the process of chunking
+# the image file, and the default is 200MB
+swift_store_large_object_chunk_size = 200
+
+# Whether to use ServiceNET to communicate with the Swift storage servers.
+# (If you aren't RACKSPACE, leave this False!)
+#
+# To use ServiceNET for authentication, prefix hostname of
+# `swift_store_auth_address` with 'snet-'.
+# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
+swift_enable_snet = False
+
+# ============ S3 Store Options =============================
+
+# Address where the S3 authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'http://'
+s3_store_host = 127.0.0.1:8080/v1.0/
+
+# User to authenticate against the S3 authentication service
+s3_store_access_key = <20-char AWS access key>
+
+# Auth key for the user authenticating against the
+# S3 authentication service
+s3_store_secret_key = <40-char AWS secret key>
+
+# Container within the account that the account should use
+# for storing images in S3. Note that S3 has a flat namespace,
+# so you need a unique bucket name for your glance images. An
+# easy way to do this is append your AWS access key to "glance".
+# S3 buckets in AWS *must* be lowercased, so remember to lowercase
+# your AWS access key if you use it in your bucket name below!
+s3_store_bucket = <lowercased 20-char aws access key>glance
+
+# Do we create the bucket if it does not exist?
+s3_store_create_bucket_on_put = False
+
+# When sending images to S3, the data will first be written to a
+# temporary buffer on disk. By default the platform's temporary directory
+# will be used. If required, an alternative directory can be specified here.
+# s3_store_object_buffer_dir = /path/to/dir
+
+# ============ Cinder Store Options ===========================
+
+# Info to match when looking for cinder in the service catalog
+# Format is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v1/%(project_id)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node (string value)
+#os_region_name = <None>
+
+# Location of ca certificates file to use for cinder client requests
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = False
+
+# ============ VMware Datastore Store Options =====================
+
+# ESX/ESXi or vCenter Server target system.
+# The server value can be an IP address or a DNS name
+# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com
+#vmware_server_host = <None>
+
+# Server username (string value)
+#vmware_server_username = <None>
+
+# Server password (string value)
+#vmware_server_password = <None>
+
+# Inventory path to a datacenter (string value)
+# Value optional when vmware_server_ip is an ESX/ESXi host: if specified
+# should be `ha-datacenter`.
+#vmware_datacenter_path = <None>
+
+# Datastore associated with the datacenter (string value)
+#vmware_datastore_name = <None>
+
+# The number of times we retry on failures
+# e.g., socket error, etc (integer value)
+#vmware_api_retry_count = 10
+
+# The interval used for polling remote tasks
+# invoked on VMware ESX/VC server in seconds (integer value)
+#vmware_task_poll_interval = 5
+
+# Absolute path of the folder containing the images in the datastore
+# (string value)
+#vmware_store_image_dir = /openstack_glance
+
+# Allow to perform insecure SSL requests to the target system (boolean value)
+#vmware_api_insecure = False
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+# metadata_encryption_key = <16, 24 or 32 char registry metadata key>
diff --git a/openstack/etc/glance/glance-registry-paste.ini b/openstack/etc/glance/glance-registry-paste.ini
new file mode 100644
index 00000000..d9f6eb35
--- /dev/null
+++ b/openstack/etc/glance/glance-registry-paste.ini
@@ -0,0 +1,25 @@
+# Use this pipeline for no auth - DEFAULT
+[pipeline:glance-registry]
+pipeline = unauthenticated-context registryapp
+
+# Use this pipeline for keystone auth
+[pipeline:glance-registry-keystone]
+pipeline = authtoken context registryapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user.
+[pipeline:glance-registry-trusted-auth]
+pipeline = context registryapp
+
+[app:registryapp]
+paste.app_factory = glance.registry.api:API.factory
+
+[filter:context]
+paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:authtoken]
+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
diff --git a/openstack/etc/glance/glance-registry.conf b/openstack/etc/glance/glance-registry.conf
new file mode 100644
index 00000000..3885116b
--- /dev/null
+++ b/openstack/etc/glance/glance-registry.conf
@@ -0,0 +1,197 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Address to bind the registry server
+bind_host = 0.0.0.0
+
+# Port to bind the registry server to
+bind_port = 9191
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+log_file = /var/log/glance/registry.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package.
+#data_api = glance.db.sqlalchemy.api
+
+# Number of Glance Registry worker processes to start.
+# On machines with more than one CPU increasing this value
+# may improve performance (especially if using SSL with
+# compression turned on). It is typically recommended to set
+# this value to the number of CPUs present on your machine.
+workers = 1
+
+# Enable Registry API versions individually or simultaneously
+#enable_v1_registry = True
+#enable_v2_registry = True
+
+# Limit the api to return `param_limit_max` items in a call to a container. If
+# a larger `limit` query param is provided, it will be reduced to this value.
+api_limit_max = 1000
+
+# If a `limit` query param is not provided in an api request, it will
+# default to `limit_param_default`
+limit_param_default = 25
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Whether to automatically create the database tables.
+# Default: False
+#db_auto_create = False
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+#sqlalchemy_debug = True
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+#use_syslog = False
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL1
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting registry server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting registry server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+# ================= Database Options ==========================
+
+[database]
+# The file name to use with SQLite (string value)
+#sqlite_db = glance.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous = True
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = <None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect = False
+
+# seconds between db connection retries (integer value)
+#db_retry_interval = 1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval = True
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval = 10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries = 20
+
+[keystone_authtoken]
+auth_host = ##OPENSTACK_AUTH_HOST##
+auth_port = 35357
+auth_protocol = http
+admin_tenant_name = service
+admin_user = ##GLANCE_SERVICE_USER##
+admin_password = ##GLANCE_SERVICE_PASSWORD##
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+#config_file = glance-registry-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-registry-keystone], you would configure the flavor below
+# as 'keystone'.
+#flavor=
diff --git a/openstack/etc/glance/glance-scrubber.conf b/openstack/etc/glance/glance-scrubber.conf
new file mode 100644
index 00000000..5c5e8d4c
--- /dev/null
+++ b/openstack/etc/glance/glance-scrubber.conf
@@ -0,0 +1,56 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+log_file = /var/log/glance/scrubber.log
+
+# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
+#use_syslog = False
+
+# Should we run our own loop or rely on cron/scheduler to run us
+daemon = False
+
+# Loop time between checking for new items to schedule for delete
+wakeup_time = 300
+
+# Directory that the scrubber will use to remind itself of what to delete
+# Make sure this is also set in glance-api.conf
+scrubber_datadir = /var/lib/glance/scrubber
+
+# Only one server in your deployment should be designated the cleanup host
+cleanup_scrubber = False
+
+# pending_delete items older than this time are candidates for cleanup
+cleanup_scrubber_time = 86400
+
+# Address to find the registry server for cleanups
+registry_host = 0.0.0.0
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# Auth settings if using Keystone
+# auth_url = http://127.0.0.1:5000/v2.0/
+# admin_tenant_name = %SERVICE_TENANT_NAME%
+# admin_user = %SERVICE_USER%
+# admin_password = %SERVICE_PASSWORD%
+
+# Directory to use for lock files. Defaults to a temp directory
+# (string value). This setting needs to be the same for both
+# glance-scrubber and glance-api.
+#lock_path=<None>
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
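+#
+# Illustrative only: a suitable 32-character key could be generated with,
+# for example, `openssl rand -hex 16` and configured as:
+#metadata_encryption_key = 0123456789abcdef0123456789abcdef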
diff --git a/openstack/etc/glance/logging.conf b/openstack/etc/glance/logging.conf
new file mode 100644
index 00000000..7e7f31f0
--- /dev/null
+++ b/openstack/etc/glance/logging.conf
@@ -0,0 +1,54 @@
+[loggers]
+keys=root,api,registry,combined
+
+[formatters]
+keys=normal,normal_with_name,debug
+
+[handlers]
+keys=production,file,devel
+
+[logger_root]
+level=NOTSET
+handlers=devel
+
+[logger_api]
+level=DEBUG
+handlers=devel
+qualname=glance-api
+
+[logger_registry]
+level=DEBUG
+handlers=devel
+qualname=glance-registry
+
+[logger_combined]
+level=DEBUG
+handlers=devel
+qualname=glance-combined
+
+[handler_production]
+class=handlers.SysLogHandler
+level=ERROR
+formatter=normal_with_name
+args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)
+
+[handler_file]
+class=FileHandler
+level=DEBUG
+formatter=normal_with_name
+args=('glance.log', 'w')
+
+[handler_devel]
+class=StreamHandler
+level=NOTSET
+formatter=debug
+args=(sys.stdout,)
+
+[formatter_normal]
+format=%(asctime)s %(levelname)s %(message)s
+
+[formatter_normal_with_name]
+format=(%(name)s): %(asctime)s %(levelname)s %(message)s
+
+[formatter_debug]
+format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s
diff --git a/openstack/etc/glance/policy.json b/openstack/etc/glance/policy.json
new file mode 100644
index 00000000..8b7e6871
--- /dev/null
+++ b/openstack/etc/glance/policy.json
@@ -0,0 +1,32 @@
+{
+ "context_is_admin": "role:admin",
+ "default": "",
+
+ "add_image": "",
+ "delete_image": "",
+ "get_image": "",
+ "get_images": "",
+ "modify_image": "",
+ "publicize_image": "role:admin",
+ "copy_from": "",
+
+ "download_image": "",
+ "upload_image": "",
+
+ "delete_image_location": "",
+ "get_image_location": "",
+ "set_image_location": "",
+
+ "add_member": "",
+ "delete_member": "",
+ "get_member": "",
+ "get_members": "",
+ "modify_member": "",
+
+ "manage_image_cache": "role:admin",
+
+ "get_task": "",
+ "get_tasks": "",
+ "add_task": "",
+ "modify_task": ""
+}
diff --git a/openstack/etc/glance/schema-image.json b/openstack/etc/glance/schema-image.json
new file mode 100644
index 00000000..5aafd6b3
--- /dev/null
+++ b/openstack/etc/glance/schema-image.json
@@ -0,0 +1,28 @@
+{
+ "kernel_id": {
+ "type": "string",
+ "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+ "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image."
+ },
+ "ramdisk_id": {
+ "type": "string",
+ "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+ "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image."
+ },
+ "instance_uuid": {
+ "type": "string",
+ "description": "ID of instance used to create this image."
+ },
+ "architecture": {
+ "description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
+ "type": "string"
+ },
+ "os_distro": {
+ "description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
+ "type": "string"
+ },
+ "os_version": {
+ "description": "Operating system version as specified by the distributor",
+ "type": "string"
+ }
+}
diff --git a/openstack/etc/keystone/keystone-paste.ini b/openstack/etc/keystone/keystone-paste.ini
new file mode 100644
index 00000000..cd132971
--- /dev/null
+++ b/openstack/etc/keystone/keystone-paste.ini
@@ -0,0 +1,112 @@
+# Keystone PasteDeploy configuration file.
+
+[filter:debug]
+paste.filter_factory = keystone.common.wsgi:Debug.factory
+
+[filter:build_auth_context]
+paste.filter_factory = keystone.middleware:AuthContextMiddleware.factory
+
+[filter:token_auth]
+paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
+
+[filter:admin_token_auth]
+paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
+
+[filter:xml_body]
+paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
+
+[filter:xml_body_v2]
+paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV2.factory
+
+[filter:xml_body_v3]
+paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV3.factory
+
+[filter:json_body]
+paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
+
+[filter:user_crud_extension]
+paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
+
+[filter:crud_extension]
+paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
+
+[filter:ec2_extension]
+paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
+
+[filter:ec2_extension_v3]
+paste.filter_factory = keystone.contrib.ec2:Ec2ExtensionV3.factory
+
+[filter:federation_extension]
+paste.filter_factory = keystone.contrib.federation.routers:FederationExtension.factory
+
+[filter:oauth1_extension]
+paste.filter_factory = keystone.contrib.oauth1.routers:OAuth1Extension.factory
+
+[filter:s3_extension]
+paste.filter_factory = keystone.contrib.s3:S3Extension.factory
+
+[filter:endpoint_filter_extension]
+paste.filter_factory = keystone.contrib.endpoint_filter.routers:EndpointFilterExtension.factory
+
+[filter:simple_cert_extension]
+paste.filter_factory = keystone.contrib.simple_cert:SimpleCertExtension.factory
+
+[filter:revoke_extension]
+paste.filter_factory = keystone.contrib.revoke.routers:RevokeExtension.factory
+
+[filter:url_normalize]
+paste.filter_factory = keystone.middleware:NormalizingFilter.factory
+
+[filter:sizelimit]
+paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory
+
+[filter:stats_monitoring]
+paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory
+
+[filter:stats_reporting]
+paste.filter_factory = keystone.contrib.stats:StatsExtension.factory
+
+[filter:access_log]
+paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory
+
+[app:public_service]
+paste.app_factory = keystone.service:public_app_factory
+
+[app:service_v3]
+paste.app_factory = keystone.service:v3_app_factory
+
+[app:admin_service]
+paste.app_factory = keystone.service:admin_app_factory
+
+[pipeline:public_api]
+pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension user_crud_extension public_service
+
+[pipeline:admin_api]
+pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension s3_extension crud_extension admin_service
+
+[pipeline:api_v3]
+pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v3 json_body ec2_extension_v3 s3_extension simple_cert_extension service_v3
+
+[app:public_version_service]
+paste.app_factory = keystone.service:public_version_app_factory
+
+[app:admin_version_service]
+paste.app_factory = keystone.service:admin_version_app_factory
+
+[pipeline:public_version_api]
+pipeline = sizelimit url_normalize xml_body public_version_service
+
+[pipeline:admin_version_api]
+pipeline = sizelimit url_normalize xml_body admin_version_service
+
+[composite:main]
+use = egg:Paste#urlmap
+/v2.0 = public_api
+/v3 = api_v3
+/ = public_version_api
+
+[composite:admin]
+use = egg:Paste#urlmap
+/v2.0 = admin_api
+/v3 = api_v3
+/ = admin_version_api
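+
+# Illustrative routing note (assumption, based on the urlmap composites above
+# and the default ports in keystone.conf): a request to
+# http://host:5000/v2.0/tokens is handled by the public_api pipeline, while
+# http://host:35357/v2.0/tokens is handled by admin_api.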
diff --git a/openstack/etc/keystone/keystone.conf b/openstack/etc/keystone/keystone.conf
new file mode 100644
index 00000000..fba10a36
--- /dev/null
+++ b/openstack/etc/keystone/keystone.conf
@@ -0,0 +1,1384 @@
+[DEFAULT]
+
+#
+# Options defined in keystone
+#
+
+# A "shared secret" that can be used to bootstrap Keystone.
+# This "token" does not represent a user, and carries no
+# explicit authorization. To disable in production (highly
+# recommended), remove AdminTokenAuthMiddleware from your
+# paste application pipelines (for example, in keystone-
+# paste.ini). (string value)
+admin_token=##KEYSTONE_TEMPORARY_ADMIN_TOKEN##
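+# (Illustrative note: the ##...## placeholder above is expected to be
+# substituted at configuration time; a random value could be generated
+# with, for example, `openssl rand -hex 10`.)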
+
+# The IP address of the network interface for the public
+# service to listen on. (string value)
+# Deprecated group/name - [DEFAULT]/bind_host
+#public_bind_host=0.0.0.0
+
+# The IP address of the network interface for the admin
+# service to listen on. (string value)
+# Deprecated group/name - [DEFAULT]/bind_host
+#admin_bind_host=0.0.0.0
+
+# The port which the OpenStack Compute service listens on.
+# (integer value)
+#compute_port=8774
+
+# The port number which the admin service listens on. (integer
+# value)
+#admin_port=35357
+
+# The port number which the public service listens on.
+# (integer value)
+#public_port=5000
+
+# The base public endpoint URL for Keystone that is advertised
+# to clients (NOTE: this does NOT affect how Keystone listens
+# for connections). Defaults to the base host URL of the
+# request. E.g. a request to http://server:5000/v2.0/users
+# will default to http://server:5000. You should only need to
+# set this value if the base URL contains a path (e.g.
+# /prefix/v2.0) or the endpoint should be found on a different
+# server. (string value)
+#public_endpoint=<None>
+
+# The base admin endpoint URL for Keystone that is advertised
+# to clients (NOTE: this does NOT affect how Keystone listens
+# for connections). Defaults to the base host URL of the
+# request. E.g. a request to http://server:35357/v2.0/users
+# will default to http://server:35357. You should only need to
+# set this value if the base URL contains a path (e.g.
+# /prefix/v2.0) or the endpoint should be found on a different
+# server. (string value)
+#admin_endpoint=<None>
+
+# The number of worker processes to serve the public WSGI
+# application (integer value)
+#public_workers=1
+
+# The number of worker processes to serve the admin WSGI
+# application (integer value)
+#admin_workers=1
+
+# Enforced by optional sizelimit middleware
+# (keystone.middleware:RequestBodySizeLimiter). (integer
+# value)
+#max_request_body_size=114688
+
+# Limit the sizes of user & project ID/names. (integer value)
+#max_param_size=64
+
+# Similar to max_param_size, but provides an exception for
+# token values. (integer value)
+#max_token_size=8192
+
+# During a SQL upgrade member_role_id will be used to create a
+# new role that will replace records in the assignment table
+# with explicit role grants. After migration, the
+# member_role_id will be used in the API add_user_to_project.
+# (string value)
+#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab
+
+# During a SQL upgrade member_role_name will be used to create
+# a new role that will replace records in the assignment table
+# with explicit role grants. After migration, member_role_name
+# will be ignored. (string value)
+#member_role_name=_member_
+
+# The value passed as the keyword "rounds" to passlib's
+# encrypt method. (integer value)
+#crypt_strength=40000
+
+# Set this to true if you want to enable TCP_KEEPALIVE on
+# server sockets, i.e. sockets used by the Keystone wsgi
+# server for client connections. (boolean value)
+#tcp_keepalive=false
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server
+# socket. Only applies if tcp_keepalive is true. Not supported
+# on OS X. (integer value)
+#tcp_keepidle=600
+
+# The maximum number of entities that will be returned in a
+# collection, with no limit set by default. This global limit
+# may be then overridden for a specific driver, by specifying
+# a list_limit in the appropriate section (e.g. [assignment]).
+# (integer value)
+#list_limit=<None>
+
+# Set this to false if you want to enable the ability for
+# user, group and project entities to be moved between domains
+# by updating their domain_id. Allowing such movement is not
+# recommended if the scope of a domain admin is being
+# restricted by use of an appropriate policy file (see
+# policy.v3cloudsample as an example). (boolean value)
+#domain_id_immutable=true
+
+# If set to true, strict password length checking is performed
+# for password manipulation. If a password exceeds the maximum
+# length, the operation will fail with an HTTP 403 Forbidden
+# error. If set to false, passwords are automatically
+# truncated to the maximum length. (boolean value)
+#strict_password_check=false
+
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call. (list value)
+#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=keystone
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). Valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+#rabbit_host=keystone
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+#rabbit_port=5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+#rabbit_userid=guest
+
+# The RabbitMQ password. (string value)
+#rabbit_password=guest
+
+# the RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# If passed, use a fake RabbitMQ provider. (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=keystone
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+#notification_driver=
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+#rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in keystone.notifications
+#
+
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=<None>
+
+
+#
+# Options defined in keystone.middleware.ec2_token
+#
+
+# URL to get token from ec2 request. (string value)
+#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens
+
+# Required if EC2 server requires client certificate. (string
+# value)
+#keystone_ec2_keyfile=<None>
+
+# Client certificate key filename. Required if EC2 server
+# requires client certificate. (string value)
+#keystone_ec2_certfile=<None>
+
+# A PEM encoded certificate authority to use when verifying
+# HTTPS connections. Defaults to the system CAs. (string
+# value)
+#keystone_ec2_cafile=<None>
+
+# Disable SSL certificate verification. (boolean value)
+#keystone_ec2_insecure=false
+
+
+#
+# Options defined in keystone.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in keystone.openstack.common.lockutils
+#
+
+# Enables or disables inter-process locks. (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. (string value)
+#lock_path=<None>
+
+
+#
+# Options defined in keystone.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error. (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context. (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context.
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format.
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
+
+# Enables or disables publication of error events. (boolean
+# value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log
+# message. (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log
+# message. (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of a logging configuration file. This file is
+# appended to any existing logging configuration files. For
+# details about logging configuration files, see the Python
+# logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s . (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=<None>
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and will change in J to honor RFC5424. (boolean
+# value)
+#use_syslog=false
+
+# (Optional) Enables or disables syslog rfc5424 format for
+# logging. If enabled, prefixes the MSG part of the syslog
+# message with APP-NAME (RFC5424). The format without the APP-
+# NAME is deprecated in I, and will be removed in J. (boolean
+# value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in keystone.openstack.common.policy
+#
+
+# The JSON file that defines policies. (string value)
+#policy_file=policy.json
+
+# Default rule. Enforced when a requested rule is not found.
+# (string value)
+#policy_default_rule=default
+
+
+[assignment]
+
+#
+# Options defined in keystone
+#
+
+# Assignment backend driver. (string value)
+#driver=<None>
+
+# Toggle for assignment caching. This has no effect unless
+# global caching is enabled. (boolean value)
+#caching=true
+
+# TTL (in seconds) to cache assignment data. This has no
+# effect unless global caching is enabled. (integer value)
+#cache_time=<None>
+
+# Maximum number of entities that will be returned in an
+# assignment collection. (integer value)
+#list_limit=<None>
+
+
+[auth]
+
+#
+# Options defined in keystone
+#
+
+# Default auth methods. (list value)
+#methods=external,password,token
+
+# The password auth plugin module. (string value)
+#password=keystone.auth.plugins.password.Password
+
+# The token auth plugin module. (string value)
+#token=keystone.auth.plugins.token.Token
+
+# The external (REMOTE_USER) auth plugin module. (string
+# value)
+#external=keystone.auth.plugins.external.DefaultDomain
+
+
+[cache]
+
+#
+# Options defined in keystone
+#
+
+# Prefix for building the configuration dictionary for the
+# cache region. This should not need to be changed unless
+# there is another dogpile.cache region with the same
+# configuration name. (string value)
+#config_prefix=cache.keystone
+
+# Default TTL, in seconds, for any cached item in the
+# dogpile.cache region. This applies to any cached method that
+# doesn't have an explicit cache expiration time defined for
+# it. (integer value)
+#expiration_time=600
+
+# Dogpile.cache backend module. It is recommended that
+# Memcache (dogpile.cache.memcached) or Redis
+# (dogpile.cache.redis) be used in production deployments.
+# Small workloads (single process) like devstack can use the
+# dogpile.cache.memory backend. (string value)
+#backend=keystone.common.cache.noop
+
+# Use a key-mangling function (sha1) to ensure fixed length
+# cache-keys. This is toggle-able for debugging purposes, it
+# is highly recommended to always leave this set to true.
+# (boolean value)
+#use_key_mangler=true
+
+# Arguments supplied to the backend module. Specify this
+# option once per argument to be passed to the dogpile.cache
+# backend. Example format: "<argname>:<value>". (multi valued)
+#backend_argument=
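+# Illustrative example (assuming the dogpile.cache.memcached backend):
+#backend_argument = url:127.0.0.1:11211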
+
+# Proxy classes to import that will affect the way the
+# dogpile.cache backend functions. See the dogpile.cache
+# documentation on changing-backend-behavior. (list value)
+#proxies=
+
+# Global toggle for all caching using the should_cache_fn
+# mechanism. (boolean value)
+#enabled=false
+
+# Extra debugging from the cache backend (cache keys,
+# get/set/delete/etc calls). This is only really useful if you
+# need to see the specific cache-backend get/set/delete calls
+# with the keys/values. Typically this should be left set to
+# false. (boolean value)
+#debug_cache_backend=false
+
+
+[catalog]
+
+#
+# Options defined in keystone
+#
+
+# Catalog template file name for use with the template catalog
+# backend. (string value)
+#template_file=default_catalog.templates
+
+# Catalog backend driver. (string value)
+#driver=keystone.catalog.backends.sql.Catalog
+
+# Maximum number of entities that will be returned in a
+# catalog collection. (integer value)
+#list_limit=<None>
+
+
+[credential]
+
+#
+# Options defined in keystone
+#
+
+# Credential backend driver. (string value)
+#driver=keystone.credential.backends.sql.Credential
+
+
+[database]
+
+#
+# Options defined in oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+#sqlite_db=oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous=true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the
+# database. (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection=<None>
+connection=sqlite:////var/lib/keystone/keystone.sqlite
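+# Illustrative alternative (hypothetical credentials and host) when using
+# MySQL instead of SQLite:
+#connection = mysql://keystone:KEYSTONE_DBPASS@localhost/keystone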
+
+# The SQLAlchemy connection string to use to connect to the
+# slave database. (string value)
+#slave_connection=<None>
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum db connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a SQL connection.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information: 0=None,
+# 100=Everything. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add Python stack traces to SQL as comment strings. (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+# Enable the experimental use of database reconnect on
+# connection lost. (boolean value)
+#use_db_reconnect=false
+
+# Seconds between database connection retries. (integer value)
+#db_retry_interval=1
+
+# If True, increases the interval between database connection
+# retries up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval=true
+
+# If db_inc_retry_interval is set, the maximum seconds between
+# database connection retries. (integer value)
+#db_max_retry_interval=10
+
+# Maximum database connection retries before error is raised.
+# Set to -1 to specify an infinite retry count. (integer
+# value)
+#db_max_retries=20
+
+
+[ec2]
+
+#
+# Options defined in keystone
+#
+
+# EC2Credential backend driver. (string value)
+#driver=keystone.contrib.ec2.backends.kvs.Ec2
+
+
+[endpoint_filter]
+
+#
+# Options defined in keystone
+#
+
+# Endpoint Filter backend driver (string value)
+#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter
+
+# Toggle to return all active endpoints if no filter exists.
+# (boolean value)
+#return_all_endpoints_if_no_filter=true
+
+
+[federation]
+
+#
+# Options defined in keystone
+#
+
+# Federation backend driver. (string value)
+#driver=keystone.contrib.federation.backends.sql.Federation
+
+# Value to be used when filtering assertion parameters from
+# the environment. (string value)
+#assertion_prefix=
+
+
+[identity]
+
+#
+# Options defined in keystone
+#
+
+# This references the domain to use for all Identity API v2
+# requests (which are not aware of domains). A domain with
+# this ID will be created for you by keystone-manage db_sync
+# in migration 008. The domain referenced by this ID cannot be
+# deleted on the v3 API, to prevent accidentally breaking the
+# v2 API. There is nothing special about this domain, other
+# than the fact that it must exist in order to maintain
+# support for your v2 clients. (string value)
+#default_domain_id=default
+
+# A subset (or all) of domains can have their own identity
+# driver, each with their own partial configuration file in a
+# domain configuration directory. Only values specific to the
+# domain need to be placed in the domain specific
+# configuration file. This feature is disabled by default; set
+# to true to enable. (boolean value)
+#domain_specific_drivers_enabled=false
+
+# Path for Keystone to locate the domain specific identity
+# configuration files if domain_specific_drivers_enabled is
+# set to true. (string value)
+#domain_config_dir=/etc/keystone/domains
+
+# Identity backend driver. (string value)
+#driver=keystone.identity.backends.sql.Identity
+
+# Maximum supported length for user passwords; decrease to
+# improve performance. (integer value)
+#max_password_length=4096
+
+# Maximum number of entities that will be returned in an
+# identity collection. (integer value)
+#list_limit=<None>
+
+
+[kvs]
+
+#
+# Options defined in keystone
+#
+
+# Extra dogpile.cache backend modules to register with the
+# dogpile.cache library. (list value)
+#backends=
+
+# Prefix for building the configuration dictionary for the KVS
+# region. This should not need to be changed unless there is
+# another dogpile.cache region with the same configuration
+# name. (string value)
+#config_prefix=keystone.kvs
+
+# Toggle to disable using a key-mangling function to ensure
+# fixed length keys. This is toggle-able for debugging
+# purposes; it is highly recommended to always leave this set
+# to true. (boolean value)
+#enable_key_mangler=true
+
+# Default lock timeout for distributed locking. (integer
+# value)
+#default_lock_timeout=5
+
+
+[ldap]
+
+#
+# Options defined in keystone
+#
+
+# URL for connecting to the LDAP server. (string value)
+#url=ldap://keystone
+
+# User BindDN to query the LDAP server. (string value)
+#user=<None>
+
+# Password for the BindDN to query the LDAP server. (string
+# value)
+#password=<None>
+
+# LDAP server suffix (string value)
+#suffix=cn=example,cn=com
+
+# If true, will add a dummy member to groups. This is required
+# if the objectclass for groups requires the "member"
+# attribute. (boolean value)
+#use_dumb_member=false
+
+# DN of the "dummy member" to use when "use_dumb_member" is
+# enabled. (string value)
+#dumb_member=cn=dumb,dc=nonexistent
+
+# Delete subtrees using the subtree delete control. Only
+# enable this option if your LDAP server supports subtree
+# deletion. (boolean value)
+#allow_subtree_delete=false
+
+# The LDAP scope for queries; this can be either "one"
+# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree).
+# (string value)
+#query_scope=one
+
+# Maximum results per page; a value of zero ("0") disables
+# paging. (integer value)
+#page_size=0
+
+# The LDAP dereferencing option for queries. This can be
+# either "never", "searching", "always", "finding" or
+# "default". The "default" option falls back to using default
+# dereferencing configured by your ldap.conf. (string value)
+#alias_dereferencing=default
+
+# Sets the LDAP debugging level for LDAP calls. A value of 0
+# means that debugging is not enabled. This value is a
+# bitmask, consult your LDAP documentation for possible
+# values. (integer value)
+#debug_level=<None>
+
+# Override the system's default referral chasing behavior for
+# queries. (boolean value)
+#chase_referrals=<None>
+
+# Search base for users. (string value)
+#user_tree_dn=<None>
+
+# LDAP search filter for users. (string value)
+#user_filter=<None>
+
+# LDAP objectclass for users. (string value)
+#user_objectclass=inetOrgPerson
+
+# LDAP attribute mapped to user id. (string value)
+#user_id_attribute=cn
+
+# LDAP attribute mapped to user name. (string value)
+#user_name_attribute=sn
+
+# LDAP attribute mapped to user email. (string value)
+#user_mail_attribute=email
+
+# LDAP attribute mapped to password. (string value)
+#user_pass_attribute=userPassword
+
+# LDAP attribute mapped to user enabled flag. (string value)
+#user_enabled_attribute=enabled
+
+# Bitmask integer to indicate the bit that the enabled value
+# is stored in if the LDAP server represents "enabled" as a
+# bit on an integer rather than a boolean. A value of "0"
+# indicates the mask is not used. If this is not set to "0"
+# the typical value is "2". This is typically used when
+# "user_enabled_attribute = userAccountControl". (integer
+# value)
+#user_enabled_mask=0
+
+# Default value to enable users. This should match an
+# appropriate int value if the LDAP server uses non-boolean
+# (bitmask) values to indicate if a user is enabled or
+# disabled. If this is not set to "True" the typical value is
+# "512". This is typically used when "user_enabled_attribute =
+# userAccountControl". (string value)
+#user_enabled_default=True
+
+# List of attributes stripped off the user on update. (list
+# value)
+#user_attribute_ignore=default_project_id,tenants
+
+# LDAP attribute mapped to default_project_id for users.
+# (string value)
+#user_default_project_id_attribute=<None>
+
+# Allow user creation in LDAP backend. (boolean value)
+#user_allow_create=true
+
+# Allow user updates in LDAP backend. (boolean value)
+#user_allow_update=true
+
+# Allow user deletion in LDAP backend. (boolean value)
+#user_allow_delete=true
+
+# If true, Keystone uses an alternative method to determine if
+# a user is enabled or not by checking if they are a member of
+# the "user_enabled_emulation_dn" group. (boolean value)
+#user_enabled_emulation=false
+
+# DN of the group entry to hold enabled users when using
+# enabled emulation. (string value)
+#user_enabled_emulation_dn=<None>
+
+# List of additional LDAP attributes used for mapping
+# additional attribute mappings for users. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#user_additional_attribute_mapping=
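+# Illustrative example (hypothetical attribute names) mapping the LDAP
+# "description" attribute to the Identity API "description" attribute:
+#user_additional_attribute_mapping = description:description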
+
+# Search base for projects (string value)
+# Deprecated group/name - [ldap]/tenant_tree_dn
+#project_tree_dn=<None>
+
+# LDAP search filter for projects. (string value)
+# Deprecated group/name - [ldap]/tenant_filter
+#project_filter=<None>
+
+# LDAP objectclass for projects. (string value)
+# Deprecated group/name - [ldap]/tenant_objectclass
+#project_objectclass=groupOfNames
+
+# LDAP attribute mapped to project id. (string value)
+# Deprecated group/name - [ldap]/tenant_id_attribute
+#project_id_attribute=cn
+
+# LDAP attribute mapped to project membership for user.
+# (string value)
+# Deprecated group/name - [ldap]/tenant_member_attribute
+#project_member_attribute=member
+
+# LDAP attribute mapped to project name. (string value)
+# Deprecated group/name - [ldap]/tenant_name_attribute
+#project_name_attribute=ou
+
+# LDAP attribute mapped to project description. (string value)
+# Deprecated group/name - [ldap]/tenant_desc_attribute
+#project_desc_attribute=description
+
+# LDAP attribute mapped to project enabled. (string value)
+# Deprecated group/name - [ldap]/tenant_enabled_attribute
+#project_enabled_attribute=enabled
+
+# LDAP attribute mapped to project domain_id. (string value)
+# Deprecated group/name - [ldap]/tenant_domain_id_attribute
+#project_domain_id_attribute=businessCategory
+
+# List of attributes stripped off the project on update. (list
+# value)
+# Deprecated group/name - [ldap]/tenant_attribute_ignore
+#project_attribute_ignore=
+
+# Allow project creation in LDAP backend. (boolean value)
+# Deprecated group/name - [ldap]/tenant_allow_create
+#project_allow_create=true
+
+# Allow project update in LDAP backend. (boolean value)
+# Deprecated group/name - [ldap]/tenant_allow_update
+#project_allow_update=true
+
+# Allow project deletion in LDAP backend. (boolean value)
+# Deprecated group/name - [ldap]/tenant_allow_delete
+#project_allow_delete=true
+
+# If true, Keystone uses an alternative method to determine if
+# a project is enabled or not by checking if it is a member
+# of the "project_enabled_emulation_dn" group. (boolean value)
+# Deprecated group/name - [ldap]/tenant_enabled_emulation
+#project_enabled_emulation=false
+
+# DN of the group entry to hold enabled projects when using
+# enabled emulation. (string value)
+# Deprecated group/name - [ldap]/tenant_enabled_emulation_dn
+#project_enabled_emulation_dn=<None>
+
+# Additional attribute mappings for projects. Attribute
+# mapping format is <ldap_attr>:<user_attr>, where ldap_attr
+# is the attribute in the LDAP entry and user_attr is the
+# Identity API attribute. (list value)
+# Deprecated group/name - [ldap]/tenant_additional_attribute_mapping
+#project_additional_attribute_mapping=
+
+# Search base for roles. (string value)
+#role_tree_dn=<None>
+
+# LDAP search filter for roles. (string value)
+#role_filter=<None>
+
+# LDAP objectclass for roles. (string value)
+#role_objectclass=organizationalRole
+
+# LDAP attribute mapped to role id. (string value)
+#role_id_attribute=cn
+
+# LDAP attribute mapped to role name. (string value)
+#role_name_attribute=ou
+
+# LDAP attribute mapped to role membership. (string value)
+#role_member_attribute=roleOccupant
+
+# List of attributes stripped off the role on update. (list
+# value)
+#role_attribute_ignore=
+
+# Allow role creation in LDAP backend. (boolean value)
+#role_allow_create=true
+
+# Allow role update in LDAP backend. (boolean value)
+#role_allow_update=true
+
+# Allow role deletion in LDAP backend. (boolean value)
+#role_allow_delete=true
+
+# Additional attribute mappings for roles. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#role_additional_attribute_mapping=
+
+# Search base for groups. (string value)
+#group_tree_dn=<None>
+
+# LDAP search filter for groups. (string value)
+#group_filter=<None>
+
+# LDAP objectclass for groups. (string value)
+#group_objectclass=groupOfNames
+
+# LDAP attribute mapped to group id. (string value)
+#group_id_attribute=cn
+
+# LDAP attribute mapped to group name. (string value)
+#group_name_attribute=ou
+
+# LDAP attribute mapped to show group membership. (string
+# value)
+#group_member_attribute=member
+
+# LDAP attribute mapped to group description. (string value)
+#group_desc_attribute=description
+
+# List of attributes stripped off the group on update. (list
+# value)
+#group_attribute_ignore=
+
+# Allow group creation in LDAP backend. (boolean value)
+#group_allow_create=true
+
+# Allow group update in LDAP backend. (boolean value)
+#group_allow_update=true
+
+# Allow group deletion in LDAP backend. (boolean value)
+#group_allow_delete=true
+
+# Additional attribute mappings for groups. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#group_additional_attribute_mapping=
+
+# CA certificate file path for communicating with LDAP
+# servers. (string value)
+#tls_cacertfile=<None>
+
+# CA certificate directory path for communicating with LDAP
+# servers. (string value)
+#tls_cacertdir=<None>
+
+# Enable TLS for communicating with LDAP servers. (boolean
+# value)
+#use_tls=false
+
+# Valid options for tls_req_cert are demand, never, and allow.
+# (string value)
+#tls_req_cert=demand
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[memcache]
+
+#
+# Options defined in keystone
+#
+
+# Memcache servers in the format of "host:port". (list value)
+#servers=localhost:11211
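+# Illustrative example with two hypothetical memcached hosts:
+#servers = 192.0.2.10:11211,192.0.2.11:11211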
+
+# Number of compare-and-set attempts to make when using
+# compare-and-set in the token memcache back end. (integer
+# value)
+#max_compare_and_set_retry=16
+
+
+[oauth1]
+
+#
+# Options defined in keystone
+#
+
+# Credential backend driver. (string value)
+#driver=keystone.contrib.oauth1.backends.sql.OAuth1
+
+# Duration (in seconds) for the OAuth Request Token. (integer
+# value)
+#request_token_duration=28800
+
+# Duration (in seconds) for the OAuth Access Token. (integer
+# value)
+#access_token_duration=86400
+
+
+[os_inherit]
+
+#
+# Options defined in keystone
+#
+
+# Role-assignment inheritance to projects from the owning domain
+# can be optionally enabled. (boolean value)
+#enabled=false
+
+
+[paste_deploy]
+
+#
+# Options defined in keystone
+#
+
+# Name of the paste configuration file that defines the
+# available pipelines. (string value)
+#config_file=keystone-paste.ini
+
+
+[policy]
+
+#
+# Options defined in keystone
+#
+
+# Policy backend driver. (string value)
+#driver=keystone.policy.backends.sql.Policy
+
+# Maximum number of entities that will be returned in a policy
+# collection. (integer value)
+#list_limit=<None>
+
+
+[revoke]
+
+#
+# Options defined in keystone
+#
+
+# An implementation of the backend for persisting revocation
+# events. (string value)
+#driver=keystone.contrib.revoke.backends.kvs.Revoke
+
+# This value (calculated in seconds) is added to token
+# expiration before a revocation event may be removed from the
+# backend. (integer value)
+#expiration_buffer=1800
+
+# Toggle for revocation event caching. This has no effect
+# unless global caching is enabled. (boolean value)
+#caching=true
+
+
+[signing]
+
+#
+# Options defined in keystone
+#
+
+# Deprecated in favor of provider in the [token] section.
+# (string value)
+#token_format=<None>
+
+# Path of the certfile for token signing. For non-production
+# environments, you may be interested in using `keystone-
+# manage pki_setup` to generate self-signed certificates.
+# (string value)
+#certfile=/etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for token signing. (string value)
+#keyfile=/etc/keystone/ssl/private/signing_key.pem
+
+# Path of the CA for token signing. (string value)
+#ca_certs=/etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA key for token signing. (string value)
+#ca_key=/etc/keystone/ssl/private/cakey.pem
+
+# Key size (in bits) for token signing cert (auto generated
+# certificate). (integer value)
+#key_size=2048
+
+# Days the token signing cert is valid for (auto generated
+# certificate). (integer value)
+#valid_days=3650
+
+# Certificate subject (auto generated certificate) for token
+# signing. (string value)
+#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com
+
+
+[ssl]
+
+#
+# Options defined in keystone
+#
+
+# Toggle for SSL support on the Keystone eventlet servers.
+# (boolean value)
+#enable=false
+
+# Path of the certfile for SSL. For non-production
+# environments, you may be interested in using `keystone-
+# manage ssl_setup` to generate self-signed certificates.
+# (string value)
+#certfile=/etc/keystone/ssl/certs/keystone.pem
+
+# Path of the keyfile for SSL. (string value)
+#keyfile=/etc/keystone/ssl/private/keystonekey.pem
+
+# Path of the ca cert file for SSL. (string value)
+#ca_certs=/etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA key file for SSL. (string value)
+#ca_key=/etc/keystone/ssl/private/cakey.pem
+
+# Require client certificate. (boolean value)
+#cert_required=false
+
+# SSL key length (in bits) (auto generated certificate).
+# (integer value)
+#key_size=1024
+
+# Days the certificate is valid for once signed (auto
+# generated certificate). (integer value)
+#valid_days=3650
+
+# SSL certificate subject (auto generated certificate).
+# (string value)
+#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=keystone
+
+
+[stats]
+
+#
+# Options defined in keystone
+#
+
+# Stats backend driver. (string value)
+#driver=keystone.contrib.stats.backends.kvs.Stats
+
+
+[token]
+
+#
+# Options defined in keystone
+#
+
+# External auth mechanisms that should add bind information to
+# token, e.g., kerberos,x509. (list value)
+#bind=
+
+# Enforcement policy on tokens presented to Keystone with bind
+# information. One of disabled, permissive, strict, required
+# or a specifically required bind mode, e.g., kerberos or x509
+# to require binding to that authentication. (string value)
+#enforce_token_bind=permissive
+
+# Amount of time a token should remain valid (in seconds).
+# (integer value)
+#expiration=3600
+
+# Controls the token construction, validation, and revocation
+# operations. Core providers are
+# "keystone.token.providers.[pkiz|pki|uuid].Provider". The
+# default provider is pkiz. (string value)
+#provider=<None>
+
+# Token persistence backend driver. (string value)
+#driver=keystone.token.backends.sql.Token
+
+# Toggle for token system caching. This has no effect unless
+# global caching is enabled. (boolean value)
+#caching=true
+
+# Time to cache the revocation list and the revocation events
+# if revoke extension is enabled (in seconds). This has no
+# effect unless global and token caching are enabled. (integer
+# value)
+#revocation_cache_time=3600
+
+# Time to cache tokens (in seconds). This has no effect unless
+# global and token caching are enabled. (integer value)
+#cache_time=<None>
+
+# Revoke token by token identifier. Setting revoke_by_id to
+# true enables various forms of enumerating tokens, e.g. `list
+# tokens for user`. These enumerations are processed to
+# determine the list of tokens to revoke. Only disable if you
+# are switching to using the Revoke extension with a backend
+# other than KVS, which stores events in memory. (boolean
+# value)
+#revoke_by_id=true
+
+# The hash algorithm to use for PKI tokens. This can be set to
+# any algorithm that hashlib supports. WARNING: Before
+# changing this value, the auth_token middleware must be
+# configured with the hash_algorithms, otherwise token
+# revocation will not be processed correctly. (string value)
+#hash_algorithm=md5
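+# Illustrative example: to switch to SHA-256 (only after the auth_token
+# middleware's hash_algorithms is configured accordingly, as noted above):
+#hash_algorithm = sha256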
+
+
+[trust]
+
+#
+# Options defined in keystone
+#
+
+# Delegation and impersonation features can be optionally
+# disabled. (boolean value)
+#enabled=true
+
+# Trust backend driver. (string value)
+#driver=keystone.trust.backends.sql.Trust
+
+
diff --git a/openstack/etc/keystone/logging.conf b/openstack/etc/keystone/logging.conf
new file mode 100644
index 00000000..7a538ae8
--- /dev/null
+++ b/openstack/etc/keystone/logging.conf
@@ -0,0 +1,39 @@
+[loggers]
+keys=root
+
+[formatters]
+keys=normal,normal_with_name,debug
+
+[handlers]
+keys=production,file,devel
+
+[logger_root]
+level=WARNING
+handlers=file
+
+[handler_production]
+class=handlers.SysLogHandler
+level=ERROR
+formatter=normal_with_name
+args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)
+
+[handler_file]
+class=FileHandler
+level=DEBUG
+formatter=normal_with_name
+args=('/var/log/keystone/keystone.log', 'a')
+
+[handler_devel]
+class=StreamHandler
+level=NOTSET
+formatter=debug
+args=(sys.stdout,)
+
+[formatter_normal]
+format=%(asctime)s %(levelname)s %(message)s
+
+[formatter_normal_with_name]
+format=(%(name)s): %(asctime)s %(levelname)s %(message)s
+
+[formatter_debug]
+format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s
diff --git a/openstack/etc/keystone/policy.json b/openstack/etc/keystone/policy.json
new file mode 100644
index 00000000..9c7e646e
--- /dev/null
+++ b/openstack/etc/keystone/policy.json
@@ -0,0 +1,144 @@
+{
+ "admin_required": "role:admin or is_admin:1",
+ "service_role": "role:service",
+ "service_or_admin": "rule:admin_required or rule:service_role",
+ "owner" : "user_id:%(user_id)s",
+ "admin_or_owner": "rule:admin_required or rule:owner",
+
+ "default": "rule:admin_required",
+
+ "identity:get_region": "",
+ "identity:list_regions": "",
+ "identity:create_region": "rule:admin_required",
+ "identity:update_region": "rule:admin_required",
+ "identity:delete_region": "rule:admin_required",
+
+ "identity:get_service": "rule:admin_required",
+ "identity:list_services": "rule:admin_required",
+ "identity:create_service": "rule:admin_required",
+ "identity:update_service": "rule:admin_required",
+ "identity:delete_service": "rule:admin_required",
+
+ "identity:get_endpoint": "rule:admin_required",
+ "identity:list_endpoints": "rule:admin_required",
+ "identity:create_endpoint": "rule:admin_required",
+ "identity:update_endpoint": "rule:admin_required",
+ "identity:delete_endpoint": "rule:admin_required",
+
+ "identity:get_domain": "rule:admin_required",
+ "identity:list_domains": "rule:admin_required",
+ "identity:create_domain": "rule:admin_required",
+ "identity:update_domain": "rule:admin_required",
+ "identity:delete_domain": "rule:admin_required",
+
+ "identity:get_project": "rule:admin_required",
+ "identity:list_projects": "rule:admin_required",
+ "identity:list_user_projects": "rule:admin_or_owner",
+ "identity:create_project": "rule:admin_required",
+ "identity:update_project": "rule:admin_required",
+ "identity:delete_project": "rule:admin_required",
+
+ "identity:get_user": "rule:admin_required",
+ "identity:list_users": "rule:admin_required",
+ "identity:create_user": "rule:admin_required",
+ "identity:update_user": "rule:admin_required",
+ "identity:delete_user": "rule:admin_required",
+ "identity:change_password": "rule:admin_or_owner",
+
+ "identity:get_group": "rule:admin_required",
+ "identity:list_groups": "rule:admin_required",
+ "identity:list_groups_for_user": "rule:admin_or_owner",
+ "identity:create_group": "rule:admin_required",
+ "identity:update_group": "rule:admin_required",
+ "identity:delete_group": "rule:admin_required",
+ "identity:list_users_in_group": "rule:admin_required",
+ "identity:remove_user_from_group": "rule:admin_required",
+ "identity:check_user_in_group": "rule:admin_required",
+ "identity:add_user_to_group": "rule:admin_required",
+
+ "identity:get_credential": "rule:admin_required",
+ "identity:list_credentials": "rule:admin_required",
+ "identity:create_credential": "rule:admin_required",
+ "identity:update_credential": "rule:admin_required",
+ "identity:delete_credential": "rule:admin_required",
+
+ "identity:ec2_get_credential": "rule:admin_or_owner",
+ "identity:ec2_list_credentials": "rule:admin_or_owner",
+ "identity:ec2_create_credential": "rule:admin_or_owner",
+ "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
+
+ "identity:get_role": "rule:admin_required",
+ "identity:list_roles": "rule:admin_required",
+ "identity:create_role": "rule:admin_required",
+ "identity:update_role": "rule:admin_required",
+ "identity:delete_role": "rule:admin_required",
+
+ "identity:check_grant": "rule:admin_required",
+ "identity:list_grants": "rule:admin_required",
+ "identity:create_grant": "rule:admin_required",
+ "identity:revoke_grant": "rule:admin_required",
+
+ "identity:list_role_assignments": "rule:admin_required",
+
+ "identity:get_policy": "rule:admin_required",
+ "identity:list_policies": "rule:admin_required",
+ "identity:create_policy": "rule:admin_required",
+ "identity:update_policy": "rule:admin_required",
+ "identity:delete_policy": "rule:admin_required",
+
+ "identity:check_token": "rule:admin_required",
+ "identity:validate_token": "rule:service_or_admin",
+ "identity:validate_token_head": "rule:service_or_admin",
+ "identity:revocation_list": "rule:service_or_admin",
+ "identity:revoke_token": "rule:admin_or_owner",
+
+ "identity:create_trust": "user_id:%(trust.trustor_user_id)s",
+ "identity:get_trust": "rule:admin_or_owner",
+ "identity:list_trusts": "",
+ "identity:list_roles_for_trust": "",
+ "identity:check_role_for_trust": "",
+ "identity:get_role_for_trust": "",
+ "identity:delete_trust": "",
+
+ "identity:create_consumer": "rule:admin_required",
+ "identity:get_consumer": "rule:admin_required",
+ "identity:list_consumers": "rule:admin_required",
+ "identity:delete_consumer": "rule:admin_required",
+ "identity:update_consumer": "rule:admin_required",
+
+ "identity:authorize_request_token": "rule:admin_required",
+ "identity:list_access_token_roles": "rule:admin_required",
+ "identity:get_access_token_role": "rule:admin_required",
+ "identity:list_access_tokens": "rule:admin_required",
+ "identity:get_access_token": "rule:admin_required",
+ "identity:delete_access_token": "rule:admin_required",
+
+ "identity:list_projects_for_endpoint": "rule:admin_required",
+ "identity:add_endpoint_to_project": "rule:admin_required",
+ "identity:check_endpoint_in_project": "rule:admin_required",
+ "identity:list_endpoints_for_project": "rule:admin_required",
+ "identity:remove_endpoint_from_project": "rule:admin_required",
+
+ "identity:create_identity_provider": "rule:admin_required",
+ "identity:list_identity_providers": "rule:admin_required",
+ "identity:get_identity_providers": "rule:admin_required",
+ "identity:update_identity_provider": "rule:admin_required",
+ "identity:delete_identity_provider": "rule:admin_required",
+
+ "identity:create_protocol": "rule:admin_required",
+ "identity:update_protocol": "rule:admin_required",
+ "identity:get_protocol": "rule:admin_required",
+ "identity:list_protocols": "rule:admin_required",
+ "identity:delete_protocol": "rule:admin_required",
+
+ "identity:create_mapping": "rule:admin_required",
+ "identity:get_mapping": "rule:admin_required",
+ "identity:list_mappings": "rule:admin_required",
+ "identity:delete_mapping": "rule:admin_required",
+ "identity:update_mapping": "rule:admin_required",
+
+ "identity:list_projects_for_groups": "",
+ "identity:list_domains_for_groups": "",
+
+ "identity:list_revoke_events": ""
+}
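
Each entry in this policy file maps a Keystone API action to a rule expression: the right-hand side may name another rule (rule:admin_required), compare a token attribute against the request target (user_id:%(target.credential.user_id)s, as in the ec2_delete_credential entry), or be empty to permit any authenticated caller, as for the trust and list_revoke_events entries. A sketch of a combined expression, using a hypothetical action name:

    "identity:example_action": "rule:admin_required or user_id:%(target.user.id)s"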
diff --git a/openstack/etc/logrotate.d/openstack-glance-api b/openstack/etc/logrotate.d/openstack-glance-api
new file mode 100644
index 00000000..fa9b0881
--- /dev/null
+++ b/openstack/etc/logrotate.d/openstack-glance-api
@@ -0,0 +1,7 @@
+/var/log/glance/glance-api.log {
+ daily
+ missingok
+ compress
+ delaycompress
+ notifempty
+}
diff --git a/openstack/etc/logrotate.d/openstack-glance-registry b/openstack/etc/logrotate.d/openstack-glance-registry
new file mode 100644
index 00000000..830fa793
--- /dev/null
+++ b/openstack/etc/logrotate.d/openstack-glance-registry
@@ -0,0 +1,7 @@
+/var/log/glance/glance-registry.log {
+ daily
+ missingok
+ compress
+ delaycompress
+ notifempty
+}
diff --git a/openstack/etc/logrotate.d/openstack-keystone b/openstack/etc/logrotate.d/openstack-keystone
new file mode 100644
index 00000000..2709c72a
--- /dev/null
+++ b/openstack/etc/logrotate.d/openstack-keystone
@@ -0,0 +1,8 @@
+/var/log/keystone/*.log {
+ daily
+ missingok
+ rotate 5
+ compress
+ minsize 100k
+ copytruncate
+}
\ No newline at end of file
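
All three logrotate fragments rotate the service logs daily; the keystone one additionally keeps five rotations and uses copytruncate so the daemon never has to reopen its log file. A fragment can be dry-run with logrotate's debug mode, e.g.:

    logrotate -d /etc/logrotate.d/openstack-keystone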
diff --git a/openstack/etc/nova/api-paste.ini b/openstack/etc/nova/api-paste.ini
new file mode 100644
index 00000000..5f50e8a6
--- /dev/null
+++ b/openstack/etc/nova/api-paste.ini
@@ -0,0 +1,118 @@
+############
+# Metadata #
+############
+[composite:metadata]
+use = egg:Paste#urlmap
+/: meta
+
+[pipeline:meta]
+pipeline = ec2faultwrap logrequest metaapp
+
+[app:metaapp]
+paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
+
+#######
+# EC2 #
+#######
+
+[composite:ec2]
+use = egg:Paste#urlmap
+/services/Cloud: ec2cloud
+
+[composite:ec2cloud]
+use = call:nova.api.auth:pipeline_factory
+noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
+keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
+
+[filter:ec2faultwrap]
+paste.filter_factory = nova.api.ec2:FaultWrapper.factory
+
+[filter:logrequest]
+paste.filter_factory = nova.api.ec2:RequestLogging.factory
+
+[filter:ec2lockout]
+paste.filter_factory = nova.api.ec2:Lockout.factory
+
+[filter:ec2keystoneauth]
+paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
+
+[filter:ec2noauth]
+paste.filter_factory = nova.api.ec2:NoAuth.factory
+
+[filter:cloudrequest]
+controller = nova.api.ec2.cloud.CloudController
+paste.filter_factory = nova.api.ec2:Requestify.factory
+
+[filter:authorizer]
+paste.filter_factory = nova.api.ec2:Authorizer.factory
+
+[filter:validator]
+paste.filter_factory = nova.api.ec2:Validator.factory
+
+[app:ec2executor]
+paste.app_factory = nova.api.ec2:Executor.factory
+
+#############
+# OpenStack #
+#############
+
+[composite:osapi_compute]
+use = call:nova.api.openstack.urlmap:urlmap_factory
+/: oscomputeversions
+/v1.1: openstack_compute_api_v2
+/v2: openstack_compute_api_v2
+/v3: openstack_compute_api_v3
+
+[composite:openstack_compute_api_v2]
+use = call:nova.api.auth:pipeline_factory
+noauth = compute_req_id faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
+keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
+keystone_nolimit = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
+
+[composite:openstack_compute_api_v3]
+use = call:nova.api.auth:pipeline_factory_v3
+noauth = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3
+keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v3
+
+[filter:request_id]
+paste.filter_factory = nova.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:compute_req_id]
+paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:noauth]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:noauth_v3]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddlewareV3.factory
+
+[filter:ratelimit]
+paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory
+
+[app:osapi_compute_app_v2]
+paste.app_factory = nova.api.openstack.compute:APIRouter.factory
+
+[app:osapi_compute_app_v3]
+paste.app_factory = nova.api.openstack.compute:APIRouterV3.factory
+
+[pipeline:oscomputeversions]
+pipeline = faultwrap oscomputeversionapp
+
+[app:oscomputeversionapp]
+paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
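
The composite sections above hand each request to nova.api.auth:pipeline_factory, which selects the noauth or keystone pipeline named here according to the auth_strategy option in nova.conf, so enabling Keystone authentication normally requires no edit to this file; the authtoken filter then validates tokens using the [keystone_authtoken] settings. A minimal sketch of the corresponding nova.conf stanza (values illustrative):

    [DEFAULT]
    auth_strategy = keystone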
diff --git a/openstack/etc/nova/cells.json b/openstack/etc/nova/cells.json
new file mode 100644
index 00000000..cc74930d
--- /dev/null
+++ b/openstack/etc/nova/cells.json
@@ -0,0 +1,26 @@
+{
+ "parent": {
+ "name": "parent",
+ "api_url": "http://api.example.com:8774",
+ "transport_url": "rabbit://rabbit.example.com",
+ "weight_offset": 0.0,
+ "weight_scale": 1.0,
+ "is_parent": true
+ },
+ "cell1": {
+ "name": "cell1",
+ "api_url": "http://api.example.com:8774",
+ "transport_url": "rabbit://rabbit1.example.com",
+ "weight_offset": 0.0,
+ "weight_scale": 1.0,
+ "is_parent": false
+ },
+ "cell2": {
+ "name": "cell2",
+ "api_url": "http://api.example.com:8774",
+ "transport_url": "rabbit://rabbit2.example.com",
+ "weight_offset": 0.0,
+ "weight_scale": 1.0,
+ "is_parent": false
+ }
+}
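
Each entry in cells.json names a cell together with its compute API endpoint, its RabbitMQ transport URL and the weighting the cells scheduler should apply; the file is only consulted when cell support is switched on. A sketch of enabling file-based cells in nova.conf, assuming this Nova release provides the cells_config option:

    [cells]
    enable = true
    cells_config = /etc/nova/cells.json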
diff --git a/openstack/etc/nova/logging.conf b/openstack/etc/nova/logging.conf
new file mode 100644
index 00000000..5482a040
--- /dev/null
+++ b/openstack/etc/nova/logging.conf
@@ -0,0 +1,81 @@
+[loggers]
+keys = root, nova
+
+[handlers]
+keys = stderr, stdout, watchedfile, syslog, null
+
+[formatters]
+keys = context, default
+
+[logger_root]
+level = WARNING
+handlers = null
+
+[logger_nova]
+level = INFO
+handlers = stderr
+qualname = nova
+
+[logger_amqp]
+level = WARNING
+handlers = stderr
+qualname = amqp
+
+[logger_amqplib]
+level = WARNING
+handlers = stderr
+qualname = amqplib
+
+[logger_sqlalchemy]
+level = WARNING
+handlers = stderr
+qualname = sqlalchemy
+# "level = INFO" logs SQL queries.
+# "level = DEBUG" logs SQL queries and results.
+# "level = WARNING" logs neither. (Recommended for production systems.)
+
+[logger_boto]
+level = WARNING
+handlers = stderr
+qualname = boto
+
+[logger_suds]
+level = INFO
+handlers = stderr
+qualname = suds
+
+[logger_eventletwsgi]
+level = WARNING
+handlers = stderr
+qualname = eventlet.wsgi.server
+
+[handler_stderr]
+class = StreamHandler
+args = (sys.stderr,)
+formatter = context
+
+[handler_stdout]
+class = StreamHandler
+args = (sys.stdout,)
+formatter = context
+
+[handler_watchedfile]
+class = handlers.WatchedFileHandler
+args = ('nova.log',)
+formatter = context
+
+[handler_syslog]
+class = handlers.SysLogHandler
+args = ('/dev/log', handlers.SysLogHandler.LOG_USER)
+formatter = context
+
+[handler_null]
+class = nova.openstack.common.log.NullHandler
+formatter = default
+args = ()
+
+[formatter_context]
+class = nova.openstack.common.log.ContextFormatter
+
+[formatter_default]
+format = %(message)s
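
Note that only root and nova are listed under the [loggers] keys, so Python's fileConfig will ignore the per-library sections (amqp, amqplib, sqlalchemy, boto, suds, eventlet.wsgi.server) defined further down. If those levels are intended to take effect, the keys line would need to enumerate them, roughly:

    [loggers]
    keys = root, nova, amqp, amqplib, sqlalchemy, boto, suds, eventletwsgi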
diff --git a/openstack/etc/nova/nova.conf b/openstack/etc/nova/nova.conf
new file mode 100644
index 00000000..d363a979
--- /dev/null
+++ b/openstack/etc/nova/nova.conf
@@ -0,0 +1,597 @@
+# Full list of options available at: http://wiki.openstack.org/NovaConfigOptions
+[DEFAULT]
+
+### nova.availability_zones ###
+###############################
+# availability_zone to show internal services under (string value)
+#internal_service_availability_zone=internal
+
+# default compute node availability_zone (string value)
+#default_availability_zone=nova
+
+### nova.crypto ###
+###################
+# Filename of root CA (string value)
+#ca_file=cacert.pem
+
+# Filename of private key (string value)
+#key_file=private/cakey.pem
+
+# Filename of root Certificate Revocation List (string value)
+#crl_file=crl.pem
+
+# Where we keep our keys (string value)
+#keys_path=$state_path/keys
+
+# Where we keep our root CA (string value)
+#ca_path=$state_path/CA
+
+# Should we use a CA for each project? (boolean value)
+#use_project_ca=false
+
+# Subject for certificate for users, %s for project, user,
+# timestamp (string value)
+#user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s
+
+# Subject for certificate for projects, %s for project,
+# timestamp (string value)
+#project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s
+
+### nova.exception ###
+# make exception message format errors fatal (boolean value)
+#fatal_exception_format_errors=false
+
+### nova.manager ###
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+#############################
+# Mandatory general options #
+#############################
+# ip address of this host (string value)
+my_ip=##NOVA_HOST##
+#use_ipv6=false
+
+
+########
+# APIs #
+########
+# Selects the type of APIs you want to activate.
+# Each API will bind on a specific port.
+# Compute nodes should run only the metadata API,
+# a nova API endpoint node should run osapi_compute.
+# If you want to use nova-volume you can also enable
+# osapi_volume, but if you want to run cinder, do not
+# activate it.
+# The list of APIs is: ec2,osapi_compute,metadata,osapi_volume
+enabled_apis=ec2,osapi_compute,metadata
+
+# NOVA API #
+# # # # # #
+#osapi_compute_listen="0.0.0.0"
+#osapi_compute_listen_port=8774
+
+#api_paste_config=api-paste.ini
+
+# Allows use of instance password during server creation
+#enable_instance_password=true
+
+
+# Name of this node. This can be an opaque identifier. It is
+# not necessarily a hostname, FQDN, or IP address. However,
+# the node name must be valid within an AMQP key, and if using
+# ZeroMQ, a valid hostname, FQDN, or IP address (string value)
+#host="firefly-2.local"
+
+#######################
+# Nova API extensions #
+#######################
+# osapi compute extension to load (multi valued)
+#osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions
+
+# Specify list of extensions to load when using
+# osapi_compute_extension option with
+# nova.api.openstack.compute.contrib.select_extensions (list value)
+#osapi_compute_ext_list=""
+
+# Permit instance snapshot operations. (boolean value)
+#allow_instance_snapshots=true
+
+# S3 #
+# # #
+#s3_host=$my_ip
+#s3_port=3333
+
+# EC2 API #
+# # # # # #
+#ec2_host="$my_ip"
+#ec2_dmz_host="$my_ip"
+#ec2_private_dns_show_ip=True
+#ec2_path="/services/Cloud"
+#ec2_port=8773
+# the protocol to use when connecting to the ec2 api server (http, https) (string value)
+#ec2_scheme=http
+
+# port and IP for ec2 api to listen
+#ec2_listen="0.0.0.0"
+#ec2_listen_port=8773
+
+# Metadata API #
+# # # # # # # #
+#metadata_host=$my_ip
+#metadata_port=8775
+#metadata_listen=0.0.0.0
+
+########
+# MISC #
+########
+#resume_guests_state_on_host_boot=false
+#instance_name_template="instance-%08x"
+# Inject the admin password at boot time, without an agent.
+#libvirt_inject_password=false
+
+########
+# LOGS #
+########
+#log-date-format="%Y-%m-%d %H:%M:%S"
+#debug=false
+
+##########
+# SYSTEM #
+##########
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+rootwrap_config=/etc/nova/rootwrap.conf
+#memcached_servers=<None>
+
+##################
+# AUTHENTICATION #
+##################
+auth_strategy=keystone
+# Seconds for auth tokens to linger
+
+#############
+# SCHEDULER #
+#############
+#scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
+
+####################
+# VOLUMES / CINDER #
+####################
+# The full class name of the volume API class to use (string value)
+#volume_api_class=nova.volume.cinder.API
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure=false
+
+# Allow attach between instance and volume in different
+# availability zones. (boolean value)
+#cinder_cross_az_attach=true
+
+# Libvirt handlers for remote volumes. (list value)
+#libvirt_volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver,aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver,glusterfs=nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver,fibre_channel=nova.virt.libvirt.volume.LibvirtFibreChannelVolumeDriver,scality=nova.virt.libvirt.volume.LibvirtScalityVolumeDriver
+
+############
+# RABBITMQ #
+############
+rabbit_host = ##RABBIT_HOST##
+#fake_rabbit=false
+#rabbit_virtual_host=/
+rabbit_userid = ##RABBIT_USERID##
+rabbit_password = ##RABBIT_PASSWORD##
+#rabbit_port=5672
+#rabbit_use_ssl=false
+#rabbit_retry_interval=1
+# The messaging module to use, defaults to kombu (works for rabbit).
+# You can also use qpid: nova.openstack.common.rpc.impl_qpid
+rpc_backend = nova.openstack.common.rpc.impl_kombu
+
+##########
+# GLANCE #
+##########
+glance_host=##GLANCE_HOST##
+#glance_port=9292
+#glance_protocol=http
+
+# A list of the glance api servers available to nova. Prefix
+# with https:// for ssl-based glance api servers.
+# ([hostname|ip]:port) (list value)
+#glance_api_servers=$glance_host:$glance_port
+#glance_api_servers=192.168.0.1:9292
+
+# Allow to perform insecure SSL (https) requests to glance (boolean value)
+#glance_api_insecure=false
+
+# Cache glance images locally
+#cache_images=true
+# Number of retries when downloading an image from glance (integer value)
+#glance_num_retries=0
+
+###############################
+# Type of network APIs to use #
+###############################
+# The full class name of the network API class to use (string value)
+# Possible values are:
+# nova.network.api.API (if you wish to use nova-network)
+# nova.network.neutronv2.api.API (if you want to use Neutron)
+network_api_class=nova.network.neutronv2.api.API
+
+# Type of security group API. Possible values are:
+# nova (if you are using nova-network)
+# neutron (if you use neutron)
+security_group_api = neutron
+
+# Driver used to create ethernet devices. (string value)
+# When using linux net, use: nova.network.linux_net.LinuxBridgeInterfaceDriver
+# for Neutron, use: nova.network.linux_net.LinuxOVSInterfaceDriver
+linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
+
+# Firewall type to use. (defaults to hypervisor specific iptables driver) (string value)
+# For linux net, use: nova.virt.libvirt.firewall.IptablesFirewallDriver
+# For Neutron and OVS, use: nova.virt.firewall.NoopFirewallDriver (since this is handled by Neutron)
+###firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+
+#######################
+# NETWORK (linux net) #
+#######################
+#network_manager=nova.network.manager.VlanManager
+#force_dhcp_release=false
+#dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf
+#dhcpbridge=$bindir/nova-dhcpbridge
+#dhcp_lease_time=120
+# Firewall driver (defaults to hypervisor specific iptables driver) (string value)
+#firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
+# Interface for public IP addresses (default: eth0) (string value)
+#public_interface=br-ext
+# vlans will bridge into this interface if set (default: <None>) (string value)
+# FlatDhcp will bridge into this interface if set (default: <None>) (string value)
+#vlan_interface=eth1
+# Bridge for simple network instances (default: <None>) (string value)
+#flat_network_bridge=br100
+# FlatDhcp will bridge into this interface if set (default: <None>) (string value)
+#flat_interface=eth0
+
+# Set it to the /32 of your metadata server if you have just one.
+# It is a CIDR so that, if there are multiple services, they can
+# keep using the internal private IPs.
+# A list of dmz ranges that should be accepted (list value)
+#dmz_cidr=169.254.169.254/32
+# Name of Open vSwitch bridge used with linuxnet (string value)
+#linuxnet_ovs_integration_bridge="br-int"
+#routing_source_ip="$my_ip"
+# Only first nic of vm will get default gateway from dhcp server
+#use_single_default_gateway=false
+
+###########
+# Neutron #
+###########
+# This is the URL of your neutron server:
+neutron_url=##NOVA_INTERNAL_URL##
+#neutron_auth_strategy=keystone
+neutron_admin_tenant_name=admin
+neutron_admin_username=##NOVA_SERVICE_USER##
+neutron_admin_password=##NOVA_SERVICE_PASSWORD##
+# This is the URL of your Keystone server
+#neutron_admin_auth_url=http://127.0.0.1:35357/v2.0
+
+# What's below is only needed for nova-compute.
+
+# Set flag to indicate Neutron will proxy metadata requests
+# and resolve instance ids. This is needed to use neutron-metadata-agent
+# (instead of the metadata server of nova-api,
+# which doesn't work with neutron) (boolean value)
+#service_neutron_metadata_proxy=True
+
+# Shared secret used to validate proxied Neutron metadata requests.
+# This password should match what is in /etc/neutron/metadata_agent.ini
+# (string value)
+#neutron_metadata_proxy_shared_secret=
+
+#################
+# NOVNC CONSOLE #
+#################
+# With the Debian package, the spicehtml5 console is the default. To
+# enable NoVNC instead, enable the switch below, disable SPICE in this
+# nova.conf file as well (see far below), then edit
+# /etc/default/nova-consoleproxy to switch to NoVNC, shut down SPICE with
+# /etc/init.d/nova-spicehtml5proxy stop, and finally start nova-novncproxy.
+# Do not forget to restart the Nova daemons and restart your VMs if you want to
+# use NoVNC from now on (a VM's video card needs to be attached to a console
+# type, and VMs accept only one video card at a time).
+vnc_enabled=false
+#novncproxy_base_url=http://192.168.0.1:6080/vnc_auto.html
+# Change vncserver_proxyclient_address and vncserver_listen to match each compute host
+#vncserver_proxyclient_address=127.0.0.1
+vncserver_listen=0.0.0.0
+vnc_keymap="en-us"
+
+######################################
+# nova-xenvncproxy (eg: xvpvncproxy) #
+######################################
+# See NoVNC comments above for switching away from SPICE to XVP
+#xvpvncproxy_host="0.0.0.0"
+#xvpvncproxy_port=6081
+
+#########
+# QUOTA #
+#########
+# number of instances allowed per project (integer value)
+#quota_instances=10
+# number of instance cores allowed per project (integer value)
+#quota_cores=20
+# megabytes of instance ram allowed per project (integer value)
+#quota_ram=51200
+# number of floating ips allowed per project (integer value)
+#quota_floating_ips=10
+# number of metadata items allowed per instance (integer value)
+#quota_metadata_items=128
+# number of injected files allowed (integer value)
+#quota_injected_files=5
+# number of bytes allowed per injected file (integer value)
+#quota_injected_file_content_bytes=10240
+# number of bytes allowed per injected file path (integer value)
+#quota_injected_file_path_bytes=255
+# number of security groups per project (integer value)
+#quota_security_groups=10
+# number of security rules per security group (integer value)
+#quota_security_group_rules=20
+# number of key pairs per user (integer value)
+#quota_key_pairs=100
+# number of seconds until a reservation expires (integer value)
+#reservation_expire=86400
+# count of reservations until usage is refreshed (integer value)
+#until_refresh=0
+# number of seconds between subsequent usage refreshes (integer value)
+#max_age=0
+# default driver to use for quota checks (string value)
+#quota_driver=nova.quota.DbQuotaDriver
+
+############
+# DATABASE #
+############
+[database]
+connection=sqlite:////var/lib/nova/novadb
+
+
+#############
+# CONDUCTOR #
+#############
+[conductor]
+# Perform nova-conductor operations locally (boolean value)
+#use_local=false
+# the topic conductor nodes listen on (string value)
+#topic=conductor
+# full class name for the Manager for conductor (string value)
+#manager=nova.conductor.manager.ConductorManager
+
+#########
+# CELLS #
+#########
+[cells]
+# Cells communication driver to use (string value)
+#driver=nova.cells.rpc_driver.CellsRPCDriver
+
+# Number of seconds after an instance was updated or deleted
+# to continue to update cells (integer value)
+#instance_updated_at_threshold=3600
+
+# Number of instances to update per periodic task run (integer
+# value)
+#instance_update_num_instances=1
+
+# Maximum number of hops for cells routing. (integer value)
+#max_hop_count=10
+
+# Cells scheduler to use (string value)
+#scheduler=nova.cells.scheduler.CellsScheduler
+
+# Enable cell functionality (boolean value)
+#enable=false
+
+# the topic cells nodes listen on (string value)
+#topic=cells
+
+# Manager for cells (string value)
+#manager=nova.cells.manager.CellsManager
+
+# name of this cell (string value)
+#name=nova
+
+# Key/Multi-value list with the capabilities of the cell (list
+# value)
+#capabilities=hypervisor=xenserver;kvm,os=linux;windows
+
+# Seconds to wait for response from a call to a cell. (integer
+# value)
+#call_timeout=60
+
+# Percentage of cell capacity to hold in reserve. Affects both
+# memory and disk utilization (floating point value)
+#reserve_percent=10.0
+
+# Type of cell: api or compute (string value)
+#cell_type=<None>
+
+# Base queue name to use when communicating between cells.
+# Various topics by message type will be appended to this.
+# (string value)
+#rpc_driver_queue_base=cells.intercell
+
+# Filter classes the cells scheduler should use. An entry of
+# "nova.cells.filters.all_filters"maps to all cells filters
+# included with nova. (list value)
+#scheduler_filter_classes=nova.cells.filters.all_filters
+
+# Weigher classes the cells scheduler should use. An entry of
+# "nova.cells.weights.all_weighers"maps to all cell weighers
+# included with nova. (list value)
+#scheduler_weight_classes=nova.cells.weights.all_weighers
+
+# How many retries when no cells are available. (integer
+# value)
+#scheduler_retries=10
+
+# How often to retry in seconds when no cells are available.
+# (integer value)
+#scheduler_retry_delay=2
+
+# Seconds between getting fresh cell info from db. (integer
+# value)
+#db_check_interval=60
+
+# Multiplier used to weigh mute children. (The value should
+# be negative.) (floating point value)
+#mute_weight_multiplier=-10.0
+
+# Weight value assigned to mute children. (The value should
+# be positive.) (floating point value)
+#mute_weight_value=1000.0
+
+# Number of seconds after which a lack of capability and
+# capacity updates signals the child cell is to be treated as
+# a mute. (integer value)
+#mute_child_interval=300
+
+# Multiplier used for weighing ram. Negative numbers mean to
+# stack vs spread. (floating point value)
+#ram_weight_multiplier=10.0
+
+#############
+# BAREMETAL #
+#############
+[baremetal]
+# The backend to use for bare-metal database (string value)
+#db_backend=sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# bare-metal database (string value)
+#sql_connection=sqlite:///$state_path/baremetal_$sqlite_db
+
+# Whether baremetal compute injects password or not (boolean value)
+#inject_password=true
+
+# Template file for injected network (string value)
+#injected_network_template=$pybasedir/nova/virt/baremetal/interfaces.template
+
+# Baremetal VIF driver. (string value)
+#vif_driver=nova.virt.baremetal.vif_driver.BareMetalVIFDriver
+
+# Baremetal volume driver. (string value)
+#volume_driver=nova.virt.baremetal.volume_driver.LibvirtVolumeDriver
+
+# A list of additional capabilities corresponding to
+# instance_type_extra_specs for this compute host to
+# advertise. Valid entries are name=value pairs. For example,
+# "key1:val1, key2:val2" (list value)
+#instance_type_extra_specs=
+
+# Baremetal driver back-end (pxe or tilera) (string value)
+#driver=nova.virt.baremetal.pxe.PXE
+
+# Baremetal power management method (string value)
+#power_manager=nova.virt.baremetal.ipmi.IPMI
+
+# Baremetal compute node's tftp root path (string value)
+#tftp_root=/tftpboot
+
+# path to baremetal terminal program (string value)
+#terminal=shellinaboxd
+
+# path to baremetal terminal SSL cert(PEM) (string value)
+#terminal_cert_dir=<None>
+
+# path to directory stores pidfiles of baremetal_terminal
+# (string value)
+#terminal_pid_dir=$state_path/baremetal/console
+
+# maximal number of retries for IPMI operations (integer
+# value)
+#ipmi_power_retry=5
+
+# Default kernel image ID used in deployment phase (string
+# value)
+#deploy_kernel=<None>
+
+# Default ramdisk image ID used in deployment phase (string
+# value)
+#deploy_ramdisk=<None>
+
+# Template file for injected network config (string value)
+#net_config_template=$pybasedir/nova/virt/baremetal/net-dhcp.ubuntu.template
+
+# additional append parameters for baremetal PXE boot (string
+# value)
+#pxe_append_params=<None>
+
+# Template file for PXE configuration (string value)
+#pxe_config_template=$pybasedir/nova/virt/baremetal/pxe_config.template
+
+# Timeout for PXE deployments. Default: 0 (unlimited) (integer
+# value)
+#pxe_deploy_timeout=0
+
+# ip or name to virtual power host (string value)
+#virtual_power_ssh_host=
+
+# base command to use for virtual power (vbox, virsh) (string
+# value)
+#virtual_power_type=vbox
+
+# user to execute virtual power commands as (string value)
+#virtual_power_host_user=
+
+# password for virtual power host_user (string value)
+#virtual_power_host_pass=
+
+# Do not set this outside of dev/test environments. If a node does
+# not have a fixed PXE IP address, volumes are exported with
+# a globally open ACL (boolean value)
+#use_unsafe_iscsi=false
+
+# iSCSI IQN prefix used in baremetal volume connections.
+# (string value)
+#iscsi_iqn_prefix=iqn.2010-10.org.openstack.baremetal
+
+##########
+# VMWARE #
+##########
+[vmware]
+# Name of Integration Bridge (string value)
+#integration_bridge=br-int
+
+#########
+# SPICE #
+#########
+[spice]
+# location of spice html5 console proxy, in the form
+# "http://www.example.com:6082/spice_auto.html" (string value)
+html5proxy_base_url=http://127.0.0.1:6082/spice_auto.html
+
+# IP address on which instance spice server should listen (string value)
+server_listen=0.0.0.0
+
+# the address to which proxy clients (like nova-spicehtml5proxy) should connect (string value)
+server_proxyclient_address=$my_ip
+
+# enable spice related features (boolean value)
+enabled=true
+
+# enable spice guest agent support (boolean value)
+#agent_enabled=true
+
+# keymap for spice (string value)
+#keymap=en-us
+
+######################
+# Keystone authtoken #
+######################
+[keystone_authtoken]
+auth_host = 127.0.0.1
+auth_port = 35357
+auth_protocol = http
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USER%
+admin_password = %SERVICE_PASSWORD%
+auth_version = v2.0
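
The ##NOVA_HOST##, ##RABBIT_*##, ##GLANCE_HOST##, ##NOVA_SERVICE_*## and ##NOVA_INTERNAL_URL## tokens are placeholders, presumably filled in by the accompanying .configure extensions at deployment time rather than shipped literally. A sketch of substituting one of them by hand (host value illustrative):

    sed -i 's|##NOVA_HOST##|192.0.2.10|' /etc/nova/nova.conf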
diff --git a/openstack/etc/nova/nova.conf.example b/openstack/etc/nova/nova.conf.example
new file mode 100644
index 00000000..6b35ba91
--- /dev/null
+++ b/openstack/etc/nova/nova.conf.example
@@ -0,0 +1,3698 @@
+
+
+[DEFAULT]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call. (list value)
+#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+#rabbit_host=localhost
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+#rabbit_port=5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+#rabbit_userid=guest
+
+# The RabbitMQ password. (string value)
+#rabbit_password=guest
+
+# the RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# If passed, use a fake RabbitMQ provider. (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=nova
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+#notification_driver=
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+#rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in nova.availability_zones
+#
+
+# The availability_zone to show internal services under
+# (string value)
+#internal_service_availability_zone=internal
+
+# Default compute node availability_zone (string value)
+#default_availability_zone=nova
+
+
+#
+# Options defined in nova.crypto
+#
+
+# Filename of root CA (string value)
+#ca_file=cacert.pem
+
+# Filename of private key (string value)
+#key_file=private/cakey.pem
+
+# Filename of root Certificate Revocation List (string value)
+#crl_file=crl.pem
+
+# Where we keep our keys (string value)
+#keys_path=$state_path/keys
+
+# Where we keep our root CA (string value)
+#ca_path=$state_path/CA
+
+# Should we use a CA for each project? (boolean value)
+#use_project_ca=false
+
+# Subject for certificate for users, %s for project, user,
+# timestamp (string value)
+#user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s
+
+# Subject for certificate for projects, %s for project,
+# timestamp (string value)
+#project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s
+
+
+#
+# Options defined in nova.exception
+#
+
+# Make exception message format errors fatal (boolean value)
+#fatal_exception_format_errors=false
+
+
+#
+# Options defined in nova.netconf
+#
+
+# IP address of this host (string value)
+#my_ip=10.0.0.1
+
+# Name of this node. This can be an opaque identifier. It is
+# not necessarily a hostname, FQDN, or IP address. However,
+# the node name must be valid within an AMQP key, and if using
+# ZeroMQ, a valid hostname, FQDN, or IP address (string value)
+#host=nova
+
+# Use IPv6 (boolean value)
+#use_ipv6=false
+
+
+#
+# Options defined in nova.notifications
+#
+
+# If set, send compute.instance.update notifications on
+# instance state changes. Valid values are None for no
+# notifications, "vm_state" for notifications on VM state
+# changes, or "vm_and_task_state" for notifications on VM and
+# task state changes. (string value)
+#notify_on_state_change=<None>
+
+# If set, send api.fault notifications on caught exceptions in
+# the API service. (boolean value)
+#notify_api_faults=false
+
+# Default notification level for outgoing notifications
+# (string value)
+#default_notification_level=INFO
+
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=<None>
+
+
+#
+# Options defined in nova.paths
+#
+
+# Directory where the nova python module is installed (string
+# value)
+#pybasedir=/usr/lib/python/site-packages
+
+# Directory where nova binaries are installed (string value)
+#bindir=/usr/local/bin
+
+# Top-level directory for maintaining nova's state (string
+# value)
+#state_path=$pybasedir
+
+
+#
+# Options defined in nova.policy
+#
+
+# JSON file representing policy (string value)
+#policy_file=policy.json
+
+# Rule checked when requested rule is not found (string value)
+#policy_default_rule=default
+
+
+#
+# Options defined in nova.quota
+#
+
+# Number of instances allowed per project (integer value)
+#quota_instances=10
+
+# Number of instance cores allowed per project (integer value)
+#quota_cores=20
+
+# Megabytes of instance RAM allowed per project (integer
+# value)
+#quota_ram=51200
+
+# Number of floating IPs allowed per project (integer value)
+#quota_floating_ips=10
+
+# Number of fixed IPs allowed per project (this should be at
+# least the number of instances allowed) (integer value)
+#quota_fixed_ips=-1
+
+# Number of metadata items allowed per instance (integer
+# value)
+#quota_metadata_items=128
+
+# Number of injected files allowed (integer value)
+#quota_injected_files=5
+
+# Number of bytes allowed per injected file (integer value)
+#quota_injected_file_content_bytes=10240
+
+# Number of bytes allowed per injected file path (integer
+# value)
+#quota_injected_file_path_bytes=255
+
+# Number of security groups per project (integer value)
+#quota_security_groups=10
+
+# Number of security rules per security group (integer value)
+#quota_security_group_rules=20
+
+# Number of key pairs per user (integer value)
+#quota_key_pairs=100
+
+# Number of seconds until a reservation expires (integer
+# value)
+#reservation_expire=86400
+
+# Count of reservations until usage is refreshed (integer
+# value)
+#until_refresh=0
+
+# Number of seconds between subsequent usage refreshes
+# (integer value)
+#max_age=0
+
+# Default driver to use for quota checks (string value)
+#quota_driver=nova.quota.DbQuotaDriver
+
+
+#
+# Options defined in nova.service
+#
+
+# Seconds between nodes reporting state to datastore (integer
+# value)
+#report_interval=10
+
+# Enable periodic tasks (boolean value)
+#periodic_enable=true
+
+# Range of seconds to randomly delay when starting the
+# periodic task scheduler to reduce stampeding. (Disable by
+# setting to 0) (integer value)
+#periodic_fuzzy_delay=60
+
+# A list of APIs to enable by default (list value)
+#enabled_apis=ec2,osapi_compute,metadata
+
+# A list of APIs with enabled SSL (list value)
+#enabled_ssl_apis=
+
+# The IP address on which the EC2 API will listen. (string
+# value)
+#ec2_listen=0.0.0.0
+
+# The port on which the EC2 API will listen. (integer value)
+#ec2_listen_port=8773
+
+# Number of workers for EC2 API service. The default will be
+# equal to the number of CPUs available. (integer value)
+#ec2_workers=<None>
+
+# The IP address on which the OpenStack API will listen.
+# (string value)
+#osapi_compute_listen=0.0.0.0
+
+# The port on which the OpenStack API will listen. (integer
+# value)
+#osapi_compute_listen_port=8774
+
+# Number of workers for OpenStack API service. The default
+# will be the number of CPUs available. (integer value)
+#osapi_compute_workers=<None>
+
+# OpenStack metadata service manager (string value)
+#metadata_manager=nova.api.manager.MetadataManager
+
+# The IP address on which the metadata API will listen.
+# (string value)
+#metadata_listen=0.0.0.0
+
+# The port on which the metadata API will listen. (integer
+# value)
+#metadata_listen_port=8775
+
+# Number of workers for metadata service. The default will be
+# the number of CPUs available. (integer value)
+#metadata_workers=<None>
+
+# Full class name for the Manager for compute (string value)
+#compute_manager=nova.compute.manager.ComputeManager
+
+# Full class name for the Manager for console proxy (string
+# value)
+#console_manager=nova.console.manager.ConsoleProxyManager
+
+# Manager for console auth (string value)
+#consoleauth_manager=nova.consoleauth.manager.ConsoleAuthManager
+
+# Full class name for the Manager for cert (string value)
+#cert_manager=nova.cert.manager.CertManager
+
+# Full class name for the Manager for network (string value)
+#network_manager=nova.network.manager.VlanManager
+
+# Full class name for the Manager for scheduler (string value)
+#scheduler_manager=nova.scheduler.manager.SchedulerManager
+
+# Maximum time since last check-in for up service (integer
+# value)
+#service_down_time=60
+
+
+#
+# Options defined in nova.test
+#
+
+# File name of clean sqlite db (string value)
+#sqlite_clean_db=clean.sqlite
+
+
+#
+# Options defined in nova.utils
+#
+
+# Whether to log monkey patching (boolean value)
+#monkey_patch=false
+
+# List of modules/decorators to monkey patch (list value)
+#monkey_patch_modules=nova.api.ec2.cloud:nova.notifications.notify_decorator,nova.compute.api:nova.notifications.notify_decorator
+
+# Length of generated instance admin passwords (integer value)
+#password_length=12
+
+# Time period to generate instance usages for. Time period
+# must be hour, day, month or year (string value)
+#instance_usage_audit_period=month
+
+# Path to the rootwrap configuration file to use for running
+# commands as root (string value)
+#rootwrap_config=/etc/nova/rootwrap.conf
+
+# Explicitly specify the temporary working directory (string
+# value)
+#tempdir=<None>
+
+
+#
+# Options defined in nova.wsgi
+#
+
+# File name for the paste.deploy config for nova-api (string
+# value)
+#api_paste_config=api-paste.ini
+
+# A python format string that is used as the template to
+# generate log lines. The following values can be formatted
+# into it: client_ip, date_time, request_line, status_code,
+# body_length, wall_seconds. (string value)
+#wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
+
+# CA certificate file to use to verify connecting clients
+# (string value)
+#ssl_ca_file=<None>
+
+# SSL certificate of API server (string value)
+#ssl_cert_file=<None>
+
+# SSL private key of API server (string value)
+#ssl_key_file=<None>
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server
+# socket. Not supported on OS X. (integer value)
+#tcp_keepidle=600
+
+# Size of the pool of greenthreads used by wsgi (integer
+# value)
+#wsgi_default_pool_size=1000
+
+# Maximum line size of message headers to be accepted.
+# max_header_line may need to be increased when using large
+# tokens (typically those generated by the Keystone v3 API
+# with big service catalogs). (integer value)
+#max_header_line=16384
+
+
+#
+# Options defined in nova.api.auth
+#
+
+# Whether to use per-user rate limiting for the api. This
+# option is only used by v2 api. Rate limiting is removed from
+# v3 api. (boolean value)
+#api_rate_limit=false
+
+# The strategy to use for auth: noauth or keystone. (string
+# value)
+#auth_strategy=noauth
+
+# Treat X-Forwarded-For as the canonical remote address. Only
+# enable this if you have a sanitizing proxy. (boolean value)
+#use_forwarded_for=false
+
+
+#
+# Options defined in nova.api.ec2
+#
+
+# Number of failed auths before lockout. (integer value)
+#lockout_attempts=5
+
+# Number of minutes to lockout if triggered. (integer value)
+#lockout_minutes=15
+
+# Number of minutes for lockout window. (integer value)
+#lockout_window=15
+
+# URL to get token from ec2 request. (string value)
+#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens
+
+# Return the IP address as private dns hostname in describe
+# instances (boolean value)
+#ec2_private_dns_show_ip=false
+
+# Validate security group names according to EC2 specification
+# (boolean value)
+#ec2_strict_validation=true
+
+# Time in seconds before ec2 timestamp expires (integer value)
+#ec2_timestamp_expiry=300
+
+
+#
+# Options defined in nova.api.ec2.cloud
+#
+
+# The IP address of the EC2 API server (string value)
+#ec2_host=$my_ip
+
+# The internal IP address of the EC2 API server (string value)
+#ec2_dmz_host=$my_ip
+
+# The port of the EC2 API server (integer value)
+#ec2_port=8773
+
+# The protocol to use when connecting to the EC2 API server
+# (http, https) (string value)
+#ec2_scheme=http
+
+# The path prefix used to call the ec2 API server (string
+# value)
+#ec2_path=/services/Cloud
+
+# List of region=fqdn pairs separated by commas (list value)
+#region_list=
+
+
+#
+# Options defined in nova.api.metadata.base
+#
+
+# List of metadata versions to skip placing into the config
+# drive (string value)
+#config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
+
+# Driver to use for vendor data (string value)
+#vendordata_driver=nova.api.metadata.vendordata_json.JsonFileVendorData
+
+
+#
+# Options defined in nova.api.metadata.handler
+#
+
+# Set flag to indicate Neutron will proxy metadata requests
+# and resolve instance ids. (boolean value)
+#service_neutron_metadata_proxy=false
+
+# Shared secret used to validate proxied Neutron metadata requests
+# (string value)
+#neutron_metadata_proxy_shared_secret=
+
+
+#
+# Options defined in nova.api.metadata.vendordata_json
+#
+
+# File to load json formatted vendor data from (string value)
+#vendordata_jsonfile_path=<None>
+
+
+#
+# Options defined in nova.api.openstack.common
+#
+
+# The maximum number of items returned in a single response
+# from a collection resource (integer value)
+#osapi_max_limit=1000
+
+# Base URL that will be presented to users in links to the
+# OpenStack Compute API (string value)
+#osapi_compute_link_prefix=<None>
+
+# Base URL that will be presented to users in links to glance
+# resources (string value)
+#osapi_glance_link_prefix=<None>
+
+
+#
+# Options defined in nova.api.openstack.compute
+#
+
+# Permit instance snapshot operations. (boolean value)
+#allow_instance_snapshots=true
+
+
+#
+# Options defined in nova.api.openstack.compute.contrib
+#
+
+# Specify list of extensions to load when using
+# osapi_compute_extension option with
+# nova.api.openstack.compute.contrib.select_extensions (list
+# value)
+#osapi_compute_ext_list=
+
+
+#
+# Options defined in nova.api.openstack.compute.contrib.fping
+#
+
+# Full path to fping. (string value)
+#fping_path=/usr/sbin/fping
+
+
+#
+# Options defined in nova.api.openstack.compute.contrib.os_tenant_networks
+#
+
+# Enables or disables quota checking for tenant networks
+# (boolean value)
+#enable_network_quota=false
+
+# Control for checking for default networks (string value)
+#use_neutron_default_nets=False
+
+# Default tenant id when creating neutron networks (string
+# value)
+#neutron_default_tenant_id=default
+
+
+#
+# Options defined in nova.api.openstack.compute.extensions
+#
+
+# osapi compute extension to load (multi valued)
+#osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions
+
+
+#
+# Options defined in nova.api.openstack.compute.plugins.v3.hide_server_addresses
+#
+
+# List of instance states that should hide network info (list
+# value)
+#osapi_hide_server_address_states=building
+
+
+#
+# Options defined in nova.api.openstack.compute.servers
+#
+
+# Enables returning of the instance password by the relevant
+# server API calls such as create, rebuild or rescue. If the
+# hypervisor does not support password injection then the
+# password returned will not be correct (boolean value)
+#enable_instance_password=true
+
+
+#
+# Options defined in nova.api.sizelimit
+#
+
+# The maximum body size per osapi request (bytes) (integer
+# value)
+#osapi_max_request_body_size=114688
+
+
+#
+# Options defined in nova.cert.rpcapi
+#
+
+# The topic cert nodes listen on (string value)
+#cert_topic=cert
+
+
+#
+# Options defined in nova.cloudpipe.pipelib
+#
+
+# Image ID used when starting up a cloudpipe vpn server
+# (string value)
+#vpn_image_id=0
+
+# Flavor for vpn instances (string value)
+#vpn_flavor=m1.tiny
+
+# Template for cloudpipe instance boot script (string value)
+#boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template
+
+# Network to push into openvpn config (string value)
+#dmz_net=10.0.0.0
+
+# Netmask to push into openvpn config (string value)
+#dmz_mask=255.255.255.0
+
+# Suffix to add to project name for vpn key and secgroups
+# (string value)
+#vpn_key_suffix=-vpn
+
+
+#
+# Options defined in nova.cmd.novnc
+#
+
+# Record sessions to FILE.[session_number] (boolean value)
+#record=false
+
+# Become a daemon (background process) (boolean value)
+#daemon=false
+
+# Disallow non-encrypted connections (boolean value)
+#ssl_only=false
+
+# Source is ipv6 (boolean value)
+#source_is_ipv6=false
+
+# SSL certificate file (string value)
+#cert=self.pem
+
+# SSL key file (if separate from cert) (string value)
+#key=<None>
+
+# Run webserver on same port. Serve files from DIR. (string
+# value)
+#web=/usr/share/spice-html5
+
+
+#
+# Options defined in nova.cmd.novncproxy
+#
+
+# Host on which to listen for incoming requests (string value)
+#novncproxy_host=0.0.0.0
+
+# Port on which to listen for incoming requests (integer
+# value)
+#novncproxy_port=6080
+
+
+#
+# Options defined in nova.cmd.spicehtml5proxy
+#
+
+# Host on which to listen for incoming requests (string value)
+#spicehtml5proxy_host=0.0.0.0
+
+# Port on which to listen for incoming requests (integer
+# value)
+#spicehtml5proxy_port=6082
+
+
+#
+# Options defined in nova.compute.api
+#
+
+# Allow destination machine to match source for resize. Useful
+# when testing in single-host environments. (boolean value)
+#allow_resize_to_same_host=false
+
+# Allow migrate machine to the same host. Useful when testing
+# in single-host environments. (boolean value)
+#allow_migrate_to_same_host=false
+
+# Availability zone to use when user doesn't specify one
+# (string value)
+#default_schedule_zone=<None>
+
+# These are image properties which a snapshot should not
+# inherit from an instance (list value)
+#non_inheritable_image_properties=cache_in_nova,bittorrent
+
+# Kernel image that indicates not to use a kernel, but to use
+# a raw disk image instead (string value)
+#null_kernel=nokernel
+
+# When creating multiple instances with a single request using
+# the os-multiple-create API extension, this template will be
+# used to build the display name for each instance. The
+# benefit is that the instances end up with different
+# hostnames. To restore legacy behavior of every instance
+# having the same name, set this option to "%(name)s". Valid
+# keys for the template are: name, uuid, count. (string value)
+#multi_instance_display_name_template=%(name)s-%(uuid)s
+
+# Maximum number of devices that will result in a local image
+# being created on the hypervisor node. Setting this to 0
+# means nova will allow only boot from volume. A negative
+# number means unlimited. (integer value)
+#max_local_block_devices=3
+
+
+#
+# Options defined in nova.compute.flavors
+#
+
+# Default flavor to use for the EC2 API only. The Nova API
+# does not support a default flavor. (string value)
+#default_flavor=m1.small
+
+
+#
+# Options defined in nova.compute.manager
+#
+
+# Console proxy host to use to connect to instances on this
+# host. (string value)
+#console_host=nova
+
+# Name of network to use to set access IPs for instances
+# (string value)
+#default_access_ip_network_name=<None>
+
+# Whether to batch up the application of IPTables rules during
+# a host restart and apply all at the end of the init phase
+# (boolean value)
+#defer_iptables_apply=false
+
+# Where instances are stored on disk (string value)
+#instances_path=$state_path/instances
+
+# Generate periodic compute.instance.exists notifications
+# (boolean value)
+#instance_usage_audit=false
+
+# Number of 1 second retries needed in live_migration (integer
+# value)
+#live_migration_retry_count=30
+
+# Whether to start guests that were running before the host
+# rebooted (boolean value)
+#resume_guests_state_on_host_boot=false
+
+# Number of times to retry network allocation on failures
+# (integer value)
+#network_allocate_retries=0
+
+# The number of times to attempt to reap an instance's files.
+# (integer value)
+#maximum_instance_delete_attempts=5
+
+# Interval to pull network bandwidth usage info. Not supported
+# on all hypervisors. Set to 0 to disable. (integer value)
+#bandwidth_poll_interval=600
+
+# Interval to sync power states between the database and the
+# hypervisor (integer value)
+#sync_power_state_interval=600
+
+# Number of seconds between instance info_cache self healing
+# updates (integer value)
+#heal_instance_info_cache_interval=60
+
+# Interval in seconds for reclaiming deleted instances
+# (integer value)
+#reclaim_instance_interval=0
+
+# Interval in seconds for gathering volume usages (integer
+# value)
+#volume_usage_poll_interval=0
+
+# Interval in seconds for polling shelved instances to offload
+# (integer value)
+#shelved_poll_interval=3600
+
+# Time in seconds before a shelved instance is eligible for
+# removal from a host. -1 = never offload, 0 = offload when
+# shelved (integer value)
+#shelved_offload_time=0
+
+# Interval in seconds for retrying failed instance file
+# deletes (integer value)
+#instance_delete_interval=300
+
+# Action to take if a running deleted instance is
+# detected. Valid options are 'noop', 'log', 'shutdown', or
+# 'reap'. Set to 'noop' to take no action. (string value)
+#running_deleted_instance_action=reap
+
+# Number of seconds to wait between runs of the cleanup task.
+# (integer value)
+#running_deleted_instance_poll_interval=1800
+
+# Number of seconds after being deleted when a running
+# instance should be considered eligible for cleanup. (integer
+# value)
+#running_deleted_instance_timeout=0
+
+# Automatically hard reboot an instance if it has been stuck
+# in a rebooting state longer than N seconds. Set to 0 to
+# disable. (integer value)
+#reboot_timeout=0
+
+# Amount of time in seconds an instance can be in BUILD before
+# going into ERROR status. Set to 0 to disable. (integer value)
+#instance_build_timeout=0
+
+# Automatically unrescue an instance after N seconds. Set to 0
+# to disable. (integer value)
+#rescue_timeout=0
+
+# Automatically confirm resizes after N seconds. Set to 0 to
+# disable. (integer value)
+#resize_confirm_window=0
+
+
+#
+# Options defined in nova.compute.monitors
+#
+
+# Monitor classes available to the compute which may be
+# specified more than once. (multi valued)
+#compute_available_monitors=nova.compute.monitors.all_monitors
+
+# A list of monitors that can be used for getting compute
+# metrics. (list value)
+#compute_monitors=
+
+
+#
+# Options defined in nova.compute.resource_tracker
+#
+
+# Amount of disk in MB to reserve for the host (integer value)
+#reserved_host_disk_mb=0
+
+# Amount of memory in MB to reserve for the host (integer
+# value)
+#reserved_host_memory_mb=512
+
+# Class that will manage stats for the local compute host
+# (string value)
+#compute_stats_class=nova.compute.stats.Stats
+
+
+#
+# Options defined in nova.compute.rpcapi
+#
+
+# The topic compute nodes listen on (string value)
+#compute_topic=compute
+
+
+#
+# Options defined in nova.conductor.tasks.live_migrate
+#
+
+# Number of times to retry live-migration before failing. If
+# == -1, try until out of hosts. If == 0, only try once, no
+# retries. (integer value)
+#migrate_max_retries=-1
+
+
+#
+# Options defined in nova.console.manager
+#
+
+# Driver to use for the console proxy (string value)
+#console_driver=nova.console.xvp.XVPConsoleProxy
+
+# Stub calls to compute worker for tests (boolean value)
+#stub_compute=false
+
+# Publicly visible name for this console host (string value)
+#console_public_hostname=nova
+
+
+#
+# Options defined in nova.console.rpcapi
+#
+
+# The topic console proxy nodes listen on (string value)
+#console_topic=console
+
+
+#
+# Options defined in nova.console.vmrc
+#
+
+# Port for VMware VMRC connections (integer value)
+#console_vmrc_port=443
+
+# Number of retries for retrieving VMRC information (integer
+# value)
+#console_vmrc_error_retries=10
+
+
+#
+# Options defined in nova.console.xvp
+#
+
+# XVP conf template (string value)
+#console_xvp_conf_template=$pybasedir/nova/console/xvp.conf.template
+
+# Generated XVP conf file (string value)
+#console_xvp_conf=/etc/xvp.conf
+
+# XVP master process pid file (string value)
+#console_xvp_pid=/var/run/xvp.pid
+
+# XVP log file (string value)
+#console_xvp_log=/var/log/xvp.log
+
+# Port for XVP to multiplex VNC connections on (integer value)
+#console_xvp_multiplex_port=5900
+
+
+#
+# Options defined in nova.consoleauth
+#
+
+# The topic console auth proxy nodes listen on (string value)
+#consoleauth_topic=consoleauth
+
+
+#
+# Options defined in nova.consoleauth.manager
+#
+
+# How many seconds before deleting tokens (integer value)
+#console_token_ttl=600
+
+
+#
+# Options defined in nova.db.api
+#
+
+# Services to be added to the available pool on create
+# (boolean value)
+#enable_new_services=true
+
+# Template string to be used to generate instance names
+# (string value)
+#instance_name_template=instance-%08x
+
+# Template string to be used to generate snapshot names
+# (string value)
+#snapshot_name_template=snapshot-%s
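+# Illustrative note (not part of the upstream sample): with
+# instance_name_template=instance-%08x, the instance with
+# database id 42 would be named "instance-0000002a".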
+
+
+#
+# Options defined in nova.db.base
+#
+
+# The driver to use for database access (string value)
+#db_driver=nova.db
+
+
+#
+# Options defined in nova.db.sqlalchemy.api
+#
+
+# When set, compute API will consider duplicate hostnames
+# invalid within the specified scope, regardless of case.
+# Should be empty, "project" or "global". (string value)
+#osapi_compute_unique_server_name_scope=
+
+
+#
+# Options defined in nova.image.glance
+#
+
+# Default glance hostname or IP address (string value)
+#glance_host=$my_ip
+
+# Default glance port (integer value)
+#glance_port=9292
+
+# Default protocol to use when connecting to glance. Set to
+# https for SSL. (string value)
+#glance_protocol=http
+
+# A list of the glance api servers available to nova. Prefix
+# with https:// for ssl-based glance api servers.
+# ([hostname|ip]:port) (list value)
+#glance_api_servers=$glance_host:$glance_port
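+# Illustrative example (hypothetical hosts): a deployment with
+# two glance API servers might set
+#glance_api_servers=glance1.example.com:9292,glance2.example.com:9292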
+
+# Allow to perform insecure SSL (https) requests to glance
+# (boolean value)
+#glance_api_insecure=false
+
+# Number of retries when downloading an image from glance
+# (integer value)
+#glance_num_retries=0
+
+# A list of URL schemes that can be downloaded directly via
+# the direct_url. Currently supported schemes: [file]. (list
+# value)
+#allowed_direct_url_schemes=
+
+
+#
+# Options defined in nova.image.s3
+#
+
+# Parent directory for tempdir used for image decryption
+# (string value)
+#image_decryption_dir=/tmp
+
+# Hostname or IP for OpenStack to use when accessing the S3
+# api (string value)
+#s3_host=$my_ip
+
+# Port used when accessing the S3 api (integer value)
+#s3_port=3333
+
+# Access key to use for S3 server for images (string value)
+#s3_access_key=notchecked
+
+# Secret key to use for S3 server for images (string value)
+#s3_secret_key=notchecked
+
+# Whether to use SSL when talking to S3 (boolean value)
+#s3_use_ssl=false
+
+# Whether to affix the tenant id to the access key when
+# downloading from S3 (boolean value)
+#s3_affix_tenant=false
+
+
+#
+# Options defined in nova.ipv6.api
+#
+
+# Backend to use for IPv6 generation (string value)
+#ipv6_backend=rfc2462
+
+
+#
+# Options defined in nova.network
+#
+
+# The full class name of the network API class to use (string
+# value)
+#network_api_class=nova.network.api.API
+
+
+#
+# Options defined in nova.network.driver
+#
+
+# Driver to use for network creation (string value)
+#network_driver=nova.network.linux_net
+
+
+#
+# Options defined in nova.network.floating_ips
+#
+
+# Default pool for floating IPs (string value)
+#default_floating_pool=nova
+
+# Automatically assign a floating IP to VMs (boolean value)
+#auto_assign_floating_ip=false
+
+# Full class name for the DNS Manager for floating IPs (string
+# value)
+#floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
+
+# Full class name for the DNS Manager for instance IPs (string
+# value)
+#instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
+
+# Full class name for the DNS Zone for instance IPs (string
+# value)
+#instance_dns_domain=
+
+
+#
+# Options defined in nova.network.ldapdns
+#
+
+# URL for LDAP server which will store DNS entries (string
+# value)
+#ldap_dns_url=ldap://ldap.example.com:389
+
+# User for LDAP DNS (string value)
+#ldap_dns_user=uid=admin,ou=people,dc=example,dc=org
+
+# Password for LDAP DNS (string value)
+#ldap_dns_password=password
+
+# Hostmaster for LDAP DNS driver Statement of Authority
+# (string value)
+#ldap_dns_soa_hostmaster=hostmaster@example.org
+
+# DNS Servers for LDAP DNS driver (multi valued)
+#ldap_dns_servers=dns.example.org
+
+# Base DN for DNS entries in LDAP (string value)
+#ldap_dns_base_dn=ou=hosts,dc=example,dc=org
+
+# Refresh interval (in seconds) for LDAP DNS driver Statement
+# of Authority (string value)
+#ldap_dns_soa_refresh=1800
+
+# Retry interval (in seconds) for LDAP DNS driver Statement of
+# Authority (string value)
+#ldap_dns_soa_retry=3600
+
+# Expiry interval (in seconds) for LDAP DNS driver Statement
+# of Authority (string value)
+#ldap_dns_soa_expiry=86400
+
+# Minimum interval (in seconds) for LDAP DNS driver Statement
+# of Authority (string value)
+#ldap_dns_soa_minimum=7200
+
+
+#
+# Options defined in nova.network.linux_net
+#
+
+# Location of flagfiles for dhcpbridge (multi valued)
+#dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf
+
+# Location to keep network config files (string value)
+#networks_path=$state_path/networks
+
+# Interface for public IP addresses (string value)
+#public_interface=eth0
+
+# MTU setting for network interface (integer value)
+#network_device_mtu=<None>
+
+# Location of nova-dhcpbridge (string value)
+#dhcpbridge=$bindir/nova-dhcpbridge
+
+# Public IP of network host (string value)
+#routing_source_ip=$my_ip
+
+# Lifetime of a DHCP lease in seconds (integer value)
+#dhcp_lease_time=120
+
+# If set, uses specific DNS server for dnsmasq. Can be
+# specified multiple times. (multi valued)
+#dns_server=
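+# Illustrative example (hypothetical addresses): being multi
+# valued, the option can simply be repeated, e.g.
+#dns_server=198.51.100.1
+#dns_server=198.51.100.2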
+
+# If set, uses the dns1 and dns2 from the network ref. as dns
+# servers. (boolean value)
+#use_network_dns_servers=false
+
+# A list of DMZ ranges that should be accepted (list value)
+#dmz_cidr=
+
+# Traffic to this range will always be snatted to the fallback
+# ip, even if it would normally be bridged out of the node.
+# Can be specified multiple times. (multi valued)
+#force_snat_range=
+
+# Override the default dnsmasq settings with this file (string
+# value)
+#dnsmasq_config_file=
+
+# Driver used to create ethernet devices. (string value)
+#linuxnet_interface_driver=nova.network.linux_net.LinuxBridgeInterfaceDriver
+
+# Name of Open vSwitch bridge used with linuxnet (string
+# value)
+#linuxnet_ovs_integration_bridge=br-int
+
+# Send gratuitous ARPs for HA setup (boolean value)
+#send_arp_for_ha=false
+
+# Send this many gratuitous ARPs for HA setup (integer value)
+#send_arp_for_ha_count=3
+
+# Use single default gateway. Only first nic of vm will get
+# default gateway from dhcp server (boolean value)
+#use_single_default_gateway=false
+
+# An interface that bridges can forward to. If this is set to
+# all then all traffic will be forwarded. Can be specified
+# multiple times. (multi valued)
+#forward_bridge_interface=all
+
+# The IP address for the metadata API server (string value)
+#metadata_host=$my_ip
+
+# The port for the metadata API port (integer value)
+#metadata_port=8775
+
+# Regular expression to match iptables rule that should always
+# be on the top. (string value)
+#iptables_top_regex=
+
+# Regular expression to match iptables rule that should always
+# be on the bottom. (string value)
+#iptables_bottom_regex=
+
+# The table that iptables to jump to when a packet is to be
+# dropped. (string value)
+#iptables_drop_action=DROP
+
+# Amount of time, in seconds, that ovs_vsctl should wait for a
+# response from the database. 0 is to wait forever. (integer
+# value)
+#ovs_vsctl_timeout=120
+
+# If passed, use fake network devices and addresses (boolean
+# value)
+#fake_network=false
+
+
+#
+# Options defined in nova.network.manager
+#
+
+# Bridge for simple network instances (string value)
+#flat_network_bridge=<None>
+
+# DNS server for simple network (string value)
+#flat_network_dns=8.8.4.4
+
+# Whether to attempt to inject network setup into guest
+# (boolean value)
+#flat_injected=false
+
+# FlatDhcp will bridge into this interface if set (string
+# value)
+#flat_interface=<None>
+
+# First VLAN for private networks (integer value)
+#vlan_start=100
+
+# VLANs will bridge into this interface if set (string value)
+#vlan_interface=<None>
+
+# Number of networks to support (integer value)
+#num_networks=1
+
+# Public IP for the cloudpipe VPN servers (string value)
+#vpn_ip=$my_ip
+
+# First Vpn port for private networks (integer value)
+#vpn_start=1000
+
+# Number of addresses in each private subnet (integer value)
+#network_size=256
+
+# Fixed IPv6 address block (string value)
+#fixed_range_v6=fd00::/48
+
+# Default IPv4 gateway (string value)
+#gateway=<None>
+
+# Default IPv6 gateway (string value)
+#gateway_v6=<None>
+
+# Number of addresses reserved for vpn clients (integer value)
+#cnt_vpn_clients=0
+
+# Seconds after which a deallocated IP is disassociated
+# (integer value)
+#fixed_ip_disassociate_timeout=600
+
+# Number of attempts to create unique mac address (integer
+# value)
+#create_unique_mac_address_attempts=5
+
+# If True, skip using the queue and make local calls (boolean
+# value)
+#fake_call=false
+
+# If True, unused gateway devices (VLAN and bridge) are
+# deleted in VLAN network mode with multi hosted networks
+# (boolean value)
+#teardown_unused_network_gateway=false
+
+# If True, send a dhcp release on instance termination
+# (boolean value)
+#force_dhcp_release=true
+
+# If True in multi_host mode, all compute hosts share the same
+# dhcp address. The same IP address used for DHCP will be
+# added on each nova-network node which is only visible to the
+# vms on the same host. (boolean value)
+#share_dhcp_address=false
+
+# If True, when a DNS entry must be updated, it sends a fanout
+# cast to all network hosts to update their DNS entries in
+# multi host mode (boolean value)
+#update_dns_entries=false
+
+# Number of seconds to wait between runs of updates to DNS
+# entries. (integer value)
+#dns_update_periodic_interval=-1
+
+# Domain to use for building the hostnames (string value)
+#dhcp_domain=novalocal
+
+# Indicates underlying L3 management library (string value)
+#l3_lib=nova.network.l3.LinuxNetL3
+
+
+#
+# Options defined in nova.network.neutronv2.api
+#
+
+# URL for connecting to neutron (string value)
+#neutron_url=http://127.0.0.1:9696
+
+# Timeout value for connecting to neutron in seconds (integer
+# value)
+#neutron_url_timeout=30
+
+# Username for connecting to neutron in admin context (string
+# value)
+#neutron_admin_username=<None>
+
+# Password for connecting to neutron in admin context (string
+# value)
+#neutron_admin_password=<None>
+
+# Tenant id for connecting to neutron in admin context (string
+# value)
+#neutron_admin_tenant_id=<None>
+
+# Tenant name for connecting to neutron in admin context. This
+# option is mutually exclusive with neutron_admin_tenant_id.
+# Note that with Keystone V3 tenant names are only unique
+# within a domain. (string value)
+#neutron_admin_tenant_name=<None>
+
+# Region name for connecting to neutron in admin context
+# (string value)
+#neutron_region_name=<None>
+
+# Authorization URL for connecting to neutron in admin context
+# (string value)
+#neutron_admin_auth_url=http://localhost:5000/v2.0
+
+# If set, ignore any SSL validation issues (boolean value)
+#neutron_api_insecure=false
+
+# Authorization strategy for connecting to neutron in admin
+# context (string value)
+#neutron_auth_strategy=keystone
+
+# Name of Integration Bridge used by Open vSwitch (string
+# value)
+#neutron_ovs_bridge=br-int
+
+# Number of seconds before querying neutron for extensions
+# (integer value)
+#neutron_extension_sync_interval=600
+
+# Location of CA certificates file to use for neutron client
+# requests. (string value)
+#neutron_ca_certificates_file=<None>
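+# Illustrative example (hypothetical values): a minimal set of
+# settings for a keystone-authenticated neutron deployment
+# might look like
+#neutron_url=http://192.0.2.10:9696
+#neutron_admin_username=neutron
+#neutron_admin_password=secret
+#neutron_admin_tenant_name=service
+#neutron_admin_auth_url=http://192.0.2.10:5000/v2.0
+#neutron_auth_strategy=keystone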
+
+
+#
+# Options defined in nova.network.rpcapi
+#
+
+# The topic network nodes listen on (string value)
+#network_topic=network
+
+# Default value for multi_host in networks. Also, if set, some
+# rpc network calls will be sent directly to host. (boolean
+# value)
+#multi_host=false
+
+
+#
+# Options defined in nova.network.security_group.openstack_driver
+#
+
+# The full class name of the security API class (string value)
+#security_group_api=nova
+
+
+#
+# Options defined in nova.objectstore.s3server
+#
+
+# Path to S3 buckets (string value)
+#buckets_path=$state_path/buckets
+
+# IP address for S3 API to listen (string value)
+#s3_listen=0.0.0.0
+
+# Port for S3 API to listen (integer value)
+#s3_listen_port=3333
+
+
+#
+# Options defined in nova.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in nova.openstack.common.lockutils
+#
+
+# Whether to disable inter-process locks (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. (string value)
+#lock_path=<None>
+
+
+#
+# Options defined in nova.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
+
+# Publish error events (boolean value)
+#publish_errors=false
+
+# Make deprecations fatal (boolean value)
+#fatal_deprecations=false
+
+# If an instance is passed with the log message, format it
+# like this (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# If an instance UUID is passed with the log message, format
+# it like this (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of logging configuration file. It does not disable
+# existing loggers, but just appends specified logging
+# configuration to any other existing logging options. Please
+# see the Python logging module documentation for details on
+# logging configuration files. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=<None>
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and then will be changed in J to honor RFC5424
+# (boolean value)
+#use_syslog=false
+
+# (Optional) Use syslog rfc5424 format for logging. If
+# enabled, will add APP-NAME (RFC5424) before the MSG part of
+# the syslog message. The old format without APP-NAME is
+# deprecated in I, and will be removed in J. (boolean value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in nova.openstack.common.memorycache
+#
+
+# Memcached servers or None for in process cache. (list value)
+#memcached_servers=<None>
+
+
+#
+# Options defined in nova.openstack.common.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+
+#
+# Options defined in nova.pci.pci_request
+#
+
+# An alias for a PCI passthrough device requirement. This
+# allows users to specify the alias in the extra_spec for a
+# flavor, without needing to repeat all the PCI property
+# requirements. For example: pci_alias = { "name":
+# "QuicAssist", "product_id": "0443", "vendor_id": "8086",
+# "device_type": "ACCEL" } defines an alias for the Intel
+# QuickAssist card. (multi valued)
+#pci_alias=
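+# The example from the description above, written out as a
+# value:
+#pci_alias={ "name": "QuicAssist", "product_id": "0443", "vendor_id": "8086", "device_type": "ACCEL" }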
+
+
+#
+# Options defined in nova.pci.pci_whitelist
+#
+
+# White list of PCI devices available to VMs. For example:
+# pci_passthrough_whitelist = [{"vendor_id": "8086",
+# "product_id": "0443"}] (multi valued)
+#pci_passthrough_whitelist=
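+# The example from the description above, written out as a
+# value:
+#pci_passthrough_whitelist=[{"vendor_id": "8086", "product_id": "0443"}]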
+
+
+#
+# Options defined in nova.scheduler.driver
+#
+
+# The scheduler host manager class to use (string value)
+#scheduler_host_manager=nova.scheduler.host_manager.HostManager
+
+# Maximum number of attempts to schedule an instance (integer
+# value)
+#scheduler_max_attempts=3
+
+
+#
+# Options defined in nova.scheduler.filter_scheduler
+#
+
+# New instances will be scheduled on a host chosen randomly
+# from a subset of the N best hosts. This property defines the
+# subset size that a host is chosen from. A value of 1 chooses
+# the first host returned by the weighing functions. This
+# value must be at least 1. Any value less than 1 will be
+# ignored, and 1 will be used instead (integer value)
+#scheduler_host_subset_size=1
+
+
+#
+# Options defined in nova.scheduler.filters.aggregate_image_properties_isolation
+#
+
+# Force the filter to consider only keys matching the given
+# namespace. (string value)
+#aggregate_image_properties_isolation_namespace=<None>
+
+# The separator used between the namespace and keys (string
+# value)
+#aggregate_image_properties_isolation_separator=.
+
+
+#
+# Options defined in nova.scheduler.filters.core_filter
+#
+
+# Virtual CPU to physical CPU allocation ratio which affects
+# all CPU filters. This configuration specifies a global ratio
+# for CoreFilter. For AggregateCoreFilter, it will fall back
+# to this configuration value if no per-aggregate setting
+# found. (floating point value)
+#cpu_allocation_ratio=16.0
+
+
+#
+# Options defined in nova.scheduler.filters.disk_filter
+#
+
+# Virtual disk to physical disk allocation ratio (floating
+# point value)
+#disk_allocation_ratio=1.0
+
+
+#
+# Options defined in nova.scheduler.filters.io_ops_filter
+#
+
+# Ignore hosts that have too many
+# builds/resizes/snaps/migrations (integer value)
+#max_io_ops_per_host=8
+
+
+#
+# Options defined in nova.scheduler.filters.isolated_hosts_filter
+#
+
+# Images to run on isolated host (list value)
+#isolated_images=
+
+# Host reserved for specific images (list value)
+#isolated_hosts=
+
+# Whether to force isolated hosts to run only isolated images
+# (boolean value)
+#restrict_isolated_hosts_to_isolated_images=true
+
+
+#
+# Options defined in nova.scheduler.filters.num_instances_filter
+#
+
+# Ignore hosts that have too many instances (integer value)
+#max_instances_per_host=50
+
+
+#
+# Options defined in nova.scheduler.filters.ram_filter
+#
+
+# Virtual ram to physical ram allocation ratio which affects
+# all ram filters. This configuration specifies a global ratio
+# for RamFilter. For AggregateRamFilter, it will fall back to
+# this configuration value if no per-aggregate setting found.
+# (floating point value)
+#ram_allocation_ratio=1.5
+
+
+#
+# Options defined in nova.scheduler.host_manager
+#
+
+# Filter classes available to the scheduler which may be
+# specified more than once. An entry of
+# "nova.scheduler.filters.standard_filters" maps to all
+# filters included with nova. (multi valued)
+#scheduler_available_filters=nova.scheduler.filters.all_filters
+
+# Which filter class names to use for filtering hosts when not
+# specified in the request. (list value)
+#scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
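+# Illustrative example (a reduced set drawn from the default
+# list above):
+#scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter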
+
+# Which weight class names to use for weighing hosts (list
+# value)
+#scheduler_weight_classes=nova.scheduler.weights.all_weighers
+
+
+#
+# Options defined in nova.scheduler.manager
+#
+
+# Default driver to use for the scheduler (string value)
+#scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
+
+# How often (in seconds) to run periodic tasks in the
+# scheduler driver of your choice. Please note this is likely
+# to interact with the value of service_down_time, but exactly
+# how they interact will depend on your choice of scheduler
+# driver. (integer value)
+#scheduler_driver_task_period=60
+
+
+#
+# Options defined in nova.scheduler.rpcapi
+#
+
+# The topic scheduler nodes listen on (string value)
+#scheduler_topic=scheduler
+
+
+#
+# Options defined in nova.scheduler.scheduler_options
+#
+
+# Absolute path to scheduler configuration JSON file. (string
+# value)
+#scheduler_json_config_location=
+
+
+#
+# Options defined in nova.scheduler.weights.ram
+#
+
+# Multiplier used for weighing ram. Negative numbers mean to
+# stack vs spread. (floating point value)
+#ram_weight_multiplier=1.0
+
+
+#
+# Options defined in nova.servicegroup.api
+#
+
+# The driver for servicegroup service (valid options are: db,
+# zk, mc) (string value)
+#servicegroup_driver=db
+
+
+#
+# Options defined in nova.virt.configdrive
+#
+
+# Config drive format. One of iso9660 (default) or vfat
+# (string value)
+#config_drive_format=iso9660
+
+# Where to put temporary files associated with config drive
+# creation (string value)
+#config_drive_tempdir=<None>
+
+# Set to force injection to take place on a config drive (if
+# set, valid options are: always) (string value)
+#force_config_drive=<None>
+
+# Name and optionally path of the tool used for ISO image
+# creation (string value)
+#mkisofs_cmd=genisoimage
+
+
+#
+# Options defined in nova.virt.cpu
+#
+
+# Defines which pcpus that instance vcpus can use. For
+# example, "4-12,^8,15" (string value)
+#vcpu_pin_set=<None>
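+# The example from the description above, written out as a
+# value:
+#vcpu_pin_set=4-12,^8,15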
+
+
+#
+# Options defined in nova.virt.disk.api
+#
+
+# Template file for injected network (string value)
+#injected_network_template=$pybasedir/nova/virt/interfaces.template
+
+# Names of the mkfs commands for ephemeral devices. The format
+# is <os_type>=<mkfs command> (multi valued)
+#virt_mkfs=
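+# Illustrative example (hypothetical entry) following the
+# <os_type>=<mkfs command> format:
+#virt_mkfs=linux=mkfs.ext4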
+
+# Attempt to resize the filesystem by accessing the image over
+# a block device. This is done by the host and may not be
+# necessary if the image contains a recent version of cloud-
+# init. Possible mechanisms require the nbd driver (for qcow
+# and raw), or loop (for raw). (boolean value)
+#resize_fs_using_block_device=false
+
+
+#
+# Options defined in nova.virt.disk.mount.nbd
+#
+
+# Amount of time, in seconds, to wait for NBD device start up.
+# (integer value)
+#timeout_nbd=10
+
+
+#
+# Options defined in nova.virt.driver
+#
+
+# Driver to use for controlling virtualization. Options
+# include: libvirt.LibvirtDriver, xenapi.XenAPIDriver,
+# fake.FakeDriver, baremetal.BareMetalDriver,
+# vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver (string
+# value)
+#compute_driver=<None>
+
+# The default format an ephemeral_volume will be formatted
+# with on creation. (string value)
+#default_ephemeral_format=<None>
+
+# VM image preallocation mode: "none" => no storage
+# provisioning is done up front, "space" => storage is fully
+# allocated at instance start (string value)
+#preallocate_images=none
+
+# Whether to use cow images (boolean value)
+#use_cow_images=true
+
+# Fail instance boot if vif plugging fails (boolean value)
+#vif_plugging_is_fatal=true
+
+# Number of seconds to wait for neutron vif plugging events to
+# arrive before continuing or failing (see
+# vif_plugging_is_fatal). If this is set to zero and
+# vif_plugging_is_fatal is False, events should not be
+# expected to arrive at all. (integer value)
+#vif_plugging_timeout=300
+
+
+#
+# Options defined in nova.virt.firewall
+#
+
+# Firewall driver (defaults to hypervisor specific iptables
+# driver) (string value)
+#firewall_driver=<None>
+
+# Whether to allow network traffic from same network (boolean
+# value)
+#allow_same_net_traffic=true
+
+
+#
+# Options defined in nova.virt.imagecache
+#
+
+# Number of seconds to wait between runs of the image cache
+# manager (integer value)
+#image_cache_manager_interval=2400
+
+# Where cached images are stored under $instances_path. This
+# is NOT the full path - just a folder name. For per-compute-
+# host cached images, set to _base_$my_ip (string value)
+# Deprecated group/name - [DEFAULT]/base_dir_name
+#image_cache_subdirectory_name=_base
+
+# Should unused base images be removed? (boolean value)
+#remove_unused_base_images=true
+
+# Unused unresized base images younger than this will not be
+# removed (integer value)
+#remove_unused_original_minimum_age_seconds=86400
+
+
+#
+# Options defined in nova.virt.imagehandler
+#
+
+# Specifies which image handler extension names to use for
+# handling images. The first extension in the list which can
+# handle the image with a suitable location will be used.
+# (list value)
+#image_handlers=download
+
+
+#
+# Options defined in nova.virt.images
+#
+
+# Force backing images to raw format (boolean value)
+#force_raw_images=true
+
+
+#
+# Options defined in nova.vnc
+#
+
+# Location of VNC console proxy, in the form
+# "http://127.0.0.1:6080/vnc_auto.html" (string value)
+#novncproxy_base_url=http://127.0.0.1:6080/vnc_auto.html
+
+# Location of nova xvp VNC console proxy, in the form
+# "http://127.0.0.1:6081/console" (string value)
+#xvpvncproxy_base_url=http://127.0.0.1:6081/console
+
+# IP address on which instance vncservers should listen
+# (string value)
+#vncserver_listen=127.0.0.1
+
+# The address to which proxy clients (like nova-xvpvncproxy)
+# should connect (string value)
+#vncserver_proxyclient_address=127.0.0.1
+
+# Enable VNC related features (boolean value)
+#vnc_enabled=true
+
+# Keymap for VNC (string value)
+#vnc_keymap=en-us
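+# Illustrative example (hypothetical addresses): a compute host
+# at 192.0.2.20 proxied through a controller at 192.0.2.10
+# might use
+#novncproxy_base_url=http://192.0.2.10:6080/vnc_auto.html
+#vncserver_listen=0.0.0.0
+#vncserver_proxyclient_address=192.0.2.20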
+
+
+#
+# Options defined in nova.vnc.xvp_proxy
+#
+
+# Port that the XCP VNC proxy should bind to (integer value)
+#xvpvncproxy_port=6081
+
+# Address that the XCP VNC proxy should bind to (string value)
+#xvpvncproxy_host=0.0.0.0
+
+
+#
+# Options defined in nova.volume
+#
+
+# The full class name of the volume API class to use (string
+# value)
+#volume_api_class=nova.volume.cinder.API
+
+
+#
+# Options defined in nova.volume.cinder
+#
+
+# Info to match when looking for cinder in the service
+# catalog. Format is: separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info=volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder
+# endpoint e.g. http://localhost:8776/v1/%(project_id)s
+# (string value)
+#cinder_endpoint_template=<None>
+
+# Region name of this node (string value)
+#os_region_name=<None>
+
+# Location of ca certificates file to use for cinder client
+# requests. (string value)
+#cinder_ca_certificates_file=<None>
+
+# Number of cinderclient retries on failed http calls (integer
+# value)
+#cinder_http_retries=3
+
+# Allow to perform insecure SSL requests to cinder (boolean
+# value)
+#cinder_api_insecure=false
+
+# Allow attach between instance and volume in different
+# availability zones. (boolean value)
+#cinder_cross_az_attach=true
+
+
+[baremetal]
+
+#
+# Options defined in nova.virt.baremetal.db.api
+#
+
+# The backend to use for bare-metal database (string value)
+#db_backend=sqlalchemy
+
+
+#
+# Options defined in nova.virt.baremetal.db.sqlalchemy.session
+#
+
+# The SQLAlchemy connection string used to connect to the
+# bare-metal database (string value)
+#sql_connection=sqlite:///$state_path/baremetal_nova.sqlite
+
+
+#
+# Options defined in nova.virt.baremetal.driver
+#
+
+# Baremetal VIF driver. (string value)
+#vif_driver=nova.virt.baremetal.vif_driver.BareMetalVIFDriver
+
+# Baremetal volume driver. (string value)
+#volume_driver=nova.virt.baremetal.volume_driver.LibvirtVolumeDriver
+
+# A list of additional capabilities corresponding to
+# flavor_extra_specs for this compute host to advertise. Valid
+# entries are name=value pairs. For example, "key1:val1,
+# key2:val2" (list value)
+# Deprecated group/name - [DEFAULT]/instance_type_extra_specs
+#flavor_extra_specs=
+
+# Baremetal driver back-end (pxe or tilera) (string value)
+#driver=nova.virt.baremetal.pxe.PXE
+
+# Baremetal power management method (string value)
+#power_manager=nova.virt.baremetal.ipmi.IPMI
+
+# Baremetal compute node's tftp root path (string value)
+#tftp_root=/tftpboot
+
+
+#
+# Options defined in nova.virt.baremetal.ipmi
+#
+
+# Path to baremetal terminal program (string value)
+#terminal=shellinaboxd
+
+# Path to baremetal terminal SSL cert(PEM) (string value)
+#terminal_cert_dir=<None>
+
+# Path to the directory that stores pidfiles of
+# baremetal_terminal (string value)
+#terminal_pid_dir=$state_path/baremetal/console
+
+# Maximum number of retries for IPMI operations (integer
+# value)
+#ipmi_power_retry=10
+
+
+#
+# Options defined in nova.virt.baremetal.pxe
+#
+
+# Default kernel image ID used in deployment phase (string
+# value)
+#deploy_kernel=<None>
+
+# Default ramdisk image ID used in deployment phase (string
+# value)
+#deploy_ramdisk=<None>
+
+# Template file for injected network config (string value)
+#net_config_template=$pybasedir/nova/virt/baremetal/net-dhcp.ubuntu.template
+
+# Additional append parameters for baremetal PXE boot (string
+# value)
+#pxe_append_params=nofb nomodeset vga=normal
+
+# Template file for PXE configuration (string value)
+#pxe_config_template=$pybasedir/nova/virt/baremetal/pxe_config.template
+
+# If True, enable file injection for network info, files and
+# admin password (boolean value)
+#use_file_injection=false
+
+# Timeout for PXE deployments. Default: 0 (unlimited) (integer
+# value)
+#pxe_deploy_timeout=0
+
+# If set, pass the network configuration details to the
+# initramfs via cmdline. (boolean value)
+#pxe_network_config=false
+
+# This gets passed to Neutron as the bootfile dhcp parameter.
+# (string value)
+#pxe_bootfile_name=pxelinux.0
+
+
+#
+# Options defined in nova.virt.baremetal.tilera_pdu
+#
+
+# IP address of tilera pdu (string value)
+#tile_pdu_ip=10.0.100.1
+
+# Management script for tilera pdu (string value)
+#tile_pdu_mgr=/tftpboot/pdu_mgr
+
+# Power status of tilera PDU is OFF (integer value)
+#tile_pdu_off=2
+
+# Power status of tilera PDU is ON (integer value)
+#tile_pdu_on=1
+
+# Power status of tilera PDU (integer value)
+#tile_pdu_status=9
+
+# Wait time in seconds before checking the result of tilera
+# power operations (integer value)
+#tile_power_wait=9
+
+
+#
+# Options defined in nova.virt.baremetal.virtual_power_driver
+#
+
+# IP address or name of the virtual power host (string value)
+#virtual_power_ssh_host=
+
+# Port to use for ssh to virtual power host (integer value)
+#virtual_power_ssh_port=22
+
+# Base command to use for virtual power (vbox, virsh) (string
+# value)
+#virtual_power_type=virsh
+
+# User to execute virtual power commands as (string value)
+#virtual_power_host_user=
+
+# Password for virtual power host_user (string value)
+#virtual_power_host_pass=
+
+# The ssh key for virtual power host_user (string value)
+#virtual_power_host_key=<None>
+
+
+#
+# Options defined in nova.virt.baremetal.volume_driver
+#
+
+# Do not set this out of dev/test environments. If a node does
+# not have a fixed PXE IP address, volumes are exported with
+# globally opened ACL (boolean value)
+#use_unsafe_iscsi=false
+
+# The iSCSI IQN prefix used in baremetal volume connections.
+# (string value)
+#iscsi_iqn_prefix=iqn.2010-10.org.openstack.baremetal
+
+
+[cells]
+
+#
+# Options defined in nova.cells.manager
+#
+
+# Cells communication driver to use (string value)
+#driver=nova.cells.rpc_driver.CellsRPCDriver
+
+# Number of seconds after an instance was updated or deleted
+# to continue to update cells (integer value)
+#instance_updated_at_threshold=3600
+
+# Number of instances to update per periodic task run (integer
+# value)
+#instance_update_num_instances=1
+
+
+#
+# Options defined in nova.cells.messaging
+#
+
+# Maximum number of hops for cells routing. (integer value)
+#max_hop_count=10
+
+# Cells scheduler to use (string value)
+#scheduler=nova.cells.scheduler.CellsScheduler
+
+
+#
+# Options defined in nova.cells.opts
+#
+
+# Enable cell functionality (boolean value)
+#enable=false
+
+# The topic cells nodes listen on (string value)
+#topic=cells
+
+# Manager for cells (string value)
+#manager=nova.cells.manager.CellsManager
+
+# Name of this cell (string value)
+#name=nova
+
+# Key/Multi-value list with the capabilities of the cell (list
+# value)
+#capabilities=hypervisor=xenserver;kvm,os=linux;windows
+
+# Seconds to wait for response from a call to a cell. (integer
+# value)
+#call_timeout=60
+
+# Percentage of cell capacity to hold in reserve. Affects both
+# memory and disk utilization (floating point value)
+#reserve_percent=10.0
+
+# Type of cell: api or compute (string value)
+#cell_type=compute
+
+# Number of seconds after which a lack of capability and
+# capacity updates signals the child cell is to be treated as
+# a mute. (integer value)
+#mute_child_interval=300
+
+# Seconds between bandwidth updates for cells. (integer value)
+#bandwidth_update_interval=600
+
+
+#
+# Options defined in nova.cells.rpc_driver
+#
+
+# Base queue name to use when communicating between cells.
+# Various topics by message type will be appended to this.
+# (string value)
+#rpc_driver_queue_base=cells.intercell
+
+
+#
+# Options defined in nova.cells.scheduler
+#
+
+# Filter classes the cells scheduler should use. An entry of
+# "nova.cells.filters.all_filters" maps to all cells filters
+# included with nova. (list value)
+#scheduler_filter_classes=nova.cells.filters.all_filters
+
+# Weigher classes the cells scheduler should use. An entry of
+# "nova.cells.weights.all_weighers" maps to all cell weighers
+# included with nova. (list value)
+#scheduler_weight_classes=nova.cells.weights.all_weighers
+
+# How many retries when no cells are available. (integer
+# value)
+#scheduler_retries=10
+
+# How often to retry in seconds when no cells are available.
+# (integer value)
+#scheduler_retry_delay=2
+
+
+#
+# Options defined in nova.cells.state
+#
+
+# Interval, in seconds, for getting fresh cell information
+# from the database. (integer value)
+#db_check_interval=60
+
+# Configuration file from which to read cells configuration.
+# If given, overrides reading cells from the database. (string
+# value)
+#cells_config=<None>
+
+
+#
+# Options defined in nova.cells.weights.mute_child
+#
+
+# Multiplier used to weigh mute children. (The value should be
+# negative.) (floating point value)
+#mute_weight_multiplier=-10.0
+
+# Weight value assigned to mute children. (The value should be
+# positive.) (floating point value)
+#mute_weight_value=1000.0
+
+
+#
+# Options defined in nova.cells.weights.ram_by_instance_type
+#
+
+# Multiplier used for weighing ram. Negative numbers mean to
+# stack vs spread. (floating point value)
+#ram_weight_multiplier=10.0
+
+
+#
+# Options defined in nova.cells.weights.weight_offset
+#
+
+# Multiplier used to weigh offset weigher. (floating point
+# value)
+#offset_weight_multiplier=1.0
+
+
+[conductor]
+
+#
+# Options defined in nova.conductor.api
+#
+
+# Perform nova-conductor operations locally (boolean value)
+#use_local=false
+
+# The topic on which conductor nodes listen (string value)
+#topic=conductor
+
+# Full class name for the Manager for conductor (string value)
+#manager=nova.conductor.manager.ConductorManager
+
+# Number of workers for OpenStack Conductor service. The
+# default will be the number of CPUs available. (integer
+# value)
+#workers=<None>
+
+
+[database]
+
+#
+# Options defined in nova.db.sqlalchemy.api
+#
+
+# The SQLAlchemy connection string used to connect to the
+# slave database (string value)
+#slave_connection=<None>
+
+
+#
+# Options defined in nova.openstack.common.db.options
+#
+
+# The file name to use with SQLite (string value)
+#sqlite_db=nova.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous=true
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection=<None>
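+# Illustrative example (hypothetical credentials and host):
+#connection=mysql://nova:secret@192.0.2.10/nova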
+
+# The SQL mode to be used for MySQL sessions (default is
+# empty, meaning do not override any server-side SQL mode
+# setting) (string value)
+#mysql_sql_mode=<None>
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect=false
+
+# seconds between db connection retries (integer value)
+#db_retry_interval=1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval=true
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval=10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries=20
+
+
+[hyperv]
+
+#
+# Options defined in nova.virt.hyperv.pathutils
+#
+
+# The name of a Windows share mapped to the
+# "instances_path" dir and used by the resize feature to copy
+# files to the target host. If left blank, an administrative
+# share will be used, looking for the same "instances_path"
+# used locally (string value)
+#instances_path_share=
+
+
+#
+# Options defined in nova.virt.hyperv.utilsfactory
+#
+
+# Force V1 WMI utility classes (boolean value)
+#force_hyperv_utils_v1=false
+
+# Force V1 volume utility class (boolean value)
+#force_volumeutils_v1=false
+
+
+#
+# Options defined in nova.virt.hyperv.vif
+#
+
+# External virtual switch name. If not provided, the first
+# external virtual switch is used (string value)
+#vswitch_name=<None>
+
+
+#
+# Options defined in nova.virt.hyperv.vmops
+#
+
+# Required for live migration among hosts with different CPU
+# features (boolean value)
+#limit_cpu_features=false
+
+# Sets the admin password in the config drive image (boolean
+# value)
+#config_drive_inject_password=false
+
+# Path of qemu-img command which is used to convert between
+# different image types (string value)
+#qemu_img_cmd=qemu-img.exe
+
+# Attaches the Config Drive image as a cdrom drive instead of
+# a disk drive (boolean value)
+#config_drive_cdrom=false
+
+# Enables metrics collections for an instance by using
+# Hyper-V's metric APIs. Collected data can be retrieved by
+# other apps and services, e.g.: Ceilometer. Requires Hyper-V
+# / Windows Server 2012 and above (boolean value)
+#enable_instance_metrics_collection=false
+
+# Enables dynamic memory allocation (ballooning) when set to a
+# value greater than 1. The value expresses the ratio between
+# the total RAM assigned to an instance and its startup RAM
+# amount. For example a ratio of 2.0 for an instance with
+# 1024MB of RAM implies 512MB of RAM allocated at startup
+# (floating point value)
+#dynamic_memory_ratio=1.0
+
+
+#
+# Options defined in nova.virt.hyperv.volumeops
+#
+
+# The number of times to retry to attach a volume (integer
+# value)
+#volume_attach_retry_count=10
+
+# Interval between volume attachment attempts, in seconds
+# (integer value)
+#volume_attach_retry_interval=5
+
+# The number of times to retry checking for a disk mounted via
+# iSCSI. (integer value)
+#mounted_disk_query_retry_count=10
+
+# Interval between checks for a mounted iSCSI disk, in
+# seconds. (integer value)
+#mounted_disk_query_retry_interval=5
+
+
+[image_file_url]
+
+#
+# Options defined in nova.image.download.file
+#
+
+# List of file systems that are configured in this file in the
+# image_file_url:<list entry name> sections (list value)
+#filesystems=
+
+
+[keymgr]
+
+#
+# Options defined in nova.keymgr
+#
+
+# The full class name of the key manager API class (string
+# value)
+#api_class=nova.keymgr.conf_key_mgr.ConfKeyManager
+
+
+#
+# Options defined in nova.keymgr.conf_key_mgr
+#
+
+# Fixed key returned by key manager, specified in hex (string
+# value)
+#fixed_key=<None>
+
+
+[keystone_authtoken]
+
+#
+# Options defined in keystoneclient.middleware.auth_token
+#
+
+# Prefix to prepend at the beginning of the path (string
+# value)
+#auth_admin_prefix=
+
+# Host providing the admin Identity API endpoint (string
+# value)
+#auth_host=127.0.0.1
+
+# Port of the admin Identity API endpoint (integer value)
+#auth_port=35357
+
+# Protocol of the admin Identity API endpoint (http or https)
+# (string value)
+#auth_protocol=https
+
+# Complete public Identity API endpoint (string value)
+#auth_uri=<None>
+
+# API version of the admin Identity API endpoint (string
+# value)
+#auth_version=<None>
+
+# Do not handle authorization requests within the middleware,
+# but delegate the authorization decision to downstream WSGI
+# components (boolean value)
+#delay_auth_decision=false
+
+# Request timeout value for communicating with Identity API
+# server. (boolean value)
+#http_connect_timeout=<None>
+
+# How many times are we trying to reconnect when communicating
+# with Identity API Server. (integer value)
+#http_request_max_retries=3
+
+# Single shared secret with the Keystone configuration used
+# for bootstrapping a Keystone installation, or otherwise
+# bypassing the normal authentication process. (string value)
+#admin_token=<None>
+
+# Keystone account username (string value)
+#admin_user=<None>
+
+# Keystone account password (string value)
+#admin_password=<None>
+
+# Keystone service account tenant name to validate user tokens
+# (string value)
+#admin_tenant_name=admin
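+# Illustrative example (hypothetical values): a typical service
+# setup points at the Keystone admin endpoint and supplies the
+# nova service credentials, e.g.
+#auth_host=192.0.2.10
+#auth_port=35357
+#auth_protocol=http
+#admin_user=nova
+#admin_password=secret
+#admin_tenant_name=service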
+
+# Env key for the swift cache (string value)
+#cache=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#certfile=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#keyfile=<None>
+
+# A PEM encoded Certificate Authority to use when verifying
+# HTTPs connections. Defaults to system CAs. (string value)
+#cafile=<None>
+
+# If true, do not verify HTTPS connections. (boolean value)
+#insecure=false
+
+# Directory used to cache files related to PKI tokens (string
+# value)
+#signing_dir=<None>
+
+# Optionally specify a list of memcached server(s) to use for
+# caching. If left undefined, tokens will instead be cached
+# in-process. (list value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers=<None>
+
+# In order to prevent excessive effort spent validating
+# tokens, the middleware caches previously-seen tokens for a
+# configurable duration (in seconds). Set to -1 to disable
+# caching completely. (integer value)
+#token_cache_time=300
+
+# Determines the frequency at which the list of revoked tokens
+# is retrieved from the Identity service (in seconds). A high
+# number of revocation events combined with a low cache
+# duration may significantly reduce performance. (integer
+# value)
+#revocation_cache_time=300
+
+# (optional) if defined, indicate whether token data should be
+# authenticated or authenticated and encrypted. Acceptable
+# values are MAC or ENCRYPT. If MAC, token data is
+# authenticated (with HMAC) in the cache. If ENCRYPT, token
+# data is encrypted and authenticated in the cache. If the
+# value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+#memcache_security_strategy=<None>
+
+# (optional, mandatory if memcache_security_strategy is
+# defined) this string is used for key derivation. (string
+# value)
+#memcache_secret_key=<None>
+
+# (optional) indicate whether to set the X-Service-Catalog
+# header. If False, middleware will not ask for service
+# catalog on token validation and will not set the X-Service-
+# Catalog header. (boolean value)
+#include_service_catalog=true
+
+# Used to control the use and type of token binding. Can be
+# set to: "disabled" to not check token binding. "permissive"
+# (default) to validate binding information if the bind type
+# is of a form known to the server and ignore it if not.
+# "strict" like "permissive" but if the bind type is unknown
+# the token will be rejected. "required" to require some form
+# of token binding to be allowed. Finally, the name of a
+# binding method that must be present in tokens. (string
+# value)
+#enforce_token_bind=permissive
+
+
+[libvirt]
+
+#
+# Options defined in nova.virt.libvirt.driver
+#
+
+# Rescue ami image (string value)
+#rescue_image_id=<None>
+
+# Rescue aki image (string value)
+#rescue_kernel_id=<None>
+
+# Rescue ari image (string value)
+#rescue_ramdisk_id=<None>
+
+# Libvirt domain type (valid options are: kvm, lxc, qemu, uml,
+# xen) (string value)
+# Deprecated group/name - [DEFAULT]/libvirt_type
+#virt_type=kvm
+
+# Override the default libvirt URI (which is dependent on
+# virt_type) (string value)
+# Deprecated group/name - [DEFAULT]/libvirt_uri
+#connection_uri=
+
+# Inject the admin password at boot time, without an agent.
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/libvirt_inject_password
+#inject_password=false
+
+# Inject the ssh public key at boot time (boolean value)
+# Deprecated group/name - [DEFAULT]/libvirt_inject_key
+#inject_key=false
+
+# The partition to inject to: -2 => disable, -1 => inspect
+# (libguestfs only), 0 => not partitioned, >0 => partition
+# number (integer value)
+# Deprecated group/name - [DEFAULT]/libvirt_inject_partition
+#inject_partition=-2
+
+# Sync virtual and real mouse cursors in Windows VMs (boolean
+# value)
+#use_usb_tablet=true
+
+# Migration target URI (any included "%s" is replaced with the
+# migration target hostname) (string value)
+#live_migration_uri=qemu+tcp://%s/system
+
+# Migration flags to be set for live migration (string value)
+#live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER
+
+# Migration flags to be set for block migration (string value)
+#block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_NON_SHARED_INC
+
+# Maximum bandwidth to be used during migration, in Mbps
+# (integer value)
+#live_migration_bandwidth=0
+
+# Snapshot image format (valid options are : raw, qcow2, vmdk,
+# vdi). Defaults to same as source image (string value)
+#snapshot_image_format=<None>
+
+# DEPRECATED. The libvirt VIF driver to configure the
+# VIFs. This option is deprecated and will be removed in the
+# Juno release. (string value)
+# Deprecated group/name - [DEFAULT]/libvirt_vif_driver
+#vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
+
+# Libvirt handlers for remote volumes. (list value)
+# Deprecated group/name - [DEFAULT]/libvirt_volume_drivers
+#volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver,aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver,glusterfs=nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver,fibre_channel=nova.virt.libvirt.volume.LibvirtFibreChannelVolumeDriver,scality=nova.virt.libvirt.volume.LibvirtScalityVolumeDriver
+
+# Override the default disk prefix for the devices attached to
+# a server, which is dependent on virt_type. (valid options
+# are: sd, xvd, uvd, vd) (string value)
+# Deprecated group/name - [DEFAULT]/libvirt_disk_prefix
+#disk_prefix=<None>
+
+# Number of seconds to wait for instance to shut down after
+# soft reboot request is made. We fall back to hard reboot if
+# instance does not shutdown within this window. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/libvirt_wait_soft_reboot_seconds
+#wait_soft_reboot_seconds=120
+
+# Set to "host-model" to clone the host CPU feature flags; to
+# "host-passthrough" to use the host CPU model exactly; to
+# "custom" to use a named CPU model; to "none" to not set any
+# CPU model. If virt_type="kvm|qemu", it will default to
+# "host-model", otherwise it will default to "none" (string
+# value)
+# Deprecated group/name - [DEFAULT]/libvirt_cpu_mode
+#cpu_mode=<None>
+
+# Set to a named libvirt CPU model (see names listed in
+# /usr/share/libvirt/cpu_map.xml). Only has effect if
+# cpu_mode="custom" and virt_type="kvm|qemu" (string value)
+# Deprecated group/name - [DEFAULT]/libvirt_cpu_model
+#cpu_model=<None>
+
+# Location where libvirt driver will store snapshots before
+# uploading them to image service (string value)
+# Deprecated group/name - [DEFAULT]/libvirt_snapshots_directory
+#snapshots_directory=$instances_path/snapshots
+
+# Location where the Xen hvmloader is kept (string value)
+#xen_hvmloader_path=/usr/lib/xen/boot/hvmloader
+
+# Specific cachemodes to use for different disk types e.g:
+# file=directsync,block=none (list value)
+#disk_cachemodes=
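+# The example from the description above, written out as a
+# value:
+#disk_cachemodes=file=directsync,block=none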
+
+# A path to a device that will be used as source of entropy on
+# the host. Permitted options are: /dev/random or /dev/hwrng
+# (string value)
+#rng_dev_path=<None>
+
+
+#
+# Options defined in nova.virt.libvirt.imagebackend
+#
+
+# VM Images format. Acceptable values are: raw, qcow2, lvm,
+# rbd, default. If default is specified, then use_cow_images
+# flag is used instead of this one. (string value)
+# Deprecated group/name - [DEFAULT]/libvirt_images_type
+#images_type=default
+
+# LVM Volume Group that is used for VM images, when you
+# specify images_type=lvm. (string value)
+# Deprecated group/name - [DEFAULT]/libvirt_images_volume_group
+#images_volume_group=<None>
+
+# Create sparse logical volumes (with virtualsize) if this
+# flag is set to True. (boolean value)
+# Deprecated group/name - [DEFAULT]/libvirt_sparse_logical_volumes
+#sparse_logical_volumes=false
+
+# Method used to wipe old volumes (valid options are: none,
+# zero, shred) (string value)
+#volume_clear=zero
+
+# Size in MiB to wipe at start of old volumes. 0 => all
+# (integer value)
+#volume_clear_size=0
+
+# The RADOS pool in which rbd volumes are stored (string
+# value)
+# Deprecated group/name - [DEFAULT]/libvirt_images_rbd_pool
+#images_rbd_pool=rbd
+
+# Path to the ceph configuration file to use (string value)
+# Deprecated group/name - [DEFAULT]/libvirt_images_rbd_ceph_conf
+#images_rbd_ceph_conf=
+
+
+#
+# Options defined in nova.virt.libvirt.imagecache
+#
+
+# Allows image information files to be stored in non-standard
+# locations (string value)
+#image_info_filename_pattern=$instances_path/$image_cache_subdirectory_name/%(image)s.info
+
+# Should unused kernel images be removed? This is only safe to
+# enable if all compute nodes have been updated to support
+# this option. This will be enabled by default in future.
+# (boolean value)
+#remove_unused_kernels=false
+
+# Unused resized base images younger than this will not be
+# removed (integer value)
+#remove_unused_resized_minimum_age_seconds=3600
+
+# Write a checksum for files in _base to disk (boolean value)
+#checksum_base_images=false
+
+# How frequently to checksum base images (integer value)
+#checksum_interval_seconds=3600
+
+
+#
+# Options defined in nova.virt.libvirt.utils
+#
+
+# Compress snapshot images when possible. This currently
+# applies exclusively to qcow2 images (boolean value)
+# Deprecated group/name - [DEFAULT]/libvirt_snapshot_compression
+#snapshot_compression=false
+
+
+#
+# Options defined in nova.virt.libvirt.vif
+#
+
+# Use virtio for bridge interfaces with KVM/QEMU (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/libvirt_use_virtio_for_bridges
+#use_virtio_for_bridges=true
+
+
+#
+# Options defined in nova.virt.libvirt.volume
+#
+
+# Number of times to rescan iSCSI target to find volume
+# (integer value)
+#num_iscsi_scan_tries=5
+
+# Number of times to rescan iSER target to find volume
+# (integer value)
+#num_iser_scan_tries=5
+
+# The RADOS client name for accessing rbd volumes (string
+# value)
+#rbd_user=<None>
+
+# The libvirt UUID of the secret for rbd_user volumes
+# (string value)
+#rbd_secret_uuid=<None>
+
+# Directory where the NFS volume is mounted on the compute
+# node (string value)
+#nfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the NFS client. See the nfs man
+# page for details (string value)
+#nfs_mount_options=<None>
+
+# Number of times to rediscover AoE target to find volume
+# (integer value)
+#num_aoe_discover_tries=3
+
+# Directory where the glusterfs volume is mounted on the
+# compute node (string value)
+#glusterfs_mount_point_base=$state_path/mnt
+
+# Use multipath connection of the iSCSI volume (boolean value)
+# Deprecated group/name - [DEFAULT]/libvirt_iscsi_use_multipath
+#iscsi_use_multipath=false
+
+# Use multipath connection of the iSER volume (boolean value)
+# Deprecated group/name - [DEFAULT]/libvirt_iser_use_multipath
+#iser_use_multipath=false
+
+# Path or URL to Scality SOFS configuration file (string
+# value)
+#scality_sofs_config=<None>
+
+# Base dir where Scality SOFS shall be mounted (string value)
+#scality_sofs_mount_point=$state_path/scality
+
+# Protocols listed here will be accessed directly from QEMU.
+# Currently supported protocols: [gluster] (list value)
+#qemu_allowed_storage_drivers=
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[metrics]
+
+#
+# Options defined in nova.scheduler.weights.metrics
+#
+
+# Multiplier used for weighing metrics. (floating point value)
+#weight_multiplier=1.0
+
+# How the metrics are going to be weighed. This should be in
+# the form of "<name1>=<ratio1>, <name2>=<ratio2>, ...", where
+# <nameX> is one of the metrics to be weighed, and <ratioX> is
+# the corresponding ratio. For example, with "name1=1.0,
+# name2=-1.0" the final weight would be name1.value * 1.0 +
+# name2.value * -1.0. (list value)
+#weight_setting=
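+
+# Illustrative example (the metric names below are placeholders and
+# may not exist on a given deployment): to weigh hosts up on one
+# metric and down on another, this could be set as:
+#   weight_setting=foo.metric=1.0,bar.metric=-1.0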
+
+# How to treat unavailable metrics. If set to True, a host that
+# is missing one of the metrics raises an exception, so it is
+# recommended to use the scheduler filter MetricFilter to filter
+# out those hosts. If set to False, the unavailable metric is
+# treated as a negative factor in the weighing process and the
+# returned value is set by the option weight_of_unavailable.
+# (boolean value)
+#required=true
+
+# The final weight value to be returned if required is set to
+# False and any one of the metrics set by weight_setting is
+# unavailable. (floating point value)
+#weight_of_unavailable=-10000.0
+
+
+[osapi_v3]
+
+#
+# Options defined in nova.api.openstack
+#
+
+# Whether the V3 API is enabled or not (boolean value)
+#enabled=false
+
+# A list of v3 API extensions to never load. Specify the
+# extension aliases here. (list value)
+#extensions_blacklist=
+
+# If the list is not empty then a v3 API extension will only
+# be loaded if it exists in this list. Specify the extension
+# aliases here. (list value)
+#extensions_whitelist=
+
+
+[rdp]
+
+#
+# Options defined in nova.rdp
+#
+
+# Location of RDP html5 console proxy, in the form
+# "http://127.0.0.1:6083/" (string value)
+#html5_proxy_base_url=http://127.0.0.1:6083/
+
+# Enable RDP related features (boolean value)
+#enabled=false
+
+
+[spice]
+
+#
+# Options defined in nova.spice
+#
+
+# Location of spice HTML5 console proxy, in the form
+# "http://127.0.0.1:6082/spice_auto.html" (string value)
+#html5proxy_base_url=http://127.0.0.1:6082/spice_auto.html
+
+# IP address on which instance spice server should listen
+# (string value)
+#server_listen=127.0.0.1
+
+# The address to which proxy clients (like
+# nova-spicehtml5proxy) should connect (string value)
+#server_proxyclient_address=127.0.0.1
+
+# Enable spice related features (boolean value)
+#enabled=false
+
+# Enable spice guest agent support (boolean value)
+#agent_enabled=true
+
+# Keymap for spice (string value)
+#keymap=en-us
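+
+# Illustrative example (assumes a single host at the placeholder
+# address 192.0.2.10 running both the compute and proxy services):
+# SPICE consoles could be enabled with settings such as:
+#   enabled=true
+#   server_listen=0.0.0.0
+#   server_proxyclient_address=192.0.2.10
+#   html5proxy_base_url=http://192.0.2.10:6082/spice_auto.html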
+
+
+[ssl]
+
+#
+# Options defined in nova.openstack.common.sslutils
+#
+
+# CA certificate file to use to verify connecting clients.
+# (string value)
+#ca_file=<None>
+
+# Certificate file to use when starting the server securely.
+# (string value)
+#cert_file=<None>
+
+# Private key file to use when starting the server securely.
+# (string value)
+#key_file=<None>
+
+
+[trusted_computing]
+
+#
+# Options defined in nova.scheduler.filters.trusted_filter
+#
+
+# Attestation server HTTP (string value)
+#attestation_server=<None>
+
+# Attestation server Cert file for Identity verification
+# (string value)
+#attestation_server_ca_file=<None>
+
+# Attestation server port (string value)
+#attestation_port=8443
+
+# Attestation web API URL (string value)
+#attestation_api_url=/OpenAttestationWebServices/V1.0
+
+# Attestation authorization blob - must change (string value)
+#attestation_auth_blob=<None>
+
+# Attestation status cache valid period length (integer value)
+#attestation_auth_timeout=60
+
+
+[upgrade_levels]
+
+#
+# Options defined in nova.baserpc
+#
+
+# Set a version cap for messages sent to the base api in any
+# service (string value)
+#baseapi=<None>
+
+
+#
+# Options defined in nova.cells.rpc_driver
+#
+
+# Set a version cap for messages sent between cells services
+# (string value)
+#intercell=<None>
+
+
+#
+# Options defined in nova.cells.rpcapi
+#
+
+# Set a version cap for messages sent to local cells services
+# (string value)
+#cells=<None>
+
+
+#
+# Options defined in nova.cert.rpcapi
+#
+
+# Set a version cap for messages sent to cert services (string
+# value)
+#cert=<None>
+
+
+#
+# Options defined in nova.compute.rpcapi
+#
+
+# Set a version cap for messages sent to compute services. If
+# you plan to do a live upgrade from havana to icehouse, you
+# should set this option to "icehouse-compat" before beginning
+# the live upgrade procedure. (string value)
+#compute=<None>
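+
+# For example, per the note above, during a live havana -> icehouse
+# upgrade this would temporarily be set to:
+#   compute=icehouse-compat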
+
+
+#
+# Options defined in nova.conductor.rpcapi
+#
+
+# Set a version cap for messages sent to conductor services
+# (string value)
+#conductor=<None>
+
+
+#
+# Options defined in nova.console.rpcapi
+#
+
+# Set a version cap for messages sent to console services
+# (string value)
+#console=<None>
+
+
+#
+# Options defined in nova.consoleauth.rpcapi
+#
+
+# Set a version cap for messages sent to consoleauth services
+# (string value)
+#consoleauth=<None>
+
+
+#
+# Options defined in nova.network.rpcapi
+#
+
+# Set a version cap for messages sent to network services
+# (string value)
+#network=<None>
+
+
+#
+# Options defined in nova.scheduler.rpcapi
+#
+
+# Set a version cap for messages sent to scheduler services
+# (string value)
+#scheduler=<None>
+
+
+[vmware]
+
+#
+# Options defined in nova.virt.vmwareapi.driver
+#
+
+# Hostname or IP address for connection to VMware ESX/VC host.
+# (string value)
+#host_ip=<None>
+
+# Username for connection to VMware ESX/VC host. (string
+# value)
+#host_username=<None>
+
+# Password for connection to VMware ESX/VC host. (string
+# value)
+#host_password=<None>
+
+# Name of a VMware Cluster ComputeResource. Used only if
+# compute_driver is vmwareapi.VMwareVCDriver. (multi valued)
+#cluster_name=<None>
+
+# Regex to match the name of a datastore. (string value)
+#datastore_regex=<None>
+
+# The interval used for polling of remote tasks. (floating
+# point value)
+#task_poll_interval=0.5
+
+# The number of times we retry on failures, e.g., socket
+# error, etc. (integer value)
+#api_retry_count=10
+
+# VNC starting port (integer value)
+#vnc_port=5900
+
+# Total number of VNC ports (integer value)
+#vnc_port_total=10000
+
+# Whether to use linked clone (boolean value)
+#use_linked_clone=true
+
+
+#
+# Options defined in nova.virt.vmwareapi.vif
+#
+
+# Physical ethernet adapter name for vlan networking (string
+# value)
+#vlan_interface=vmnic0
+
+
+#
+# Options defined in nova.virt.vmwareapi.vim
+#
+
+# Optional VIM Service WSDL Location, e.g.
+# http://<server>/vimService.wsdl. Overrides the default
+# location; intended as a workaround for WSDL bugs (string value)
+#wsdl_location=<None>
+
+
+#
+# Options defined in nova.virt.vmwareapi.vim_util
+#
+
+# The maximum number of ObjectContent data objects that should
+# be returned in a single result. A positive value will cause
+# the operation to suspend the retrieval when the count of
+# objects reaches the specified maximum. The server may still
+# limit the count to something less than the configured value.
+# Any remaining objects may be retrieved with additional
+# requests. (integer value)
+#maximum_objects=100
+
+
+#
+# Options defined in nova.virt.vmwareapi.vmops
+#
+
+# Name of Integration Bridge (string value)
+#integration_bridge=br-int
+
+
+[xenserver]
+
+#
+# Options defined in nova.virt.xenapi.agent
+#
+
+# Number of seconds to wait for agent reply (integer value)
+# Deprecated group/name - [DEFAULT]/agent_timeout
+#agent_timeout=30
+
+# Number of seconds to wait for agent to be fully operational
+# (integer value)
+# Deprecated group/name - [DEFAULT]/agent_version_timeout
+#agent_version_timeout=300
+
+# Number of seconds to wait for agent reply to resetnetwork
+# request (integer value)
+# Deprecated group/name - [DEFAULT]/agent_resetnetwork_timeout
+#agent_resetnetwork_timeout=60
+
+# Specifies the path in which the XenAPI guest agent should be
+# located. If the agent is present, network configuration is
+# not injected into the image. Used if
+# compute_driver=xenapi.XenAPIDriver and flat_injected=True
+# (string value)
+# Deprecated group/name - [DEFAULT]/xenapi_agent_path
+#agent_path=usr/sbin/xe-update-networking
+
+# Disables the use of the XenAPI agent in any image regardless
+# of what image properties are present. (boolean value)
+# Deprecated group/name - [DEFAULT]/xenapi_disable_agent
+#disable_agent=false
+
+# Determines if the XenAPI agent should be used when the image
+# used does not contain a hint to declare if the agent is
+# present or not. The hint is a glance property
+# "xenapi_use_agent" that has the value "True" or "False".
+# Note that waiting for the agent when it is not present will
+# significantly increase server boot times. (boolean value)
+# Deprecated group/name - [DEFAULT]/xenapi_use_agent_default
+#use_agent_default=false
+
+
+#
+# Options defined in nova.virt.xenapi.client.session
+#
+
+# Timeout in seconds for XenAPI login. (integer value)
+# Deprecated group/name - [DEFAULT]/xenapi_login_timeout
+#login_timeout=10
+
+# Maximum number of concurrent XenAPI connections. Used only
+# if compute_driver=xenapi.XenAPIDriver (integer value)
+# Deprecated group/name - [DEFAULT]/xenapi_connection_concurrent
+#connection_concurrent=5
+
+
+#
+# Options defined in nova.virt.xenapi.driver
+#
+
+# URL for connection to XenServer/Xen Cloud Platform. A
+# special value of unix://local can be used to connect to the
+# local unix socket. Required if
+# compute_driver=xenapi.XenAPIDriver (string value)
+# Deprecated group/name - [DEFAULT]/xenapi_connection_url
+#connection_url=<None>
+
+# Username for connection to XenServer/Xen Cloud Platform.
+# Used only if compute_driver=xenapi.XenAPIDriver (string
+# value)
+# Deprecated group/name - [DEFAULT]/xenapi_connection_username
+#connection_username=root
+
+# Password for connection to XenServer/Xen Cloud Platform.
+# Used only if compute_driver=xenapi.XenAPIDriver (string
+# value)
+# Deprecated group/name - [DEFAULT]/xenapi_connection_password
+#connection_password=<None>
+
+# The interval used for polling of coalescing vhds. Used only
+# if compute_driver=xenapi.XenAPIDriver (floating point value)
+# Deprecated group/name - [DEFAULT]/xenapi_vhd_coalesce_poll_interval
+#vhd_coalesce_poll_interval=5.0
+
+# Ensure compute service is running on host XenAPI connects
+# to. (boolean value)
+# Deprecated group/name - [DEFAULT]/xenapi_check_host
+#check_host=true
+
+# Max number of times to poll for VHD to coalesce. Used only
+# if compute_driver=xenapi.XenAPIDriver (integer value)
+# Deprecated group/name - [DEFAULT]/xenapi_vhd_coalesce_max_attempts
+#vhd_coalesce_max_attempts=20
+
+# Base path to the storage repository (string value)
+# Deprecated group/name - [DEFAULT]/xenapi_sr_base_path
+#sr_base_path=/var/run/sr-mount
+
+# The iSCSI Target Host (string value)
+# Deprecated group/name - [DEFAULT]/target_host
+#target_host=<None>
+
+# The iSCSI Target Port, default is port 3260 (string value)
+# Deprecated group/name - [DEFAULT]/target_port
+#target_port=3260
+
+# IQN Prefix (string value)
+# Deprecated group/name - [DEFAULT]/iqn_prefix
+#iqn_prefix=iqn.2010-10.org.openstack
+
+# Used to enable the remapping of VBD dev (Works around an
+# issue in Ubuntu Maverick) (boolean value)
+# Deprecated group/name - [DEFAULT]/xenapi_remap_vbd_dev
+#remap_vbd_dev=false
+
+# Specify prefix to remap VBD dev to (ex. /dev/xvdb ->
+# /dev/sdb) (string value)
+# Deprecated group/name - [DEFAULT]/xenapi_remap_vbd_dev_prefix
+#remap_vbd_dev_prefix=sd
+
+
+#
+# Options defined in nova.virt.xenapi.image.bittorrent
+#
+
+# Base URL for torrent files. (string value)
+# Deprecated group/name - [DEFAULT]/xenapi_torrent_base_url
+#torrent_base_url=<None>
+
+# Probability that peer will become a seeder. (1.0 = 100%)
+# (floating point value)
+# Deprecated group/name - [DEFAULT]/xenapi_torrent_seed_chance
+#torrent_seed_chance=1.0
+
+# Number of seconds after downloading an image via BitTorrent
+# that it should be seeded for other peers. (integer value)
+# Deprecated group/name - [DEFAULT]/xenapi_torrent_seed_duration
+#torrent_seed_duration=3600
+
+# Cached torrent files not accessed within this number of
+# seconds can be reaped (integer value)
+# Deprecated group/name - [DEFAULT]/xenapi_torrent_max_last_accessed
+#torrent_max_last_accessed=86400
+
+# Beginning of port range to listen on (integer value)
+# Deprecated group/name - [DEFAULT]/xenapi_torrent_listen_port_start
+#torrent_listen_port_start=6881
+
+# End of port range to listen on (integer value)
+# Deprecated group/name - [DEFAULT]/xenapi_torrent_listen_port_end
+#torrent_listen_port_end=6891
+
+# Number of seconds a download can remain at the same progress
+# percentage w/o being considered a stall (integer value)
+# Deprecated group/name - [DEFAULT]/xenapi_torrent_download_stall_cutoff
+#torrent_download_stall_cutoff=600
+
+# Maximum number of seeder processes to run concurrently
+# within a given dom0. (-1 = no limit) (integer value)
+# Deprecated group/name - [DEFAULT]/xenapi_torrent_max_seeder_processes_per_host
+#torrent_max_seeder_processes_per_host=1
+
+
+#
+# Options defined in nova.virt.xenapi.pool
+#
+
+# Use join --force when adding a host to a pool; needed for
+# hosts with different CPUs (boolean value)
+# Deprecated group/name - [DEFAULT]/use_join_force
+#use_join_force=true
+
+
+#
+# Options defined in nova.virt.xenapi.vif
+#
+
+# Name of Integration Bridge used by Open vSwitch (string
+# value)
+# Deprecated group/name - [DEFAULT]/xenapi_ovs_integration_bridge
+#ovs_integration_bridge=xapi1
+
+
+#
+# Options defined in nova.virt.xenapi.vm_utils
+#
+
+# Cache glance images locally. `all` will cache all images,
+# `some` will only cache images that have the image_property
+# `cache_in_nova=True`, and `none` turns off caching entirely
+# (string value)
+# Deprecated group/name - [DEFAULT]/cache_images
+#cache_images=all
+
+# Compression level for images, e.g., 9 for gzip -9. Range is
+# 1-9, 9 being most compressed but most CPU intensive on dom0.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/xenapi_image_compression_level
+#image_compression_level=<None>
+
+# Default OS type (string value)
+# Deprecated group/name - [DEFAULT]/default_os_type
+#default_os_type=linux
+
+# Time to wait for a block device to be created (integer
+# value)
+# Deprecated group/name - [DEFAULT]/block_device_creation_timeout
+#block_device_creation_timeout=10
+
+# Maximum size in bytes of kernel or ramdisk images (integer
+# value)
+# Deprecated group/name - [DEFAULT]/max_kernel_ramdisk_size
+#max_kernel_ramdisk_size=16777216
+
+# Filter for finding the SR to be used to install guest
+# instances on. To use the Local Storage in default
+# XenServer/XCP installations set this flag to
+# other-config:i18n-key=local-storage. To select an SR with a
+# different matching criteria, you could set it to
+# other-config:my_favorite_sr=true. On the other hand, to fall
+# back on the Default SR, as displayed by XenCenter, set this
+# flag to: default-sr:true (string value)
+# Deprecated group/name - [DEFAULT]/sr_matching_filter
+#sr_matching_filter=default-sr:true
+
+# Whether to use sparse_copy for copying data on a resize down
+# (False will use standard dd). This speeds up resizes down
+# considerably since large runs of zeros won't have to be
+# rsynced (boolean value)
+# Deprecated group/name - [DEFAULT]/xenapi_sparse_copy
+#sparse_copy=true
+
+# Maximum number of retries to unplug VBD (integer value)
+# Deprecated group/name - [DEFAULT]/xenapi_num_vbd_unplug_retries
+#num_vbd_unplug_retries=10
+
+# Whether or not to download images via Bit Torrent
+# (all|some|none). (string value)
+# Deprecated group/name - [DEFAULT]/xenapi_torrent_images
+#torrent_images=none
+
+# Name of network to use for booting iPXE ISOs (string value)
+# Deprecated group/name - [DEFAULT]/xenapi_ipxe_network_name
+#ipxe_network_name=<None>
+
+# URL to the iPXE boot menu (string value)
+# Deprecated group/name - [DEFAULT]/xenapi_ipxe_boot_menu_url
+#ipxe_boot_menu_url=<None>
+
+# Name and optionally path of the tool used for ISO image
+# creation (string value)
+# Deprecated group/name - [DEFAULT]/xenapi_ipxe_mkisofs_cmd
+#ipxe_mkisofs_cmd=mkisofs
+
+
+#
+# Options defined in nova.virt.xenapi.vmops
+#
+
+# Number of seconds to wait for instance to go to running
+# state (integer value)
+# Deprecated group/name - [DEFAULT]/xenapi_running_timeout
+#running_timeout=60
+
+# The XenAPI VIF driver using XenServer Network APIs. (string
+# value)
+# Deprecated group/name - [DEFAULT]/xenapi_vif_driver
+#vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver
+
+# Dom0 plugin driver used to handle image uploads. (string
+# value)
+# Deprecated group/name - [DEFAULT]/xenapi_image_upload_handler
+#image_upload_handler=nova.virt.xenapi.image.glance.GlanceStore
+
+
+#
+# Options defined in nova.virt.xenapi.volume_utils
+#
+
+# Number of seconds to wait for an SR to settle if the VDI
+# does not exist when first introduced (integer value)
+#introduce_vdi_retry_wait=20
+
+
+[zookeeper]
+
+#
+# Options defined in nova.servicegroup.drivers.zk
+#
+
+# The ZooKeeper addresses for servicegroup service in the
+# format of host1:port,host2:port,host3:port (string value)
+#address=<None>
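+
+# Illustrative example (placeholder addresses) in the format
+# described above:
+#   address=192.0.2.11:2181,192.0.2.12:2181,192.0.2.13:2181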
+
+# The recv_timeout parameter for the zk session (integer
+# value)
+#recv_timeout=4000
+
+# The prefix used in ZooKeeper to store ephemeral nodes
+# (string value)
+#sg_prefix=/servicegroups
+
+# Number of seconds to wait until retrying to join the session
+# (integer value)
+#sg_retry_interval=5
+
+
+
diff --git a/openstack/etc/nova/policy.json b/openstack/etc/nova/policy.json
new file mode 100644
index 00000000..cc5b8ea4
--- /dev/null
+++ b/openstack/etc/nova/policy.json
@@ -0,0 +1,324 @@
+{
+ "context_is_admin": "role:admin",
+ "admin_or_owner": "is_admin:True or project_id:%(project_id)s",
+ "default": "rule:admin_or_owner",
+
+ "cells_scheduler_filter:TargetCellFilter": "is_admin:True",
+
+ "compute:create": "",
+ "compute:create:attach_network": "",
+ "compute:create:attach_volume": "",
+ "compute:create:forced_host": "is_admin:True",
+ "compute:get_all": "",
+ "compute:get_all_tenants": "",
+ "compute:start": "rule:admin_or_owner",
+ "compute:stop": "rule:admin_or_owner",
+ "compute:unlock_override": "rule:admin_api",
+
+ "compute:shelve": "",
+ "compute:shelve_offload": "",
+ "compute:unshelve": "",
+
+ "compute:volume_snapshot_create": "",
+ "compute:volume_snapshot_delete": "",
+
+ "admin_api": "is_admin:True",
+ "compute:v3:servers:start": "rule:admin_or_owner",
+ "compute:v3:servers:stop": "rule:admin_or_owner",
+ "compute_extension:v3:os-access-ips:discoverable": "",
+ "compute_extension:v3:os-access-ips": "",
+ "compute_extension:accounts": "rule:admin_api",
+ "compute_extension:admin_actions": "rule:admin_api",
+ "compute_extension:admin_actions:pause": "rule:admin_or_owner",
+ "compute_extension:admin_actions:unpause": "rule:admin_or_owner",
+ "compute_extension:admin_actions:suspend": "rule:admin_or_owner",
+ "compute_extension:admin_actions:resume": "rule:admin_or_owner",
+ "compute_extension:admin_actions:lock": "rule:admin_or_owner",
+ "compute_extension:admin_actions:unlock": "rule:admin_or_owner",
+ "compute_extension:admin_actions:resetNetwork": "rule:admin_api",
+ "compute_extension:admin_actions:injectNetworkInfo": "rule:admin_api",
+ "compute_extension:admin_actions:createBackup": "rule:admin_or_owner",
+ "compute_extension:admin_actions:migrateLive": "rule:admin_api",
+ "compute_extension:admin_actions:resetState": "rule:admin_api",
+ "compute_extension:admin_actions:migrate": "rule:admin_api",
+ "compute_extension:v3:os-admin-actions": "rule:admin_api",
+ "compute_extension:v3:os-admin-actions:discoverable": "",
+ "compute_extension:v3:os-admin-actions:reset_network": "rule:admin_api",
+ "compute_extension:v3:os-admin-actions:inject_network_info": "rule:admin_api",
+ "compute_extension:v3:os-admin-actions:reset_state": "rule:admin_api",
+ "compute_extension:v3:os-admin-password": "",
+ "compute_extension:v3:os-admin-password:discoverable": "",
+ "compute_extension:aggregates": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:discoverable": "",
+ "compute_extension:v3:os-aggregates:index": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:create": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:show": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:update": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:delete": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:add_host": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:remove_host": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:set_metadata": "rule:admin_api",
+ "compute_extension:agents": "rule:admin_api",
+ "compute_extension:v3:os-agents": "rule:admin_api",
+ "compute_extension:v3:os-agents:discoverable": "",
+ "compute_extension:attach_interfaces": "",
+ "compute_extension:v3:os-attach-interfaces": "",
+ "compute_extension:v3:os-attach-interfaces:discoverable": "",
+ "compute_extension:baremetal_nodes": "rule:admin_api",
+ "compute_extension:cells": "rule:admin_api",
+ "compute_extension:v3:os-cells": "rule:admin_api",
+ "compute_extension:v3:os-cells:discoverable": "",
+ "compute_extension:certificates": "",
+ "compute_extension:v3:os-certificates:create": "",
+ "compute_extension:v3:os-certificates:show": "",
+ "compute_extension:v3:os-certificates:discoverable": "",
+ "compute_extension:cloudpipe": "rule:admin_api",
+ "compute_extension:cloudpipe_update": "rule:admin_api",
+ "compute_extension:console_output": "",
+ "compute_extension:v3:consoles:discoverable": "",
+ "compute_extension:v3:os-console-output:discoverable": "",
+ "compute_extension:v3:os-console-output": "",
+ "compute_extension:consoles": "",
+ "compute_extension:v3:os-remote-consoles": "",
+ "compute_extension:v3:os-remote-consoles:discoverable": "",
+ "compute_extension:createserverext": "",
+ "compute_extension:v3:os-create-backup:discoverable": "",
+ "compute_extension:v3:os-create-backup": "rule:admin_or_owner",
+ "compute_extension:deferred_delete": "",
+ "compute_extension:v3:os-deferred-delete": "",
+ "compute_extension:v3:os-deferred-delete:discoverable": "",
+ "compute_extension:disk_config": "",
+ "compute_extension:evacuate": "rule:admin_api",
+ "compute_extension:v3:os-evacuate": "rule:admin_api",
+ "compute_extension:v3:os-evacuate:discoverable": "",
+ "compute_extension:extended_server_attributes": "rule:admin_api",
+ "compute_extension:v3:os-extended-server-attributes": "rule:admin_api",
+ "compute_extension:v3:os-extended-server-attributes:discoverable": "",
+ "compute_extension:extended_status": "",
+ "compute_extension:v3:os-extended-status": "",
+ "compute_extension:v3:os-extended-status:discoverable": "",
+ "compute_extension:extended_availability_zone": "",
+ "compute_extension:v3:os-extended-availability-zone": "",
+ "compute_extension:v3:os-extended-availability-zone:discoverable": "",
+ "compute_extension:extended_ips": "",
+ "compute_extension:extended_ips_mac": "",
+ "compute_extension:extended_vif_net": "",
+ "compute_extension:v3:extension_info:discoverable": "",
+ "compute_extension:extended_volumes": "",
+ "compute_extension:v3:os-extended-volumes": "",
+ "compute_extension:v3:os-extended-volumes:swap": "",
+ "compute_extension:v3:os-extended-volumes:discoverable": "",
+ "compute_extension:v3:os-extended-volumes:attach": "",
+ "compute_extension:v3:os-extended-volumes:detach": "",
+ "compute_extension:fixed_ips": "rule:admin_api",
+ "compute_extension:flavor_access": "",
+ "compute_extension:flavor_access:addTenantAccess": "rule:admin_api",
+ "compute_extension:flavor_access:removeTenantAccess": "rule:admin_api",
+ "compute_extension:v3:flavor-access": "",
+ "compute_extension:v3:flavor-access:discoverable": "",
+ "compute_extension:v3:flavor-access:remove_tenant_access": "rule:admin_api",
+ "compute_extension:v3:flavor-access:add_tenant_access": "rule:admin_api",
+ "compute_extension:flavor_disabled": "",
+ "compute_extension:flavor_rxtx": "",
+ "compute_extension:v3:os-flavor-rxtx": "",
+ "compute_extension:v3:os-flavor-rxtx:discoverable": "",
+ "compute_extension:flavor_swap": "",
+ "compute_extension:flavorextradata": "",
+ "compute_extension:flavorextraspecs:index": "",
+ "compute_extension:flavorextraspecs:show": "",
+ "compute_extension:flavorextraspecs:create": "rule:admin_api",
+ "compute_extension:flavorextraspecs:update": "rule:admin_api",
+ "compute_extension:flavorextraspecs:delete": "rule:admin_api",
+ "compute_extension:v3:flavors:discoverable": "",
+ "compute_extension:v3:flavor-extra-specs:discoverable": "",
+ "compute_extension:v3:flavor-extra-specs:index": "",
+ "compute_extension:v3:flavor-extra-specs:show": "",
+ "compute_extension:v3:flavor-extra-specs:create": "rule:admin_api",
+ "compute_extension:v3:flavor-extra-specs:update": "rule:admin_api",
+ "compute_extension:v3:flavor-extra-specs:delete": "rule:admin_api",
+ "compute_extension:flavormanage": "rule:admin_api",
+ "compute_extension:v3:flavor-manage": "rule:admin_api",
+ "compute_extension:floating_ip_dns": "",
+ "compute_extension:floating_ip_pools": "",
+ "compute_extension:floating_ips": "",
+ "compute_extension:floating_ips_bulk": "rule:admin_api",
+ "compute_extension:fping": "",
+ "compute_extension:fping:all_tenants": "rule:admin_api",
+ "compute_extension:hide_server_addresses": "is_admin:False",
+ "compute_extension:v3:os-hide-server-addresses": "is_admin:False",
+ "compute_extension:v3:os-hide-server-addresses:discoverable": "",
+ "compute_extension:hosts": "rule:admin_api",
+ "compute_extension:v3:os-hosts": "rule:admin_api",
+ "compute_extension:v3:os-hosts:discoverable": "",
+ "compute_extension:hypervisors": "rule:admin_api",
+ "compute_extension:v3:os-hypervisors": "rule:admin_api",
+ "compute_extension:v3:os-hypervisors:discoverable": "",
+ "compute_extension:image_size": "",
+ "compute_extension:instance_actions": "",
+ "compute_extension:v3:os-server-actions": "",
+ "compute_extension:v3:os-server-actions:discoverable": "",
+ "compute_extension:instance_actions:events": "rule:admin_api",
+ "compute_extension:v3:os-server-actions:events": "rule:admin_api",
+ "compute_extension:instance_usage_audit_log": "rule:admin_api",
+ "compute_extension:v3:ips:discoverable": "",
+ "compute_extension:keypairs": "",
+ "compute_extension:keypairs:index": "",
+ "compute_extension:keypairs:show": "",
+ "compute_extension:keypairs:create": "",
+ "compute_extension:keypairs:delete": "",
+ "compute_extension:v3:keypairs:discoverable": "",
+ "compute_extension:v3:keypairs": "",
+ "compute_extension:v3:keypairs:index": "",
+ "compute_extension:v3:keypairs:show": "",
+ "compute_extension:v3:keypairs:create": "",
+ "compute_extension:v3:keypairs:delete": "",
+ "compute_extension:v3:os-lock-server:discoverable": "",
+ "compute_extension:v3:os-lock-server:lock": "rule:admin_or_owner",
+ "compute_extension:v3:os-lock-server:unlock": "rule:admin_or_owner",
+ "compute_extension:v3:os-migrate-server:discoverable": "",
+ "compute_extension:v3:os-migrate-server:migrate": "rule:admin_api",
+ "compute_extension:v3:os-migrate-server:migrate_live": "rule:admin_api",
+ "compute_extension:multinic": "",
+ "compute_extension:v3:os-multinic": "",
+ "compute_extension:v3:os-multinic:discoverable": "",
+ "compute_extension:networks": "rule:admin_api",
+ "compute_extension:networks:view": "",
+ "compute_extension:networks_associate": "rule:admin_api",
+ "compute_extension:v3:os-pause-server:discoverable": "",
+ "compute_extension:v3:os-pause-server:pause": "rule:admin_or_owner",
+ "compute_extension:v3:os-pause-server:unpause": "rule:admin_or_owner",
+ "compute_extension:v3:os-pci:pci_servers": "",
+ "compute_extension:v3:os-pci:discoverable": "",
+ "compute_extension:v3:os-pci:index": "rule:admin_api",
+ "compute_extension:v3:os-pci:detail": "rule:admin_api",
+ "compute_extension:v3:os-pci:show": "rule:admin_api",
+ "compute_extension:quotas:show": "",
+ "compute_extension:quotas:update": "rule:admin_api",
+ "compute_extension:quotas:delete": "rule:admin_api",
+ "compute_extension:v3:os-quota-sets:discoverable": "",
+ "compute_extension:v3:os-quota-sets:show": "",
+ "compute_extension:v3:os-quota-sets:update": "rule:admin_api",
+ "compute_extension:v3:os-quota-sets:delete": "rule:admin_api",
+ "compute_extension:v3:os-quota-sets:detail": "rule:admin_api",
+ "compute_extension:quota_classes": "",
+ "compute_extension:rescue": "",
+ "compute_extension:v3:os-rescue": "",
+ "compute_extension:v3:os-rescue:discoverable": "",
+ "compute_extension:v3:os-scheduler-hints:discoverable": "",
+ "compute_extension:security_group_default_rules": "rule:admin_api",
+ "compute_extension:security_groups": "",
+ "compute_extension:v3:os-security-groups": "",
+ "compute_extension:v3:os-security-groups:discoverable": "",
+ "compute_extension:server_diagnostics": "rule:admin_api",
+ "compute_extension:v3:os-server-diagnostics": "rule:admin_api",
+ "compute_extension:v3:os-server-diagnostics:discoverable": "",
+ "compute_extension:server_groups": "",
+ "compute_extension:server_password": "",
+ "compute_extension:v3:os-server-password": "",
+ "compute_extension:v3:os-server-password:discoverable": "",
+ "compute_extension:server_usage": "",
+ "compute_extension:v3:os-server-usage": "",
+ "compute_extension:v3:os-server-usage:discoverable": "",
+ "compute_extension:services": "rule:admin_api",
+ "compute_extension:v3:os-services": "rule:admin_api",
+ "compute_extension:v3:os-services:discoverable": "",
+ "compute_extension:v3:server-metadata:discoverable": "",
+ "compute_extension:v3:servers:discoverable": "",
+ "compute_extension:shelve": "",
+ "compute_extension:shelveOffload": "rule:admin_api",
+ "compute_extension:v3:os-shelve:shelve": "",
+ "compute_extension:v3:os-shelve:shelve:discoverable": "",
+ "compute_extension:v3:os-shelve:shelve_offload": "rule:admin_api",
+ "compute_extension:simple_tenant_usage:show": "rule:admin_or_owner",
+ "compute_extension:v3:os-suspend-server:discoverable": "",
+ "compute_extension:v3:os-suspend-server:suspend": "rule:admin_or_owner",
+ "compute_extension:v3:os-suspend-server:resume": "rule:admin_or_owner",
+ "compute_extension:simple_tenant_usage:list": "rule:admin_api",
+ "compute_extension:unshelve": "",
+ "compute_extension:v3:os-shelve:unshelve": "",
+ "compute_extension:users": "rule:admin_api",
+ "compute_extension:v3:os-user-data:discoverable": "",
+ "compute_extension:virtual_interfaces": "",
+ "compute_extension:virtual_storage_arrays": "",
+ "compute_extension:volumes": "",
+ "compute_extension:volume_attachments:index": "",
+ "compute_extension:volume_attachments:show": "",
+ "compute_extension:volume_attachments:create": "",
+ "compute_extension:volume_attachments:update": "",
+ "compute_extension:volume_attachments:delete": "",
+ "compute_extension:volumetypes": "",
+ "compute_extension:availability_zone:list": "",
+ "compute_extension:v3:os-availability-zone:list": "",
+ "compute_extension:v3:os-availability-zone:discoverable": "",
+ "compute_extension:availability_zone:detail": "rule:admin_api",
+ "compute_extension:v3:os-availability-zone:detail": "rule:admin_api",
+ "compute_extension:used_limits_for_admin": "rule:admin_api",
+ "compute_extension:migrations:index": "rule:admin_api",
+ "compute_extension:v3:os-migrations:index": "rule:admin_api",
+ "compute_extension:v3:os-migrations:discoverable": "",
+ "compute_extension:os-assisted-volume-snapshots:create": "rule:admin_api",
+ "compute_extension:os-assisted-volume-snapshots:delete": "rule:admin_api",
+ "compute_extension:console_auth_tokens": "rule:admin_api",
+ "compute_extension:v3:os-console-auth-tokens": "rule:admin_api",
+ "compute_extension:os-server-external-events:create": "rule:admin_api",
+ "compute_extension:v3:os-server-external-events:create": "rule:admin_api",
+
+ "volume:create": "",
+ "volume:get_all": "",
+ "volume:get_volume_metadata": "",
+ "volume:get_snapshot": "",
+ "volume:get_all_snapshots": "",
+
+
+ "volume_extension:types_manage": "rule:admin_api",
+ "volume_extension:types_extra_specs": "rule:admin_api",
+ "volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
+ "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
+ "volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
+
+
+ "network:get_all": "",
+ "network:get": "",
+ "network:create": "",
+ "network:delete": "",
+ "network:associate": "",
+ "network:disassociate": "",
+ "network:get_vifs_by_instance": "",
+ "network:allocate_for_instance": "",
+ "network:deallocate_for_instance": "",
+ "network:validate_networks": "",
+ "network:get_instance_uuids_by_ip_filter": "",
+ "network:get_instance_id_by_floating_address": "",
+ "network:setup_networks_on_host": "",
+ "network:get_backdoor_port": "",
+
+ "network:get_floating_ip": "",
+ "network:get_floating_ip_pools": "",
+ "network:get_floating_ip_by_address": "",
+ "network:get_floating_ips_by_project": "",
+ "network:get_floating_ips_by_fixed_address": "",
+ "network:allocate_floating_ip": "",
+ "network:deallocate_floating_ip": "",
+ "network:associate_floating_ip": "",
+ "network:disassociate_floating_ip": "",
+ "network:release_floating_ip": "",
+ "network:migrate_instance_start": "",
+ "network:migrate_instance_finish": "",
+
+ "network:get_fixed_ip": "",
+ "network:get_fixed_ip_by_address": "",
+ "network:add_fixed_ip_to_instance": "",
+ "network:remove_fixed_ip_from_instance": "",
+ "network:add_network_to_project": "",
+ "network:get_instance_nw_info": "",
+
+ "network:get_dns_domains": "",
+ "network:add_dns_entry": "",
+ "network:modify_dns_entry": "",
+ "network:delete_dns_entry": "",
+ "network:get_dns_entries_by_address": "",
+ "network:get_dns_entries_by_name": "",
+ "network:create_private_dns_domain": "",
+ "network:create_public_dns_domain": "",
+ "network:delete_dns_domain": ""
+}
diff --git a/openstack/etc/nova/release.sample b/openstack/etc/nova/release.sample
new file mode 100644
index 00000000..4c0d8e48
--- /dev/null
+++ b/openstack/etc/nova/release.sample
@@ -0,0 +1,4 @@
+[Nova]
+vendor = Fedora Project
+product = OpenStack Nova
+package = 1.fc18
diff --git a/openstack/etc/nova/rootwrap.conf b/openstack/etc/nova/rootwrap.conf
new file mode 100644
index 00000000..aa466c5d
--- /dev/null
+++ b/openstack/etc/nova/rootwrap.conf
@@ -0,0 +1,27 @@
+# Configuration for nova-rootwrap
+# This file should be owned by (and only-writeable by) the root user
+
+[DEFAULT]
+# List of directories to load filter definitions from (separated by ',').
+# These directories MUST all be only writeable by root !
+filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap
+
+# List of directories to search executables in, in case filters do not
+# explicitly specify a full path (separated by ',')
+# If not specified, defaults to system PATH environment variable.
+# These directories MUST all be only writeable by root !
+exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
+
+# Enable logging to syslog
+# Default value is False
+use_syslog=False
+
+# Which syslog facility to use.
+# Valid values include auth, authpriv, syslog, local0, local1...
+# Default value is 'syslog'
+syslog_log_facility=syslog
+
+# Which messages to log.
+# INFO means log all usage
+# ERROR means only log unsuccessful attempts
+syslog_log_level=ERROR
diff --git a/openstack/etc/nova/rootwrap.d/api-metadata.filters b/openstack/etc/nova/rootwrap.d/api-metadata.filters
new file mode 100644
index 00000000..1aa6f83e
--- /dev/null
+++ b/openstack/etc/nova/rootwrap.d/api-metadata.filters
@@ -0,0 +1,13 @@
+# nova-rootwrap command filters for api-metadata nodes
+# This is needed on nova-api hosts running with "metadata" in enabled_apis
+# or when running nova-api-metadata
+# This file should be owned by (and only-writeable by) the root user
+
+[Filters]
+# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
+iptables-save: CommandFilter, iptables-save, root
+ip6tables-save: CommandFilter, ip6tables-save, root
+
+# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
+iptables-restore: CommandFilter, iptables-restore, root
+ip6tables-restore: CommandFilter, ip6tables-restore, root
diff --git a/openstack/etc/nova/rootwrap.d/baremetal-compute-ipmi.filters b/openstack/etc/nova/rootwrap.d/baremetal-compute-ipmi.filters
new file mode 100644
index 00000000..4132a999
--- /dev/null
+++ b/openstack/etc/nova/rootwrap.d/baremetal-compute-ipmi.filters
@@ -0,0 +1,9 @@
+# nova-rootwrap command filters for compute nodes
+# This file should be owned by (and only-writeable by) the root user
+
+[Filters]
+# nova/virt/baremetal/ipmi.py: 'ipmitool', ..
+ipmitool: CommandFilter, ipmitool, root
+
+# nova/virt/baremetal/ipmi.py: 'kill', '-TERM', str(console_pid)
+kill_shellinaboxd: KillFilter, root, /usr/local/bin/shellinaboxd, -15, -TERM
diff --git a/openstack/etc/nova/rootwrap.d/baremetal-deploy-helper.filters b/openstack/etc/nova/rootwrap.d/baremetal-deploy-helper.filters
new file mode 100644
index 00000000..6d14b5d9
--- /dev/null
+++ b/openstack/etc/nova/rootwrap.d/baremetal-deploy-helper.filters
@@ -0,0 +1,11 @@
+# nova-rootwrap command filters for nova-baremetal-deploy-helper
+# This file should be owned by (and only-writeable by) the root user
+
+[Filters]
+# nova-baremetal-deploy-helper
+iscsiadm: CommandFilter, iscsiadm, root
+sfdisk: CommandFilter, sfdisk, root
+dd: CommandFilter, dd, root
+mkswap: CommandFilter, mkswap, root
+blkid: CommandFilter, blkid, root
+mkfs: CommandFilter, mkfs, root
diff --git a/openstack/etc/nova/rootwrap.d/compute.filters b/openstack/etc/nova/rootwrap.d/compute.filters
new file mode 100644
index 00000000..b79851b4
--- /dev/null
+++ b/openstack/etc/nova/rootwrap.d/compute.filters
@@ -0,0 +1,228 @@
+# nova-rootwrap command filters for compute nodes
+# This file should be owned by (and only-writeable by) the root user
+
+[Filters]
+# nova/virt/disk/mount/api.py: 'kpartx', '-a', device
+# nova/virt/disk/mount/api.py: 'kpartx', '-d', device
+kpartx: CommandFilter, kpartx, root
+
+# nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path
+# nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path
+tune2fs: CommandFilter, tune2fs, root
+
+# nova/virt/disk/mount/api.py: 'mount', mapped_device
+# nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target
+# nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'..
+# nova/virt/configdrive.py: 'mount', device, mountdir
+# nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ...
+mount: CommandFilter, mount, root
+
+# nova/virt/disk/mount/api.py: 'umount', mapped_device
+# nova/virt/disk/api.py: 'umount' target
+# nova/virt/xenapi/vm_utils.py: 'umount', dev_path
+# nova/virt/configdrive.py: 'umount', mountdir
+umount: CommandFilter, umount, root
+
+# nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image
+# nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device
+qemu-nbd: CommandFilter, qemu-nbd, root
+
+# nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image
+# nova/virt/disk/mount/loop.py: 'losetup', '--detach', device
+losetup: CommandFilter, losetup, root
+
+# nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path
+# nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device
+blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.*
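+# (Note: unlike CommandFilter, which only pins the executable and the
+# user it runs as, RegExpFilter also requires every argument of the
+# invocation to match the listed patterns, so blockdev may only be run
+# with --getsize64 or --flushbufs against /dev/* paths.)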
+
+# nova/virt/disk/vfs/localfs.py: 'tee', canonpath
+tee: CommandFilter, tee, root
+
+# nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath
+mkdir: CommandFilter, mkdir, root
+
+# nova/virt/disk/vfs/localfs.py: 'chown'
+# nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
+# nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
+# nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk')
+chown: CommandFilter, chown, root
+
+# nova/virt/disk/vfs/localfs.py: 'chmod'
+chmod: CommandFilter, chmod, root
+
+# nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
+# nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
+# nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
+# nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
+# nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
+# nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
+# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
+# nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
+# nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
+# nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
+# nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
+# nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
+# nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
+# nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
+# nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
+# nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
+# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
+# nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
+# nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
+# nova/network/linux_net.py: 'ip', 'route', 'add', ..
+# nova/network/linux_net.py: 'ip', 'route', 'del', .
+# nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
+ip: CommandFilter, ip, root
+
+# nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev
+# nova/network/linux_net.py: 'tunctl', '-b', '-t', dev
+tunctl: CommandFilter, tunctl, root
+
+# nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
+# nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
+# nova/network/linux_net.py: 'ovs-vsctl', ....
+ovs-vsctl: CommandFilter, ovs-vsctl, root
+
+# nova/network/linux_net.py: 'ovs-ofctl', ....
+ovs-ofctl: CommandFilter, ovs-ofctl, root
+
+# nova/virt/libvirt/connection.py: 'dd', if=%s % virsh_output, ...
+dd: CommandFilter, dd, root
+
+# nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ...
+iscsiadm: CommandFilter, iscsiadm, root
+
+# nova/virt/libvirt/volume.py: 'aoe-revalidate', aoedev
+# nova/virt/libvirt/volume.py: 'aoe-discover'
+aoe-revalidate: CommandFilter, aoe-revalidate, root
+aoe-discover: CommandFilter, aoe-discover, root
+
+# nova/virt/xenapi/vm_utils.py: parted, --script, ...
+# nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*.
+parted: CommandFilter, parted, root
+
+# nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path
+pygrub: CommandFilter, pygrub, root
+
+# nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s
+fdisk: CommandFilter, fdisk, root
+
+# nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path
+# nova/virt/disk/api.py: e2fsck, -f, -p, image
+e2fsck: CommandFilter, e2fsck, root
+
+# nova/virt/xenapi/vm_utils.py: resize2fs, partition_path
+# nova/virt/disk/api.py: resize2fs, image
+resize2fs: CommandFilter, resize2fs, root
+
+# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
+iptables-save: CommandFilter, iptables-save, root
+ip6tables-save: CommandFilter, ip6tables-save, root
+
+# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
+iptables-restore: CommandFilter, iptables-restore, root
+ip6tables-restore: CommandFilter, ip6tables-restore, root
+
+# nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
+# nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
+arping: CommandFilter, arping, root
+
+# nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
+dhcp_release: CommandFilter, dhcp_release, root
+
+# nova/network/linux_net.py: 'kill', '-9', pid
+# nova/network/linux_net.py: 'kill', '-HUP', pid
+kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
+
+# nova/network/linux_net.py: 'kill', pid
+kill_radvd: KillFilter, root, /usr/sbin/radvd
+
+# nova/network/linux_net.py: dnsmasq call
+dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq
+
+# nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
+radvd: CommandFilter, radvd, root
+
+# nova/network/linux_net.py: 'brctl', 'addbr', bridge
+# nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
+# nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
+# nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
+brctl: CommandFilter, brctl, root
+
+# nova/virt/libvirt/utils.py: 'mkswap'
+# nova/virt/xenapi/vm_utils.py: 'mkswap'
+mkswap: CommandFilter, mkswap, root
+
+# nova/virt/xenapi/vm_utils.py: 'mkfs'
+# nova/utils.py: 'mkfs', fs, path, label
+mkfs: CommandFilter, mkfs, root
+
+# nova/virt/libvirt/utils.py: 'qemu-img'
+qemu-img: CommandFilter, qemu-img, root
+
+# nova/virt/disk/vfs/localfs.py: 'readlink', '-e'
+readlink: CommandFilter, readlink, root
+
+# nova/virt/disk/api.py: 'touch', target
+touch: CommandFilter, touch, root
+
+# nova/virt/disk/api.py:
+mkfs.ext3: CommandFilter, mkfs.ext3, root
+mkfs.ntfs: CommandFilter, mkfs.ntfs, root
+
+# nova/virt/libvirt/connection.py:
+read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi
+
+# nova/virt/libvirt/connection.py:
+lvremove: CommandFilter, lvremove, root
+
+# nova/virt/libvirt/utils.py:
+lvcreate: CommandFilter, lvcreate, root
+
+# nova/virt/libvirt/utils.py:
+lvs: CommandFilter, lvs, root
+
+# nova/virt/libvirt/utils.py:
+vgs: CommandFilter, vgs, root
+
+# nova/virt/baremetal/volume_driver.py: 'tgtadm', '--lld', 'iscsi', ...
+tgtadm: CommandFilter, tgtadm, root
+
+# nova/utils.py:read_file_as_root: 'cat', file_path
+# (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file)
+read_passwd: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd
+read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow
+
+# nova/virt/libvirt/volume.py: 'multipath' '-R'
+multipath: CommandFilter, multipath, root
+
+# nova/virt/libvirt/utils.py:
+systool: CommandFilter, systool, root
+
+# nova/virt/libvirt/volume.py:
+sginfo: CommandFilter, sginfo, root
+sg_scan: CommandFilter, sg_scan, root
+ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/ip-.*-iscsi-iqn.2010-10.org.openstack:volume-.*, /dev/disk/by-path/ip-.*-iscsi-iqn.2010-10.org.openstack:volume-.*
+
+# nova/volume/encryptors.py:
+# nova/virt/libvirt/dmcrypt.py:
+cryptsetup: CommandFilter, cryptsetup, root
+
+# nova/virt/xenapi/vm_utils.py:
+xenstore-read: CommandFilter, xenstore-read, root
+
+# nova/virt/baremetal/tilera.py: 'rpc.mountd'
+rpc.mountd: CommandFilter, rpc.mountd, root
+
+# nova/virt/libvirt/utils.py:
+rbd: CommandFilter, rbd, root
+
+# nova/virt/libvirt/utils.py: 'shred', '-n3', '-s%d' % volume_size, path
+shred: CommandFilter, shred, root
+
+# nova/virt/libvirt/volume.py: 'cp', '/dev/stdin', delete_control..
+cp: CommandFilter, cp, root
+
+# nova/virt/xenapi/vm_utils.py:
+sync: CommandFilter, sync, root
+
diff --git a/openstack/etc/nova/rootwrap.d/network.filters b/openstack/etc/nova/rootwrap.d/network.filters
new file mode 100644
index 00000000..568e8d49
--- /dev/null
+++ b/openstack/etc/nova/rootwrap.d/network.filters
@@ -0,0 +1,94 @@
+# nova-rootwrap command filters for network nodes
+# This file should be owned by (and only-writeable by) the root user
+
+[Filters]
+# nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
+# nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
+# nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
+# nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
+# nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
+# nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
+# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
+# nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
+# nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
+# nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
+# nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
+# nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
+# nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
+# nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
+# nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
+# nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
+# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
+# nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
+# nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
+# nova/network/linux_net.py: 'ip', 'route', 'add', ..
+# nova/network/linux_net.py: 'ip', 'route', 'del', .
+# nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
+ip: CommandFilter, ip, root
+
+# nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
+# nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
+# nova/network/linux_net.py: 'ovs-vsctl', ....
+ovs-vsctl: CommandFilter, ovs-vsctl, root
+
+# nova/network/linux_net.py: 'ovs-ofctl', ....
+ovs-ofctl: CommandFilter, ovs-ofctl, root
+
+# nova/virt/libvirt/vif.py: 'ivs-ctl', ...
+# nova/virt/libvirt/vif.py: 'ivs-ctl', 'del-port', ...
+# nova/network/linux_net.py: 'ivs-ctl', ....
+ivs-ctl: CommandFilter, ivs-ctl, root
+
+# nova/virt/libvirt/vif.py: 'ifc_ctl', ...
+ifc_ctl: CommandFilter, /opt/pg/bin/ifc_ctl, root
+
+# nova/virt/libvirt/vif.py: 'ebrctl', ...
+ebrctl: CommandFilter, ebrctl, root
+
+# nova/virt/libvirt/vif.py: 'mm-ctl', ...
+mm-ctl: CommandFilter, mm-ctl, root
+
+# nova/network/linux_net.py: 'ebtables', '-D' ...
+# nova/network/linux_net.py: 'ebtables', '-I' ...
+ebtables: CommandFilter, ebtables, root
+ebtables_usr: CommandFilter, ebtables, root
+
+# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
+iptables-save: CommandFilter, iptables-save, root
+ip6tables-save: CommandFilter, ip6tables-save, root
+
+# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
+iptables-restore: CommandFilter, iptables-restore, root
+ip6tables-restore: CommandFilter, ip6tables-restore, root
+
+# nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
+# nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
+arping: CommandFilter, arping, root
+
+# nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
+dhcp_release: CommandFilter, dhcp_release, root
+
+# nova/network/linux_net.py: 'kill', '-9', pid
+# nova/network/linux_net.py: 'kill', '-HUP', pid
+kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
+
+# nova/network/linux_net.py: 'kill', pid
+kill_radvd: KillFilter, root, /usr/sbin/radvd
+
+# nova/network/linux_net.py: dnsmasq call
+dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq
+
+# nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
+radvd: CommandFilter, radvd, root
+
+# nova/network/linux_net.py: 'brctl', 'addbr', bridge
+# nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
+# nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
+# nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
+brctl: CommandFilter, brctl, root
+
+# nova/network/linux_net.py: 'sysctl', ....
+sysctl: CommandFilter, sysctl, root
+
+# nova/network/linux_net.py: 'conntrack'
+conntrack: CommandFilter, conntrack, root
diff --git a/openstack/etc/systemd/system/openstack-glance-api.service b/openstack/etc/systemd/system/openstack-glance-api.service
new file mode 100644
index 00000000..7958f84c
--- /dev/null
+++ b/openstack/etc/systemd/system/openstack-glance-api.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=OpenStack Image Service (code-named Glance) API server
+After=syslog.target network.target
+
+[Service]
+Type=simple
+User=glance
+ExecStart=/usr/bin/glance-api --config-file /etc/glance/glance-api.conf
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/openstack/etc/systemd/system/openstack-glance-registry.service b/openstack/etc/systemd/system/openstack-glance-registry.service
new file mode 100644
index 00000000..18a60ae1
--- /dev/null
+++ b/openstack/etc/systemd/system/openstack-glance-registry.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=OpenStack Image Service (code-named Glance) Registry server
+After=syslog.target network.target
+
+[Service]
+Type=simple
+User=glance
+ExecStart=/usr/bin/glance-registry --config-file /etc/glance/glance-registry.conf
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/openstack/etc/systemd/system/openstack-glance-setup.service b/openstack/etc/systemd/system/openstack-glance-setup.service
new file mode 100644
index 00000000..a3ff217b
--- /dev/null
+++ b/openstack/etc/systemd/system/openstack-glance-setup.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Run openstack-glance-setup (once)
+Requires=local-fs.target
+After=local-fs.target openstack-keystone-setup.service
+
+[Service]
+Type=oneshot
+ExecStart=/usr/share/openstack/openstack-glance-setup
+Restart=no
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/etc/systemd/system/openstack-keystone-setup.service b/openstack/etc/systemd/system/openstack-keystone-setup.service
new file mode 100644
index 00000000..88e3c32e
--- /dev/null
+++ b/openstack/etc/systemd/system/openstack-keystone-setup.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Run openstack-keystone-setup (once)
+Requires=local-fs.target
+After=local-fs.target
+
+[Service]
+Type=oneshot
+ExecStart=/usr/share/openstack/openstack-keystone-setup
+Restart=no
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/etc/systemd/system/openstack-keystone.service b/openstack/etc/systemd/system/openstack-keystone.service
new file mode 100644
index 00000000..82b2d078
--- /dev/null
+++ b/openstack/etc/systemd/system/openstack-keystone.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=OpenStack Identity Service (code-named Keystone)
+After=syslog.target network.target
+
+[Service]
+Type=notify
+Restart=always
+User=keystone
+ExecStart=/usr/bin/keystone-all --config-file /etc/keystone/keystone.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/etc/systemd/system/openstack-nova-setup.service b/openstack/etc/systemd/system/openstack-nova-setup.service
new file mode 100644
index 00000000..20d56852
--- /dev/null
+++ b/openstack/etc/systemd/system/openstack-nova-setup.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Run openstack-nova-setup (once)
+Requires=local-fs.target
+After=local-fs.target openstack-keystone-setup.service
+
+[Service]
+Type=oneshot
+ExecStart=/usr/share/openstack/openstack-nova-setup
+Restart=no
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/manifest b/openstack/manifest
new file mode 100644
index 00000000..100cf0f9
--- /dev/null
+++ b/openstack/manifest
@@ -0,0 +1,51 @@
+0040755 0 0 /etc/keystone
+0040755 0 0 /var/lib/keystone
+0040755 0 0 /usr/share/openstack
+0100755 0 0 /usr/share/openstack/openstack-keystone-setup
+0100644 0 0 /etc/keystone/logging.conf
+0100644 0 0 /etc/keystone/keystone.conf
+0100644 0 0 /etc/keystone/policy.json
+0100644 0 0 /etc/keystone/keystone-paste.ini
+0100644 0 0 /etc/logrotate.d/openstack-keystone
+0100644 0 0 /etc/systemd/system/openstack-keystone.service
+0100644 0 0 /etc/systemd/system/openstack-keystone-setup.service
+0040755 0 0 /etc/glance
+0040755 0 0 /var/lib/glance
+0040755 0 0 /var/lib/glance/image-cache
+0040755 0 0 /var/lib/glance/image-cache/incomplete
+0040755 0 0 /var/lib/glance/image-cache/invalid
+0040755 0 0 /var/lib/glance/image-cache/queue
+0040755 0 0 /var/lib/glance/images
+0040755 0 0 /var/log/glance
+0100644 0 0 /etc/glance/logging.conf
+0100644 0 0 /etc/glance/glance-api.conf
+0100644 0 0 /etc/glance/glance-registry.conf
+0100644 0 0 /etc/glance/glance-scrubber.conf
+0100644 0 0 /etc/glance/glance-cache.conf
+0100644 0 0 /etc/glance/policy.json
+0100644 0 0 /etc/glance/glance-api-paste.ini
+0100644 0 0 /etc/glance/glance-registry-paste.ini
+0100644 0 0 /etc/logrotate.d/openstack-glance-api
+0100644 0 0 /etc/logrotate.d/openstack-glance-registry
+0100644 0 0 /etc/systemd/system/openstack-glance-setup.service
+0100644 0 0 /etc/systemd/system/openstack-glance-api.service
+0100644 0 0 /etc/systemd/system/openstack-glance-registry.service
+0100755 0 0 /usr/share/openstack/openstack-glance-setup
+0040755 0 0 /var/lib/nova
+0040755 0 0 /var/lock/nova
+0040755 0 0 /etc/nova
+0100644 0 0 /etc/nova/logging.conf
+0100644 0 0 /etc/nova/rootwrap.conf
+0100644 0 0 /etc/nova/nova.conf
+0100644 0 0 /etc/nova/policy.json
+0100644 0 0 /etc/nova/cells.json
+0100644 0 0 /etc/nova/api-paste.ini
+0040755 0 0 /etc/nova/rootwrap.d
+0100644 0 0 /etc/nova/rootwrap.d/api-metadata.filters
+0100644 0 0 /etc/nova/rootwrap.d/baremetal-compute-ipmi.filters
+0100644 0 0 /etc/nova/rootwrap.d/baremetal-deploy-helper.filters
+0100644 0 0 /etc/nova/rootwrap.d/compute.filters
+0100644 0 0 /etc/nova/rootwrap.d/network.filters
+0100755 0 0 /usr/share/openstack/openstack-nova-setup
+0100755 0 0 /usr/share/openstack/openstack-nova-compute-setup
+0100644 0 0 /etc/systemd/system/openstack-nova-setup.service
diff --git a/openstack/usr/share/openstack/openstack-glance-setup b/openstack/usr/share/openstack/openstack-glance-setup
new file mode 100644
index 00000000..63b73422
--- /dev/null
+++ b/openstack/usr/share/openstack/openstack-glance-setup
@@ -0,0 +1,57 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+# Create required system users and groups
+getent group glance >/dev/null || groupadd -r --gid 164 glance
+getent passwd glance >/dev/null || \
+ useradd --uid 164 -r -g glance -d /var/lib/glance -s /sbin/nologin \
+ -c "OpenStack Glance Daemons" glance
+
+# Create required keystone tenants, users and roles
+export OS_SERVICE_TOKEN=##KEYSTONE_TEMPORARY_ADMIN_TOKEN##
+export OS_SERVICE_ENDPOINT='http://localhost:35357/v2.0'
+
+keystone user-create --name ##GLANCE_SERVICE_USER## --pass ##GLANCE_SERVICE_PASSWORD##
+keystone user-role-add --tenant service --user ##GLANCE_SERVICE_USER## --role admin
+
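+# Register the Image service with keystone; the endpoint is created against
+# the service id extracted from `keystone service-list` with awk.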
+keystone service-create --name glance --type image --description "OpenStack Image Service"
+keystone endpoint-create --service-id $(keystone service-list | awk '/ image / {print $2}') \
+ --publicurl ##GLANCE_PUBLIC_URL## \
+ --internalurl ##GLANCE_INTERNAL_URL## \
+ --adminurl ##GLANCE_ADMIN_URL##
+
+# Setup the glance database
+if [ ! -e /var/lib/glance/glance.sqlite ]; then
+ chown glance:glance /var/lib/glance
+ chown glance:glance /var/log/glance
+ sudo -u glance glance-manage db_sync
+fi
+
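+# This setup unit is meant to run only once: remove its own enablement
+# symlink, then start the glance daemons and enable them for future boots.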
+rm /etc/systemd/system/multi-user.target.wants/openstack-glance-setup.service
+
+systemctl start openstack-glance-api
+systemctl start openstack-glance-registry
+
+ln -s "/etc/systemd/system/openstack-glance-api.service" \
+ "/etc/systemd/system/multi-user.target.wants/openstack-glance-api.service"
+
+ln -s "/etc/systemd/system/openstack-glance-registry.service" \
+ "/etc/systemd/system/multi-user.target.wants/openstack-glance-registry.service"
+
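+# At this point `glance image-list` (run with suitable OS_* credentials)
+# should return an empty list, confirming that the API and registry are up;
+# this is an optional, illustrative check.
+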
+exit 0
diff --git a/openstack/usr/share/openstack/openstack-keystone-setup b/openstack/usr/share/openstack/openstack-keystone-setup
new file mode 100644
index 00000000..9dc204d4
--- /dev/null
+++ b/openstack/usr/share/openstack/openstack-keystone-setup
@@ -0,0 +1,56 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+# Create required system users and groups
+
+getent group keystone >/dev/null || groupadd -r --gid 163 keystone
+getent passwd keystone >/dev/null || \
+ useradd --uid 163 -r -g keystone -d /var/lib/keystone -s /sbin/nologin \
+ -c "OpenStack Keystone Daemons" keystone
+
+# Setup the keystone database
+
+if [ ! -e /var/lib/keystone/keystone.sqlite ]; then
+ chown keystone:keystone /var/lib/keystone
+ sudo -u keystone keystone-manage db_sync
+fi
+
+systemctl start openstack-keystone
+
+export OS_SERVICE_TOKEN=##KEYSTONE_TEMPORARY_ADMIN_TOKEN##
+export OS_SERVICE_ENDPOINT='http://localhost:35357/v2.0'
+
+# This script creates a TEMPORARY admin user, with a password that may
+# float around on the system. Please delete this user once you have set up
+# the real admin user with a secure password.
+
+keystone tenant-create --name admin --description "Admin Tenant"
+keystone role-create --name admin
+
+keystone user-create --name temporary_admin --pass ##KEYSTONE_TEMPORARY_ADMIN_PASSWORD##
+keystone user-role-add --tenant admin --user temporary_admin --role admin
+
+keystone tenant-create --name service --description "Service Tenant"
+
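+# Once the real admin account exists, delete the temporary one by hand, for
+# example (illustrative commands and values, not run by this script):
+#   keystone user-create --name admin --pass <secure-password>
+#   keystone user-role-add --tenant admin --user admin --role admin
+#   keystone user-delete temporary_admin
+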
+rm /etc/systemd/system/multi-user.target.wants/openstack-keystone-setup.service
+
+ln -s "/etc/systemd/system/openstack-keystone.service" \
+ "/etc/systemd/system/multi-user.target.wants/openstack-keystone.service"
+
+exit 0
diff --git a/openstack/usr/share/openstack/openstack-nova-compute-setup b/openstack/usr/share/openstack/openstack-nova-compute-setup
new file mode 100644
index 00000000..9dc204d4
--- /dev/null
+++ b/openstack/usr/share/openstack/openstack-nova-compute-setup
@@ -0,0 +1,56 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+# Create required system users and groups
+
+getent group keystone >/dev/null || groupadd -r --gid 163 keystone
+getent passwd keystone >/dev/null || \
+ useradd --uid 163 -r -g keystone -d /var/lib/keystone -s /sbin/nologin \
+ -c "OpenStack Keystone Daemons" keystone
+
+# Setup the keystone database
+
+if [ ! -e /var/lib/keystone/keystone.sqlite ]; then
+ chown keystone:keystone /var/lib/keystone
+ sudo -u keystone keystone-manage db_sync
+fi
+
+systemctl start openstack-keystone
+
+export OS_SERVICE_TOKEN=##KEYSTONE_TEMPORARY_ADMIN_TOKEN##
+export OS_SERVICE_ENDPOINT='http://localhost:35357/v2.0'
+
+# This script creates a TEMPORARY admin user, with a password that may
+# float around on the system. Please delete this user once you have set up
+# the real admin user with a secure password.
+
+keystone tenant-create --name admin --description "Admin Tenant"
+keystone role-create --name admin
+
+keystone user-create --name temporary_admin --pass ##KEYSTONE_TEMPORARY_ADMIN_PASSWORD##
+keystone user-role-add --tenant admin --user temporary_admin --role admin
+
+keystone tenant-create --name service --description "Service Tenant"
+
+rm /etc/systemd/system/multi-user.target.wants/openstack-keystone-setup.service
+
+ln -s "/etc/systemd/system/openstack-keystone.service" \
+ "/etc/systemd/system/multi-user.target.wants/openstack-keystone.service"
+
+exit 0
diff --git a/openstack/usr/share/openstack/openstack-nova-setup b/openstack/usr/share/openstack/openstack-nova-setup
new file mode 100644
index 00000000..adb45ed8
--- /dev/null
+++ b/openstack/usr/share/openstack/openstack-nova-setup
@@ -0,0 +1,64 @@
+#!/bin/sh
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+
+# Create required system users and groups
+
+getent group nova >/dev/null || groupadd -r --gid 165 nova
+getent passwd nova >/dev/null || \
+ useradd --uid 165 -r -g nova -d /var/lib/nova -s /sbin/nologin \
+ -c "OpenStack Keystone Daemons" nova
+
+# Create the nova service user and register the compute service with keystone
+
+export OS_SERVICE_TOKEN=##KEYSTONE_TEMPORARY_ADMIN_TOKEN##
+export OS_SERVICE_ENDPOINT='http://localhost:35357/v2.0'
+
+keystone user-create --name ##NOVA_SERVICE_USER## --pass ##NOVA_SERVICE_PASSWORD##
+keystone user-role-add --tenant service --user ##NOVA_SERVICE_USER## --role admin
+
+keystone service-create --name nova --type compute --description "OpenStack Compute Service"
+keystone endpoint-create --service-id $(keystone service-list | awk '/ compute / {print $2}') \
+ --publicurl ##NOVA_PUBLIC_URL## \
+ --internalurl ##NOVA_INTERNAL_URL## \
+ --adminurl ##NOVA_ADMIN_URL## \
+ --region ##NOVA_REGION##
+
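+# A `keystone endpoint-list` at this point should include the compute
+# endpoint just registered (an optional, illustrative check).
+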
+# Setup the nova database
+
+if [ ! -e /var/lib/nova/nova.sqlite ]; then
+ chown nova:nova /var/lib/nova
+ sudo -u nova nova-manage db_sync
+fi
+
+#systemctl start openstack-nova-api
+#systemctl start openstack-nova-cert
+#systemctl start openstack-nova-consoleauth
+#systemctl start openstack-nova-scheduler
+#systemctl start openstack-nova-conductor
+#systemctl start openstack-nova-novncproxy
+
+# TODO: the remaining nova services still need to be started and enabled here.
+
+#ln -s "/etc/systemd/system/openstack-nova-api.service" \
+# "/etc/systemd/system/multi-user.target.wants/openstack-nova-api.service"
+
+#ln -s "/etc/systemd/system/openstack-nova-cert.service" \
+# "/etc/systemd/system/multi-user.target.wants/openstack-nova-cert.service"
+
+exit 0
diff --git a/strata/openstack-services.morph b/strata/openstack-services.morph
new file mode 100644
index 00000000..b3ec1e87
--- /dev/null
+++ b/strata/openstack-services.morph
@@ -0,0 +1,531 @@
+build-depends:
+- {morph: strata/tools.morph}
+- {morph: strata/erlang.morph}
+chunks:
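+# Most of the Python dependencies below only build-depend on pbr (which in
+# turn build-depends on pip), so the Python packaging tools are built before
+# everything else; the OpenStack clients and services come last.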
+- build-depends: []
+ name: pip
+ ref: master
+ repo: upstream:pip
+- build-depends: [pip]
+ name: pbr
+ ref: master
+ repo: upstream:pbr
+- build-depends: [pbr]
+ name: oslo-config
+ ref: master
+ repo: upstream:openstack/oslo-config
+- build-depends: [pbr]
+ name: oslo-rootwrap
+ ref: master
+ repo: upstream:openstack/oslo-rootwrap
+- build-depends: [pbr]
+ name: stevedore
+ ref: master
+ repo: upstream:openstack/stevedore
+- build-depends: [pbr]
+ name: webob
+ ref: master
+ repo: upstream:python-packages/webob
+- build-depends: [pbr]
+ name: eventlet
+ ref: master
+ repo: upstream:python-packages/eventlet
+- build-depends: [pbr]
+ name: greenlet
+ ref: master
+ repo: upstream:python-packages/greenlet
+- build-depends: [pbr]
+ name: kombu
+ ref: master
+ repo: upstream:python-packages/kombu
+- build-depends: [pbr]
+ name: sqlalchemy
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/sqlalchemy
+- build-depends: [pbr]
+ name: alembic
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/alembic
+- build-depends: [pbr]
+ name: lockfile
+ ref: master
+ repo: upstream:python-packages/lockfile
+- build-depends: [pbr]
+ name: dogpile-cache
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/dogpile-cache
+- build-depends: [pbr]
+ name: dogpile-core
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/dogpile-core
+- build-depends: [pbr]
+ name: pycrypto
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/pycrypto
+- build-depends: [pbr]
+ name: pyjwt
+ ref: master
+ repo: upstream:python-packages/pyjwt
+- build-depends: [pbr]
+ name: creole
+ ref: master
+ repo: upstream:python-packages/creole
+- build-depends: [pbr]
+ name: mock
+ ref: master
+ repo: upstream:python-packages/mock
+- build-depends: [pbr]
+ name: nose
+ ref: master
+ repo: upstream:python-packages/nose
+- build-depends: [pbr]
+ name: sphinx
+ ref: master
+ repo: upstream:python-packages/sphinx
+- build-depends: [pbr]
+ name: tox
+ ref: master
+ repo: upstream:python-packages/tox
+- build-depends: [pbr]
+ name: pylint
+ ref: master
+ repo: upstream:python-packages/pylint
+- build-depends: [pbr]
+ name: pycco
+ ref: master
+ repo: upstream:python-packages/pycco
+- build-depends: [pbr]
+ morph: strata/openstack-services/decorator.morph
+ name: decorator
+ ref: master
+ repo: /src/openstack-ct187/depends/micheles
+- build-depends: [pbr]
+ name: python-dateutil
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/python-dateutil
+- build-depends: [pbr]
+ morph: strata/openstack-services/docutils.morph
+ name: docutils
+ ref: master
+ repo: upstream:python-packages/docutils
+- build-depends: [pbr]
+ name: posix-ipc-tarball
+ ref: master
+ repo: upstream:python-packages/posix-ipc-tarball
+- build-depends: [pbr]
+ name: pastedeploy
+ ref: master
+ repo: upstream:python-packages/pastedeploy
+- build-depends: [pbr]
+ name: paste
+ ref: master
+ repo: upstream:python-packages/paste
+- build-depends: [pbr]
+ name: routes
+ ref: master
+ repo: upstream:python-packages/routes
+- build-depends: [pbr]
+ name: passlib
+ ref: master
+ repo: upstream:python-packages/passlib
+- build-depends: [pbr]
+ name: tempita
+ ref: master
+ repo: upstream:python-packages/tempita
+- build-depends: [pbr]
+ name: websockify
+ ref: baserock/v0.6.0
+ repo: upstream:python-packages/websockify
+- build-depends: [pbr]
+ name: testtools
+ ref: master
+ repo: upstream:python-packages/testtools
+- build-depends: [pbr]
+ name: pyopenssl
+ ref: master
+ repo: upstream:python-packages/pyopenssl
+- build-depends: [pbr]
+ name: httplib2
+ ref: master
+ repo: upstream:python-packages/httplib2
+- build-depends: [pbr]
+ name: testrepository
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/testrepository
+- build-depends: [pbr]
+ name: suds
+ ref: master
+ repo: upstream:python-packages/suds
+- build-depends: [pbr]
+ name: testscenarios
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/testscenarios
+- build-depends: [pbr]
+ name: fixtures
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/fixtures
+- build-depends: [pbr]
+ name: anyjson
+ ref: master
+ repo: upstream:python-packages/anyjson
+- build-depends: [pbr]
+ name: mox
+ ref: master
+ repo: upstream:python-packages/mox
+- build-depends: [pbr]
+ name: rsa
+ ref: master
+ repo: upstream:python-packages/rsa
+- build-depends: [pbr]
+ name: python-subunit
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/python-subunit
+- build-depends: [pbr]
+ name: jsonpointer
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/jsonpointer
+- build-depends: [pbr]
+ name: urllib3
+ ref: master
+ repo: upstream:python-packages/urllib3
+- build-depends: [pbr]
+ name: cliff
+ ref: master
+ repo: upstream:python-packages/cliff
+- build-depends: [pbr]
+ name: jsonrpclib
+ ref: master
+ repo: upstream:python-packages/jsonrpclib
+- build-depends: [pbr]
+ name: rtslib-fb
+ ref: master
+ repo: upstream:python-packages/rtslib-fb
+- build-depends: [pbr]
+ name: futures
+ ref: master
+ repo: upstream:python-packages/futures
+- build-depends: [pbr]
+ name: netifaces
+ ref: master
+ repo: upstream:python-packages/netifaces
+- build-depends: [pbr]
+ name: networkx
+ ref: master
+ repo: upstream:python-packages/networkx
+- build-depends: [pbr]
+ name: dnspython
+ ref: master
+ repo: upstream:python-packages/dnspython
+- build-depends: [pbr]
+ name: pycparser
+ ref: master
+ repo: upstream:python-packages/pycparser
+- build-depends: [pbr]
+ name: pyasn1
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/pyasn1
+- build-depends: [pbr]
+ name: pexpect
+ ref: master
+ repo: upstream:python-packages/pexpect
+- build-depends: [pbr]
+ name: jsonpath-rw
+ ref: master
+ repo: upstream:python-packages/jsonpath-rw
+- build-depends: [pbr]
+ name: pecan
+ ref: master
+ repo: upstream:python-packages/pecan
+- build-depends: [pbr]
+ name: croniter
+ ref: master
+ repo: upstream:python-packages/croniter
+- build-depends: [pbr]
+ name: msgpack-python
+ ref: master
+ repo: upstream:python-packages/msgpack-python
+- build-depends: [pbr]
+ morph: strata/openstack-services/qpid-python.morph
+ name: qpid-python
+ ref: trunk
+ repo: upstream:python-packages/qpid-python
+- build-depends: [pbr]
+ name: wsme
+ ref: master
+ repo: upstream:python-packages/wsme
+- build-depends: [pbr]
+ name: pysnmp
+ ref: master
+ repo: upstream:python-packages/pysnmp
+- build-depends: [pbr]
+ name: repoze-lru
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/repoze-lru
+- build-depends: [pbr]
+ morph: strata/openstack-services/thrift.morph
+ name: thrift
+ ref: 0.9.1
+ repo: upstream:thrift
+- build-depends: [pbr]
+ morph: strata/openstack-services/ipaddr-py.morph
+ name: ipaddr-py
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/ipaddr-py
+- build-depends: [pbr]
+ name: simplegeneric
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/simplegeneric
+- build-depends: [pbr]
+ name: retrying
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/retrying
+- build-depends: [pbr]
+ morph: strata/openstack-services/rabbitmq-server.morph
+ name: rabbitmq-server
+ ref: master
+ repo: /src/openstack-ct187/depends/rabbitmq-server
+- build-depends: [pbr]
+ name: swig-tarball
+ ref: master
+ repo: upstream:swig-tarball
+- build-depends: [pbr]
+ name: babel
+ ref: baserock/morph
+ repo: upstream:babel
+- build-depends: [pbr]
+ name: pyiso8601
+ ref: baserock/morph
+ repo: upstream:pyiso8601
+- build-depends: [pbr]
+ name: netaddr
+ ref: baserock/morph
+ repo: upstream:netaddr
+- build-depends: [pbr]
+ name: python-prettytable
+ ref: baserock/morph
+ repo: upstream:python-prettytable
+- build-depends: [pbr]
+ name: python-requests
+ ref: baserock/morph
+ repo: upstream:python-requests
+- build-depends: [pbr]
+ name: jsonschema
+ ref: baserock/morph
+ repo: upstream:jsonschema
+- build-depends: [pbr]
+ name: jinja2
+ ref: baserock/morph
+ repo: upstream:jinja2
+- build-depends: [pbr]
+ name: simplejson
+ ref: baserock/morph
+ repo: upstream:simplejson
+- build-depends: [pbr]
+ name: pytz
+ ref: baserock/morph
+ repo: upstream:pytz
+- build-depends: [pbr]
+ name: paramiko
+ ref: baserock/morph
+ repo: upstream:paramiko
+- build-depends: [pbr]
+ name: boto
+ ref: baserock/morph
+ repo: upstream:boto
+- build-depends: [babel, pyiso8601, netaddr, oslo-config, pbr, python-prettytable,
+ python-requests, stevedore, pbr]
+ name: python-keystoneclient
+ ref: master
+ repo: upstream:openstack/python-keystoneclient
+- build-depends: [python-requests, simplejson, pbr]
+ name: python-swiftclient
+ ref: master
+ repo: upstream:openstack/python-swiftclient
+- build-depends: [pbr, pyiso8601, python-prettytable, python-requests, simplejson,
+ babel, pbr]
+ name: python-novaclient
+ ref: master
+ repo: upstream:openstack/python-novaclient
+- build-depends: [pbr, python-prettytable, python-requests, simplejson, babel, pbr]
+ name: python-troveclient
+ ref: master
+ repo: upstream:openstack/python-troveclient
+- build-depends: [oslo-config, stevedore, pyiso8601, eventlet, babel, kombu, pbr]
+ name: oslo-messaging
+ ref: master
+ repo: upstream:openstack/oslo-messaging
+- build-depends: [stevedore, netaddr, pyiso8601, babel, suds, eventlet, pbr]
+ name: oslo-vmware
+ ref: master
+ repo: upstream:openstack/oslo-vmware
+- build-depends: [babel, pbr]
+ name: oslo-i18n
+ ref: master
+ repo: upstream:openstack/oslo-i18n
+- build-depends: [fixtures, python-subunit, testrepository, testscenarios, testtools,
+ mock, mox, pbr]
+ name: oslotest
+ ref: master
+ repo: upstream:openstack/oslotest
+- build-depends: [pbr, anyjson, pyiso8601, networkx, babel, stevedore, futures, pbr]
+ name: taskflow
+ ref: master
+ repo: upstream:openstack/taskflow
+- build-depends: [pbr, sqlalchemy, decorator, tempita, pbr]
+ name: sqlalchemy-migrate
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/sqlalchemy-migrate
+- build-depends: [docutils, creole, pycco, pycrypto, mock, nose, sphinx, pyjwt, pbr]
+ name: oauthlib
+ ref: master
+ repo: upstream:python-packages/oauthlib
+- build-depends: [nose, mock, pylint, sphinx, tox, pbr]
+ name: coverage
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/coverage
+- build-depends: [urllib3, pbr]
+ name: httpretty
+ ref: master
+ repo: upstream:python-packages/httpretty
+- build-depends: [jsonpointer, pbr]
+ name: jsonpatch
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/jsonpatch
+- build-depends: [pycparser, pbr]
+ name: cffi
+ ref: master
+ repo: upstream:python-packages/cffi
+- build-depends: [thrift, pbr]
+ name: happybase
+ ref: master
+ repo: upstream:python-packages/happybase
+- build-depends: [webob, pbr]
+ name: osprofiler
+ ref: 0.3.0
+ repo: /src/openstack-ct187/depends/python-packages/osprofiler
+- build-depends: [pbr, python-prettytable, python-keystoneclient, python-requests,
+ simplejson, babel, pbr]
+ name: python-cinderclient
+ ref: master
+ repo: upstream:openstack/python-cinderclient
+- build-depends: [pbr, cliff, pyiso8601, netaddr, python-requests, python-keystoneclient,
+ simplejson, babel, pbr]
+ name: python-neutronclient
+ ref: master
+ repo: upstream:openstack/python-neutronclient
+- build-depends: [pbr, pyiso8601, python-prettytable, python-keystoneclient, pbr]
+ name: python-ceilometerclient
+ ref: master
+ repo: upstream:openstack/python-ceilometerclient
+- build-depends: [pyiso8601, pbr, python-prettytable, python-keystoneclient, python-requests,
+ pbr]
+ name: python-heatclient
+ ref: master
+ repo: upstream:openstack/python-heatclient
+- build-depends: [cliff, jsonschema, pbr, python-keystoneclient, python-requests,
+ stevedore, pbr]
+ name: python-designateclient
+ ref: master
+ repo: upstream:openstack/python-designateclient
+- build-depends: [babel, pyiso8601, oslo-config, oslo-i18n, pbr]
+ name: oslo-utils
+ ref: master
+ repo: /src/openstack-ct187/packages/oslo-utils
+- build-depends: [babel, pyiso8601, netaddr, oslo-config, oslo-messaging, posix-ipc-tarball,
+ pytz, webob, pbr]
+ name: pycadf
+ ref: master
+ repo: upstream:python-packages/pycadf
+- build-depends: [cffi, pbr]
+ morph: strata/openstack-services/xattr.morph
+ name: xattr
+ ref: master
+ repo: upstream:python-packages/xattr
+- build-depends: [pbr, cffi]
+ name: cryptography
+ ref: master
+ repo: /src/openstack-ct187/depends/python-packages/cryptography
+- build-depends: [jsonschema, jsonpatch, pbr]
+ name: warlock
+ ref: master
+ repo: upstream:warlock
+- build-depends: [pbr, greenlet, sqlalchemy, anyjson, eventlet, pastedeploy, routes,
+ webob, boto, sqlalchemy-migrate, httplib2, kombu, pycrypto, pyiso8601, oslo-config,
+ stevedore, netaddr, python-swiftclient, oslo-vmware, paste, jsonschema, python-cinderclient,
+ python-keystoneclient, pyopenssl, oslo-messaging, pbr, cryptography, simplegeneric,
+ ipaddr-py, osprofiler, retrying]
+ name: glance
+ ref: master
+ repo: upstream:openstack/glance
+- build-depends: [pbr, webob, eventlet, greenlet, netaddr, pastedeploy, paste, routes,
+ sqlalchemy, sqlalchemy-migrate, passlib, pyiso8601, python-keystoneclient, oslo-config,
+ oslo-messaging, babel, oauthlib, dogpile-cache, jsonschema, pycadf, posix-ipc-tarball,
+ pbr, repoze-lru, dogpile-core, oslo-utils, decorator]
+ name: keystone
+ ref: 2014.2.b1
+ repo: upstream:openstack/keystone
+- build-depends: [pbr, paste, pastedeploy, routes, anyjson, babel, eventlet, greenlet,
+ httplib2, python-requests, pyiso8601, jsonrpclib, jinja2, kombu, netaddr, python-neutronclient,
+ sqlalchemy, webob, python-keystoneclient, alembic, stevedore, oslo-config, oslo-messaging,
+ oslo-rootwrap, python-novaclient, pbr]
+ name: neutron
+ ref: master
+ repo: upstream:openstack/neutron
+- build-depends: [dnspython, eventlet, greenlet, netifaces, pastedeploy, simplejson,
+ xattr, pbr]
+ name: swift
+ ref: master
+ repo: upstream:openstack/swift
+- build-depends: [pbr, sqlalchemy, eventlet, kombu, routes, webob, pastedeploy, paste,
+ sqlalchemy-migrate, netaddr, httplib2, passlib, python-heatclient, python-novaclient,
+ python-cinderclient, python-keystoneclient, python-swiftclient, python-designateclient,
+ pyiso8601, jsonschema, jinja2, pexpect, oslo-config, babel, pbr]
+ name: trove
+ ref: master
+ repo: upstream:openstack/trove
+- build-depends: [pbr, babel, python-prettytable, python-keystoneclient, pyopenssl,
+ warlock, pbr]
+ name: python-glanceclient
+ ref: master
+ repo: upstream:openstack/python-glanceclient
+- build-depends: [alembic, babel, pyiso8601, oslo-i18n, oslo-config, oslo-utils, sqlalchemy,
+ sqlalchemy-migrate, stevedore, pbr]
+ name: oslo-db
+ ref: master
+ repo: upstream:openstack/oslo-db
+- build-depends: [pbr, sqlalchemy, anyjson, boto, eventlet, jinja2, kombu, routes,
+ webob, greenlet, pastedeploy, paste, sqlalchemy-migrate, netaddr, suds, paramiko,
+ pyasn1, babel, pyiso8601, jsonschema, python-cinderclient, python-neutronclient,
+ python-glanceclient, python-keystoneclient, stevedore, websockify, oslo-config,
+ oslo-rootwrap, oslotest, pycadf, oslo-messaging, pbr]
+ name: nova
+ ref: master
+ repo: upstream:openstack/nova
+- build-depends: [pbr, anyjson, babel, eventlet, greenlet, pyiso8601, kombu, netaddr,
+ oslo-config, oslo-messaging, oslo-rootwrap, paramiko, paste, pastedeploy, python-glanceclient,
+ python-keystoneclient, python-novaclient, python-swiftclient, python-requests,
+ routes, taskflow, rtslib-fb, sqlalchemy, sqlalchemy-migrate, stevedore, suds,
+ webob, pbr]
+ name: cinder
+ ref: master
+ repo: upstream:openstack/cinder
+- build-depends: [alembic, anyjson, croniter, eventlet, happybase, pyiso8601, jsonpath-rw,
+ jsonschema, lockfile, msgpack-python, netaddr, oslo-config, oslo-vmware, pastedeploy,
+ pbr, pecan, posix-ipc-tarball, oslo-messaging, pysnmp, python-ceilometerclient,
+ python-glanceclient, python-keystoneclient, python-neutronclient, python-novaclient,
+ python-swiftclient, pytz, python-requests, sqlalchemy, sqlalchemy-migrate, stevedore,
+ webob, wsme, pbr]
+ name: ceilometer
+ ref: master
+ repo: upstream:openstack/ceilometer
+- build-depends: [babel, eventlet, greenlet, httplib2, pyiso8601, kombu, netaddr,
+ oslo-config, oslo-messaging, pastedeploy, pbr, posix-ipc-tarball, pycrypto, python-ceilometerclient,
+ python-cinderclient, python-glanceclient, python-heatclient, python-keystoneclient,
+ python-neutronclient, python-novaclient, python-swiftclient, python-troveclient,
+ qpid-python, python-requests, routes, sqlalchemy, sqlalchemy-migrate, webob, pbr]
+ name: heat
+ ref: master
+ repo: upstream:openstack/heat
+description: OpenStack clients and services
+kind: stratum
+name: openstack-services
diff --git a/strata/openstack-services/decorator.morph b/strata/openstack-services/decorator.morph
new file mode 100644
index 00000000..00162855
--- /dev/null
+++ b/strata/openstack-services/decorator.morph
@@ -0,0 +1,6 @@
+name: decorator
+kind: chunk
+build-commands:
+- cd decorator && python setup.py build
+install-commands:
+- cd decorator && python setup.py install --prefix "$PREFIX" --root "$DESTDIR"
diff --git a/strata/openstack-services/docutils.morph b/strata/openstack-services/docutils.morph
new file mode 100644
index 00000000..fefd3487
--- /dev/null
+++ b/strata/openstack-services/docutils.morph
@@ -0,0 +1,6 @@
+name: docutils
+kind: chunk
+build-commands:
+- cd docutils && python setup.py build
+install-commands:
+- cd docutils && python setup.py install --prefix "$PREFIX" --root "$DESTDIR"
diff --git a/strata/openstack-services/ipaddr-py.morph b/strata/openstack-services/ipaddr-py.morph
new file mode 100644
index 00000000..f6691ab7
--- /dev/null
+++ b/strata/openstack-services/ipaddr-py.morph
@@ -0,0 +1,6 @@
+name: ipaddr-py
+kind: chunk
+build-commands:
+- cd trunk && python setup.py build
+install-commands:
+- cd trunk && python setup.py install --prefix "$PREFIX" --root "$DESTDIR"
diff --git a/strata/openstack-services/qpid-python.morph b/strata/openstack-services/qpid-python.morph
new file mode 100644
index 00000000..203b3db5
--- /dev/null
+++ b/strata/openstack-services/qpid-python.morph
@@ -0,0 +1,6 @@
+name: qpid-python
+kind: chunk
+build-commands:
+- cd qpid/python && python setup.py build
+install-commands:
+- cd qpid/python && python setup.py install --prefix "$PREFIX" --root "$DESTDIR"
diff --git a/strata/openstack-services/rabbitmq-server.morph b/strata/openstack-services/rabbitmq-server.morph
new file mode 100644
index 00000000..e0f26be4
--- /dev/null
+++ b/strata/openstack-services/rabbitmq-server.morph
@@ -0,0 +1,7 @@
+name: rabbitmq-server
+kind: chunk
+configure-commands:
+build-commands:
+- make
+install-commands:
+- make install_bin TARGET_DIR="$DESTDIR" SBIN_DIR="$DESTDIR"/sbin MAN_DIR=/usr/share/man DOC_INSTALL_DIR=/usr/share
diff --git a/strata/openstack-services/thrift.morph b/strata/openstack-services/thrift.morph
new file mode 100644
index 00000000..321e06d6
--- /dev/null
+++ b/strata/openstack-services/thrift.morph
@@ -0,0 +1,6 @@
+name: thrift
+kind: chunk
+build-system: autotools
+configure-commands:
+- ./bootstrap.sh
+- ./configure --prefix="$PREFIX" --without-erlang
diff --git a/strata/openstack-services/xattr.morph b/strata/openstack-services/xattr.morph
new file mode 100644
index 00000000..65f47dae
--- /dev/null
+++ b/strata/openstack-services/xattr.morph
@@ -0,0 +1,8 @@
+name: xattr
+kind: chunk
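+# The system copy of cffi is copied into the source tree first, presumably so
+# that xattr's setup.py can import it at build time (cffi is listed as a
+# build dependency of this chunk in the stratum).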
+configure-commands:
+- cp -r /usr/lib/python2.7/site-packages/cffi .
+build-commands:
+- python setup.py build
+install-commands:
+- python setup.py install --prefix "$PREFIX" --root "$DESTDIR"
diff --git a/systems/openstack-server.morph b/systems/openstack-server.morph
new file mode 100644
index 00000000..1a1a83c1
--- /dev/null
+++ b/systems/openstack-server.morph
@@ -0,0 +1,36 @@
+name: devel-system-x86_64-openstack
+kind: system
+description: A 64-bit x86 system that runs the OpenStack keystone, glance and
+ nova services.
+arch: x86_64
+strata:
+- name: build-essential
+ morph: strata/build-essential.morph
+- name: core
+ morph: strata/core.morph
+- name: foundation
+ morph: strata/foundation.morph
+- name: bsp-x86_64-generic
+ morph: strata/bsp-x86_64-generic.morph
+- name: tools
+ morph: strata/tools.morph
+- name: cloudinit-support
+ morph: strata/cloudinit-support.morph
+- name: virtualization
+ morph: strata/virtualization.morph
+- name: connectivity
+ morph: strata/connectivity.morph
+- name: erlang
+ morph: strata/erlang.morph
+- name: openstack-services
+ morph: strata/openstack-services.morph
+configuration-extensions:
+- set-hostname
+- add-config-files
+- simple-network
+- nfsboot
+- install-files
+- cloud-init
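+# The openstack-keystone, openstack-glance and openstack-nova extensions are
+# assumed to fill in the ##...## placeholders used by the setup scripts and
+# configuration files at deployment time.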
+- openstack-keystone
+- openstack-glance
+- openstack-nova