author    Francisco Redondo Marchena <francisco.marchena@codethink.co.uk> 2014-12-02 16:29:10 +0000
committer Francisco Redondo Marchena <francisco.marchena@codethink.co.uk> 2015-01-22 12:46:26 +0000
commit    009601ef4b70872bf57b1d8834a1c69d598ee64c (patch)
tree      6c3755b5565be0fb4af24d877202eefc181ed00a /openstack/etc/neutron
parent    dca40db051092ddc2022327182cde0578c4a1a7e (diff)
WIP: Add neutron configuration files
Diffstat (limited to 'openstack/etc/neutron')
-rw-r--r--  openstack/etc/neutron/api-paste.ini                          30
-rw-r--r--  openstack/etc/neutron/dhcp_agent.ini                         88
-rw-r--r--  openstack/etc/neutron/l3_agent.ini                          102
-rw-r--r--  openstack/etc/neutron/lbaas_agent.ini                        42
-rw-r--r--  openstack/etc/neutron/metadata_agent.ini                     59
-rw-r--r--  openstack/etc/neutron/neutron.conf                          637
-rw-r--r--  openstack/etc/neutron/policy.json                           138
-rw-r--r--  openstack/etc/neutron/rootwrap.conf                          34
-rw-r--r--  openstack/etc/neutron/rootwrap.d/cisco-apic.filters          16
-rw-r--r--  openstack/etc/neutron/rootwrap.d/debug.filters               14
-rw-r--r--  openstack/etc/neutron/rootwrap.d/dhcp.filters                35
-rw-r--r--  openstack/etc/neutron/rootwrap.d/ipset-firewall.filters      12
-rw-r--r--  openstack/etc/neutron/rootwrap.d/iptables-firewall.filters   21
-rw-r--r--  openstack/etc/neutron/rootwrap.d/l3.filters                  48
-rw-r--r--  openstack/etc/neutron/rootwrap.d/lbaas-haproxy.filters       26
-rw-r--r--  openstack/etc/neutron/rootwrap.d/linuxbridge-plugin.filters  19
-rw-r--r--  openstack/etc/neutron/rootwrap.d/nec-plugin.filters          12
-rw-r--r--  openstack/etc/neutron/rootwrap.d/ofagent.filters             16
-rw-r--r--  openstack/etc/neutron/rootwrap.d/openvswitch-plugin.filters  22
-rw-r--r--  openstack/etc/neutron/rootwrap.d/vpnaas.filters              13
20 files changed, 1384 insertions, 0 deletions
diff --git a/openstack/etc/neutron/api-paste.ini b/openstack/etc/neutron/api-paste.ini
new file mode 100644
index 00000000..24193fcd
--- /dev/null
+++ b/openstack/etc/neutron/api-paste.ini
@@ -0,0 +1,30 @@
+[composite:neutron]
+use = egg:Paste#urlmap
+/: neutronversions
+/v2.0: neutronapi_v2_0
+
+[composite:neutronapi_v2_0]
+use = call:neutron.auth:pipeline_factory
+noauth = request_id catch_errors extensions neutronapiapp_v2_0
+keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0
+
+[filter:request_id]
+paste.filter_factory = neutron.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:catch_errors]
+paste.filter_factory = neutron.openstack.common.middleware.catch_errors:CatchErrorsMiddleware.factory
+
+[filter:keystonecontext]
+paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
+[filter:extensions]
+paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
+
+[app:neutronversions]
+paste.app_factory = neutron.api.versions:Versions.factory
+
+[app:neutronapiapp_v2_0]
+paste.app_factory = neutron.api.v2.router:APIRouter.factory
diff --git a/openstack/etc/neutron/dhcp_agent.ini b/openstack/etc/neutron/dhcp_agent.ini
new file mode 100644
index 00000000..fe46b562
--- /dev/null
+++ b/openstack/etc/neutron/dhcp_agent.ini
@@ -0,0 +1,88 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is the number of
+# seconds between attempts.
+# resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+# dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IP (must have a kernel built with CONFIG_NET_NS=y and an
+# iproute2 package that supports namespaces).
+# use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+# enable_isolated_metadata = False
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
+# is connected to a Neutron router from which the VMs send metadata
+# requests. In this case DHCP Option 121 will not be injected in VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+# enable_metadata_network = False
+
+# Number of threads to use during the sync process. Should not exceed the
+# connection pool size configured on the server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+# dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
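+# Example (hypothetical path, shown only as an illustration):
+# dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf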
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
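+# Example (hypothetical resolvers; substitute your site's DNS servers):
+# dnsmasq_dns_servers = 8.8.8.8,8.8.4.4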
+
+# Limit number of leases to prevent a denial-of-service.
+# dnsmasq_lease_max = 16777216
+
+# Location of the DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/openstack/etc/neutron/l3_agent.ini b/openstack/etc/neutron/l3_agent.ini
new file mode 100644
index 00000000..4aa11971
--- /dev/null
+++ b/openstack/etc/neutron/l3_agent.ini
@@ -0,0 +1,102 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IP (must have a kernel built with CONFIG_NET_NS=y and an
+# iproute2 package that supports namespaces).
+# use_namespaces = True
+
+# If use_namespaces is set to False then the agent can only configure one
+# router. This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow the L3 agent to support multiple
+# external networks, both the external_network_bridge and
+# gateway_external_network_id options must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured. This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+# handle_internal_only_routers = True
+
+# Name of bridge used for external network traffic. This should be set to an
+# empty value for the Linux bridge. When this parameter is set, each L3 agent
+# can be associated with no more than one external network.
+# external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+# metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it to 0 or less
+# to disable this feature.
+# send_arp_for_ha = 3
+
+# Seconds between re-syncing routers' data, if needed
+# periodic_interval = 40
+
+# Seconds to wait, after starting the agent,
+# before syncing routers' data
+# periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
+
+# The working mode for the agent. Allowed values are:
+# - legacy: this preserves the existing behavior where the L3 agent is
+# deployed on a centralized networking node to provide L3 services
+# like DNAT, and SNAT. Use this mode if you do not want to adopt DVR.
+# - dvr: this mode enables DVR functionality, and must be used for an L3
+# agent that runs on a compute host.
+# - dvr_snat: this enables centralized SNAT support in conjunction with
+# DVR. This mode must be used for an L3 agent running on a centralized
+# node (or in single-host deployments, e.g. devstack).
+# agent_mode = legacy
+
+# Location to store keepalived and all HA configurations
+# ha_confs_path = $state_path/ha_confs
+
+# VRRP authentication type AH/PASS
+# ha_vrrp_auth_type = PASS
+
+# VRRP authentication password
+# ha_vrrp_auth_password =
+
+# The advertisement interval in seconds
+# ha_vrrp_advert_int = 2
diff --git a/openstack/etc/neutron/lbaas_agent.ini b/openstack/etc/neutron/lbaas_agent.ini
new file mode 100644
index 00000000..8d231b5c
--- /dev/null
+++ b/openstack/etc/neutron/lbaas_agent.ini
@@ -0,0 +1,42 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output).
+# debug = False
+
+# The LBaaS agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is the number of
+# seconds between attempts.
+# periodic_interval = 10
+
+# LBaaS requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent requires drivers to manage the load balancer. HAProxy is the
+# open-source version.
+# Multiple device drivers reflecting different service providers could be specified:
+# device_driver = path.to.provider1.driver.Driver
+# device_driver = path.to.provider2.driver.Driver
+# Default is:
+# device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver
+
+[haproxy]
+# Location to store config and state files
+# loadbalancer_state_path = $state_path/lbaas
+
+# The user group
+# user_group = nogroup
+
+# When deleting and re-adding the same VIP, send this many gratuitous ARPs to
+# flush the ARP cache in the router. Set it to 0 or less to disable this
+# feature.
+# send_gratuitous_arp = 3
diff --git a/openstack/etc/neutron/metadata_agent.ini b/openstack/etc/neutron/metadata_agent.ini
new file mode 100644
index 00000000..55bb891f
--- /dev/null
+++ b/openstack/etc/neutron/metadata_agent.ini
@@ -0,0 +1,59 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://localhost:5000/v2.0
+auth_region = RegionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USER%
+admin_password = %SERVICE_PASSWORD%
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+# nova_metadata_ip = 127.0.0.1
+
+# TCP Port used by Nova metadata server
+# nova_metadata_port = 8775
+
+# Which protocol to use for requests to Nova metadata server, http or https
+# nova_metadata_protocol = http
+
+# Whether insecure SSL connection should be accepted for Nova metadata server
+# requests
+# nova_metadata_insecure = False
+
+# Client certificate for nova api, needed when nova api requires client
+# certificates
+# nova_client_cert =
+
+# Private key for nova client certificate
+# nova_client_priv_key =
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+# metadata_proxy_shared_secret =
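+# Example (hypothetical secret; the same string must be set as
+# neutron_metadata_proxy_shared_secret in nova.conf):
+# metadata_proxy_shared_secret = some-shared-secret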
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server. Defaults to
+# half the number of CPU cores
+# metadata_workers =
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 4096
+
+# URL to connect to the cache backend.
+# A default_ttl=0 parameter will cause cache entries to never expire;
+# otherwise default_ttl specifies the time in seconds a cache entry is
+# valid for. No cache is used if no value is passed.
+# cache_url = memory://?default_ttl=5
diff --git a/openstack/etc/neutron/neutron.conf b/openstack/etc/neutron/neutron.conf
new file mode 100644
index 00000000..d9de9085
--- /dev/null
+++ b/openstack/etc/neutron/neutron.conf
@@ -0,0 +1,637 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+# verbose = False
+
+# =========Start Global Config Option for Distributed L3 Router===============
+# Setting the "router_distributed" flag to "True" will default to the creation
+# of distributed tenant routers. The admin can override this flag by specifying
+# the type of the router on the create request (admin-only attribute). Default
+# value is "False" to support legacy mode (centralized) routers.
+#
+# router_distributed = False
+#
+# ===========End Global Config Option for Distributed L3 Router===============
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+# debug = False
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+# state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+# log_dir =
+
+# publish_errors = False
+
+# Address to bind the API server to
+# bind_host = 0.0.0.0
+
+# Port to bind the API server to
+# bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+# core_plugin =
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+
+# Paste configuration file
+# api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+# auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# DVR Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be randomly
+# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
+# avoid mixing them up with MACs allocated for tenant ports.
+# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
+# The default is 3 octet
+# dvr_base_mac = fa:16:3f:00:00:00
+
+# Maximum number of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds). Use -1 to
+# tell dnsmasq to use infinite lease times.
+# dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+# allow_overlapping_ips = False
+# Ensure that configured gateway is on subnet. For IPv6, validate only if
+# gateway is not a link local address. Deprecated, to be removed during the
+# K release, at which point the check will be mandatory.
+# force_gateway_on_subnet = True
+
+# Default maximum number of items returned in a single response. The value
+# 'infinite' or a negative value means no max limit; otherwise the value must
+# be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, the server will return at most pagination_max_limit
+# items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# Maximum number of routes per router
+# max_routes = 30
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+# agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling of networks to the DHCP agent. It will schedule
+# non-hosted networks to the first DHCP agent which sends a
+# get_active_networks message to the neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling of routers to the L3 agent. It will schedule
+# non-hosted routers to the first L3 agent which sends a sync_routers
+# message to the neutron server
+# router_auto_schedule = True
+
+# Allow automatic rescheduling of routers with admin_state_up set to True
+# from dead L3 agents to live agents.
+# allow_automatic_l3agent_failover = False
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== items for l3 extension ==============
+# Enable high availability for virtual routers.
+# l3_ha = False
+#
+# Maximum number of l3 agents which an HA router will be scheduled on. If it
+# is set to 0 the router will be scheduled on every agent.
+# max_l3_agents_per_router = 3
+#
+# Minimum number of l3 agents which an HA router will be scheduled on. The
+# default value is 2.
+# min_l3_agents_per_router = 2
+#
+# CIDR of the administrative network if HA mode is enabled
+# l3_ha_net_cidr = 169.254.192.0/18
+# =========== end of items for l3 extension =======
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+# api_workers = 0
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+# rpc_workers = 0
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+# notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+# notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+# nova_url = http://127.0.0.1:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+# nova_region_name =
+
+# Username for connection to nova in admin context
+# nova_admin_username =
+
+# The uuid of the admin nova tenant
+# nova_admin_tenant_id =
+
+# Password for connection to nova in admin context.
+# nova_admin_password =
+
+# Authorization URL for connection to nova in admin context.
+# nova_admin_auth_url =
+
+# CA file for novaclient to verify server certificates
+# nova_ca_certificates_file =
+
+# Boolean to control ignoring SSL errors on the nova url
+# nova_api_insecure = False
+
+# Number of seconds between sending events to nova if there are any events to send
+# send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). Valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+#rabbit_host=localhost
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+#rabbit_port=5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+#rabbit_userid=guest
+
+# The RabbitMQ password. (string value)
+#rabbit_password=guest
+
+# The RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# If passed, use a fake RabbitMQ provider. (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=oslo
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+#notification_driver=
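+# Example (assuming the stock oslo.messaging notify drivers are available):
+#notification_driver=messaging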
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
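+# Example (hypothetical broker address and credentials):
+#transport_url=rabbit://guest:guest@localhost:5672/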
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+#rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+[matchmaker_redis]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+[quotas]
+# Default driver to use for quota checks
+# quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+# quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+# default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+# quota_network = 10
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+# quota_subnet = 10
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+# quota_port = 50
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+# quota_security_group = 10
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+# quota_security_group_rule = 100
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end, a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end, a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitor = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+# Number of firewalls allowed per tenant. A negative value means unlimited.
+# quota_firewall = 1
+
+# Number of firewall policies allowed per tenant. A negative value means
+# unlimited.
+# quota_firewall_policy = 1
+
+# Number of firewall rules allowed per tenant. A negative value means
+# unlimited.
+# quota_firewall_rule = 100
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+# root_helper = sudo
+
+# =========== items for agent management extension =============
+# Seconds between nodes reporting state to the server; should be less than
+# agent_down_time, best if it is half or less of agent_down_time
+# report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_host = 127.0.0.1
+auth_port = 35357
+auth_protocol = http
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USER%
+admin_password = %SERVICE_PASSWORD%
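+# Example with (hypothetical) concrete values for the placeholders above:
+# admin_tenant_name = service
+# admin_user = neutron
+# admin_password = NEUTRON_PASS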
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite://
+# NOTE: In deployment the [database] section and its connection attribute may
+# be set in the corresponding core plugin '.ini' file. However, it is suggested
+# to put the [database] section and its connection attribute in this
+# configuration file.
+
+# Database engine for which script will be generated when using offline
+# migration
+# engine =
+
+# The SQLAlchemy connection string used to connect to the slave database
+# slave_connection =
+
+# Database reconnection retry times in the event connectivity is lost;
+# a value of -1 implies an infinite retry count
+# max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+# retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+# min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# max_pool_size = 10
+
+# Timeout in seconds before idle sql connections are reaped
+# idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+# max_overflow = 20
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+# connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+# connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in the form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# The list of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# The combination of <service type> and <name> must be unique; <driver> must
+# also be unique
+# This is a multi-line option. Example for the default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# Example of a non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# To activate Radware's LBaaS driver, uncomment the next line. If you want to
+# keep HAProxy as the default LBaaS driver, remove the ':default' attribute
+# from the line below; otherwise comment out the HAProxy line above.
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# Uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
+# Uncomment the line below to use the A10 Networks LBaaS driver. Requires 'pip install a10-neutron-lbaas'.
+#service_provider = LOADBALANCER:A10Networks:neutron.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default
+# Uncomment the following line to test the LBaaS v2 API _WITHOUT_ a real backend
+# service_provider = LOADBALANCER:LoggingNoop:neutron.services.loadbalancer.drivers.logging_noop.driver.LoggingNoopLoadBalancerDriver:default
diff --git a/openstack/etc/neutron/policy.json b/openstack/etc/neutron/policy.json
new file mode 100644
index 00000000..3f692281
--- /dev/null
+++ b/openstack/etc/neutron/policy.json
@@ -0,0 +1,138 @@
+{
+ "context_is_admin": "role:admin",
+ "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
+ "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
+ "admin_only": "rule:context_is_admin",
+ "regular_user": "",
+ "shared": "field:networks:shared=True",
+ "shared_firewalls": "field:firewalls:shared=True",
+ "external": "field:networks:router:external=True",
+ "default": "rule:admin_or_owner",
+
+ "create_subnet": "rule:admin_or_network_owner",
+ "get_subnet": "rule:admin_or_owner or rule:shared",
+ "update_subnet": "rule:admin_or_network_owner",
+ "delete_subnet": "rule:admin_or_network_owner",
+
+ "create_network": "",
+ "get_network": "rule:admin_or_owner or rule:shared or rule:external",
+ "get_network:router:external": "rule:regular_user",
+ "get_network:segments": "rule:admin_only",
+ "get_network:provider:network_type": "rule:admin_only",
+ "get_network:provider:physical_network": "rule:admin_only",
+ "get_network:provider:segmentation_id": "rule:admin_only",
+ "get_network:queue_id": "rule:admin_only",
+ "create_network:shared": "rule:admin_only",
+ "create_network:router:external": "rule:admin_only",
+ "create_network:segments": "rule:admin_only",
+ "create_network:provider:network_type": "rule:admin_only",
+ "create_network:provider:physical_network": "rule:admin_only",
+ "create_network:provider:segmentation_id": "rule:admin_only",
+ "update_network": "rule:admin_or_owner",
+ "update_network:segments": "rule:admin_only",
+ "update_network:shared": "rule:admin_only",
+ "update_network:provider:network_type": "rule:admin_only",
+ "update_network:provider:physical_network": "rule:admin_only",
+ "update_network:provider:segmentation_id": "rule:admin_only",
+ "update_network:router:external": "rule:admin_only",
+ "delete_network": "rule:admin_or_owner",
+
+ "create_port": "",
+ "create_port:mac_address": "rule:admin_or_network_owner",
+ "create_port:fixed_ips": "rule:admin_or_network_owner",
+ "create_port:port_security_enabled": "rule:admin_or_network_owner",
+ "create_port:binding:host_id": "rule:admin_only",
+ "create_port:binding:profile": "rule:admin_only",
+ "create_port:mac_learning_enabled": "rule:admin_or_network_owner",
+ "get_port": "rule:admin_or_owner",
+ "get_port:queue_id": "rule:admin_only",
+ "get_port:binding:vif_type": "rule:admin_only",
+ "get_port:binding:vif_details": "rule:admin_only",
+ "get_port:binding:host_id": "rule:admin_only",
+ "get_port:binding:profile": "rule:admin_only",
+ "update_port": "rule:admin_or_owner",
+ "update_port:fixed_ips": "rule:admin_or_network_owner",
+ "update_port:port_security_enabled": "rule:admin_or_network_owner",
+ "update_port:binding:host_id": "rule:admin_only",
+ "update_port:binding:profile": "rule:admin_only",
+ "update_port:mac_learning_enabled": "rule:admin_or_network_owner",
+ "delete_port": "rule:admin_or_owner",
+
+ "get_router:ha": "rule:admin_only",
+ "create_router": "rule:regular_user",
+ "create_router:external_gateway_info:enable_snat": "rule:admin_only",
+ "create_router:distributed": "rule:admin_only",
+ "create_router:ha": "rule:admin_only",
+ "get_router": "rule:admin_or_owner",
+ "get_router:distributed": "rule:admin_only",
+ "update_router:external_gateway_info:enable_snat": "rule:admin_only",
+ "update_router:distributed": "rule:admin_only",
+ "update_router:ha": "rule:admin_only",
+ "delete_router": "rule:admin_or_owner",
+
+ "add_router_interface": "rule:admin_or_owner",
+ "remove_router_interface": "rule:admin_or_owner",
+
+ "create_firewall": "",
+ "get_firewall": "rule:admin_or_owner",
+ "create_firewall:shared": "rule:admin_only",
+ "get_firewall:shared": "rule:admin_only",
+ "update_firewall": "rule:admin_or_owner",
+ "update_firewall:shared": "rule:admin_only",
+ "delete_firewall": "rule:admin_or_owner",
+
+ "create_firewall_policy": "",
+ "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls",
+ "create_firewall_policy:shared": "rule:admin_or_owner",
+ "update_firewall_policy": "rule:admin_or_owner",
+ "delete_firewall_policy": "rule:admin_or_owner",
+
+ "create_firewall_rule": "",
+ "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
+ "update_firewall_rule": "rule:admin_or_owner",
+ "delete_firewall_rule": "rule:admin_or_owner",
+
+ "create_qos_queue": "rule:admin_only",
+ "get_qos_queue": "rule:admin_only",
+
+ "update_agent": "rule:admin_only",
+ "delete_agent": "rule:admin_only",
+ "get_agent": "rule:admin_only",
+
+ "create_dhcp-network": "rule:admin_only",
+ "delete_dhcp-network": "rule:admin_only",
+ "get_dhcp-networks": "rule:admin_only",
+ "create_l3-router": "rule:admin_only",
+ "delete_l3-router": "rule:admin_only",
+ "get_l3-routers": "rule:admin_only",
+ "get_dhcp-agents": "rule:admin_only",
+ "get_l3-agents": "rule:admin_only",
+ "get_loadbalancer-agent": "rule:admin_only",
+ "get_loadbalancer-pools": "rule:admin_only",
+
+ "create_floatingip": "rule:regular_user",
+ "update_floatingip": "rule:admin_or_owner",
+ "delete_floatingip": "rule:admin_or_owner",
+ "get_floatingip": "rule:admin_or_owner",
+
+ "create_network_profile": "rule:admin_only",
+ "update_network_profile": "rule:admin_only",
+ "delete_network_profile": "rule:admin_only",
+ "get_network_profiles": "",
+ "get_network_profile": "",
+ "update_policy_profiles": "rule:admin_only",
+ "get_policy_profiles": "",
+ "get_policy_profile": "",
+
+ "create_metering_label": "rule:admin_only",
+ "delete_metering_label": "rule:admin_only",
+ "get_metering_label": "rule:admin_only",
+
+ "create_metering_label_rule": "rule:admin_only",
+ "delete_metering_label_rule": "rule:admin_only",
+ "get_metering_label_rule": "rule:admin_only",
+
+ "get_service_provider": "rule:regular_user",
+ "get_lsn": "rule:admin_only",
+ "create_lsn": "rule:admin_only"
+}
diff --git a/openstack/etc/neutron/rootwrap.conf b/openstack/etc/neutron/rootwrap.conf
new file mode 100644
index 00000000..ab5f4393
--- /dev/null
+++ b/openstack/etc/neutron/rootwrap.conf
@@ -0,0 +1,34 @@
+# Configuration for neutron-rootwrap
+# This file should be owned by (and only-writeable by) the root user
+
+[DEFAULT]
+# List of directories to load filter definitions from (separated by ',').
+# These directories MUST all be only writeable by root !
+filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap
+
+# List of directories to search executables in, in case filters do not
+# explicitly specify a full path (separated by ',')
+# If not specified, defaults to system PATH environment variable.
+# These directories MUST all be only writeable by root !
+exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
+
+# Enable logging to syslog
+# Default value is False
+use_syslog=False
+
+# Which syslog facility to use.
+# Valid values include auth, authpriv, syslog, local0, local1...
+# Default value is 'syslog'
+syslog_log_facility=syslog
+
+# Which messages to log.
+# INFO means log all usage
+# ERROR means only log unsuccessful attempts
+syslog_log_level=ERROR
+
+[xenapi]
+# XenAPI configuration is only required by the L2 agent if it is to
+# target a XenServer/XCP compute host's dom0.
+xenapi_connection_url=<None>
+xenapi_connection_username=root
+xenapi_connection_password=<None>
diff --git a/openstack/etc/neutron/rootwrap.d/cisco-apic.filters b/openstack/etc/neutron/rootwrap.d/cisco-apic.filters
new file mode 100644
index 00000000..69e4afcc
--- /dev/null
+++ b/openstack/etc/neutron/rootwrap.d/cisco-apic.filters
@@ -0,0 +1,16 @@
+# neutron-rootwrap command filters for nodes on which neutron is
+# expected to control network
+#
+# This file should be owned by (and only-writeable by) the root user
+
+# format seems to be
+# cmd-name: filter-name, raw-command, user, args
+
+[Filters]
+
+# cisco-apic filters
+lldpctl: CommandFilter, lldpctl, root
+
+# ip_lib filters
+ip: IpFilter, ip, root
+ip_exec: IpNetnsExecFilter, ip, root
diff --git a/openstack/etc/neutron/rootwrap.d/debug.filters b/openstack/etc/neutron/rootwrap.d/debug.filters
new file mode 100644
index 00000000..b61d9601
--- /dev/null
+++ b/openstack/etc/neutron/rootwrap.d/debug.filters
@@ -0,0 +1,14 @@
+# neutron-rootwrap command filters for nodes on which neutron is
+# expected to control network
+#
+# This file should be owned by (and only-writeable by) the root user
+
+# format seems to be
+# cmd-name: filter-name, raw-command, user, args
+
+[Filters]
+
+# This is needed because we need to ping from inside a namespace,
+# which requires root
+ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+
+ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+
diff --git a/openstack/etc/neutron/rootwrap.d/dhcp.filters b/openstack/etc/neutron/rootwrap.d/dhcp.filters
new file mode 100644
index 00000000..0712ec13
--- /dev/null
+++ b/openstack/etc/neutron/rootwrap.d/dhcp.filters
@@ -0,0 +1,35 @@
+# neutron-rootwrap command filters for nodes on which neutron is
+# expected to control network
+#
+# This file should be owned by (and only-writeable by) the root user
+
+# format seems to be
+# cmd-name: filter-name, raw-command, user, args
+
+[Filters]
+
+# dhcp-agent
+dnsmasq: EnvFilter, dnsmasq, root, NEUTRON_NETWORK_ID=
+# dhcp-agent uses kill as well, that's handled by the generic KillFilter
+# it looks like these are the only signals needed, per
+# neutron/agent/linux/dhcp.py
+kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP
+kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
+
+ovs-vsctl: CommandFilter, ovs-vsctl, root
+ivs-ctl: CommandFilter, ivs-ctl, root
+mm-ctl: CommandFilter, mm-ctl, root
+dhcp_release: CommandFilter, dhcp_release, root
+
+# metadata proxy
+metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
+# If installed from source (say, by devstack), the prefix will be
+# /usr/local instead of /usr/bin.
+metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root
+# RHEL invocation of the metadata proxy will report /usr/bin/python
+kill_metadata: KillFilter, root, python, -9
+kill_metadata7: KillFilter, root, python2.7, -9
+
+# ip_lib
+ip: IpFilter, ip, root
+ip_exec: IpNetnsExecFilter, ip, root
diff --git a/openstack/etc/neutron/rootwrap.d/ipset-firewall.filters b/openstack/etc/neutron/rootwrap.d/ipset-firewall.filters
new file mode 100644
index 00000000..52c66373
--- /dev/null
+++ b/openstack/etc/neutron/rootwrap.d/ipset-firewall.filters
@@ -0,0 +1,12 @@
+# neutron-rootwrap command filters for nodes on which neutron is
+# expected to control network
+#
+# This file should be owned by (and only-writeable by) the root user
+
+# format seems to be
+# cmd-name: filter-name, raw-command, user, args
+
+[Filters]
+# neutron/agent/linux/iptables_firewall.py
+# "ipset", "-A", ...
+ipset: CommandFilter, ipset, root
diff --git a/openstack/etc/neutron/rootwrap.d/iptables-firewall.filters b/openstack/etc/neutron/rootwrap.d/iptables-firewall.filters
new file mode 100644
index 00000000..b8a6ab5b
--- /dev/null
+++ b/openstack/etc/neutron/rootwrap.d/iptables-firewall.filters
@@ -0,0 +1,21 @@
+# neutron-rootwrap command filters for nodes on which neutron is
+# expected to control network
+#
+# This file should be owned by (and only-writeable by) the root user
+
+# format seems to be
+# cmd-name: filter-name, raw-command, user, args
+
+[Filters]
+
+# neutron/agent/linux/iptables_manager.py
+# "iptables-save", ...
+iptables-save: CommandFilter, iptables-save, root
+iptables-restore: CommandFilter, iptables-restore, root
+ip6tables-save: CommandFilter, ip6tables-save, root
+ip6tables-restore: CommandFilter, ip6tables-restore, root
+
+# neutron/agent/linux/iptables_manager.py
+# "iptables", "-A", ...
+iptables: CommandFilter, iptables, root
+ip6tables: CommandFilter, ip6tables, root
diff --git a/openstack/etc/neutron/rootwrap.d/l3.filters b/openstack/etc/neutron/rootwrap.d/l3.filters
new file mode 100644
index 00000000..be69b32c
--- /dev/null
+++ b/openstack/etc/neutron/rootwrap.d/l3.filters
@@ -0,0 +1,48 @@
+# neutron-rootwrap command filters for nodes on which neutron is
+# expected to control network
+#
+# This file should be owned by (and only-writeable by) the root user
+
+# format seems to be
+# cmd-name: filter-name, raw-command, user, args
+
+[Filters]
+
+# arping
+arping: CommandFilter, arping, root
+
+# l3_agent
+sysctl: CommandFilter, sysctl, root
+route: CommandFilter, route, root
+radvd: CommandFilter, radvd, root
+
+# metadata proxy
+metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
+# If installed from source (say, by devstack), the prefix will be
+# /usr/local instead of /usr/bin.
+metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root
+# RHEL invocation of the metadata proxy will report /usr/bin/python
+kill_metadata: KillFilter, root, python, -9
+kill_metadata7: KillFilter, root, python2.7, -9
+kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -9, -HUP
+kill_radvd: KillFilter, root, /sbin/radvd, -9, -HUP
+
+# ip_lib
+ip: IpFilter, ip, root
+ip_exec: IpNetnsExecFilter, ip, root
+
+# ovs_lib (if OVSInterfaceDriver is used)
+ovs-vsctl: CommandFilter, ovs-vsctl, root
+
+# iptables_manager
+iptables-save: CommandFilter, iptables-save, root
+iptables-restore: CommandFilter, iptables-restore, root
+ip6tables-save: CommandFilter, ip6tables-save, root
+ip6tables-restore: CommandFilter, ip6tables-restore, root
+
+# Keepalived
+keepalived: CommandFilter, keepalived, root
+kill_keepalived: KillFilter, root, /usr/sbin/keepalived, -HUP, -15, -9
+
+# l3 agent to delete floatingip's conntrack state
+conntrack: CommandFilter, conntrack, root
diff --git a/openstack/etc/neutron/rootwrap.d/lbaas-haproxy.filters b/openstack/etc/neutron/rootwrap.d/lbaas-haproxy.filters
new file mode 100644
index 00000000..b4e1ecba
--- /dev/null
+++ b/openstack/etc/neutron/rootwrap.d/lbaas-haproxy.filters
@@ -0,0 +1,26 @@
+# neutron-rootwrap command filters for nodes on which neutron is
+# expected to control network
+#
+# This file should be owned by (and only-writeable by) the root user
+
+# format seems to be
+# cmd-name: filter-name, raw-command, user, args
+
+[Filters]
+
+# haproxy
+haproxy: CommandFilter, haproxy, root
+
+# lbaas-agent uses kill as well, that's handled by the generic KillFilter
+kill_haproxy_usr: KillFilter, root, /usr/sbin/haproxy, -9, -HUP
+
+ovs-vsctl: CommandFilter, ovs-vsctl, root
+mm-ctl: CommandFilter, mm-ctl, root
+
+# ip_lib
+ip: IpFilter, ip, root
+ip_exec: IpNetnsExecFilter, ip, root
+route: CommandFilter, route, root
+
+# arping
+arping: CommandFilter, arping, root
diff --git a/openstack/etc/neutron/rootwrap.d/linuxbridge-plugin.filters b/openstack/etc/neutron/rootwrap.d/linuxbridge-plugin.filters
new file mode 100644
index 00000000..03df3959
--- /dev/null
+++ b/openstack/etc/neutron/rootwrap.d/linuxbridge-plugin.filters
@@ -0,0 +1,19 @@
+# neutron-rootwrap command filters for nodes on which neutron is
+# expected to control network
+#
+# This file should be owned by (and only-writeable by) the root user
+
+# format seems to be
+# cmd-name: filter-name, raw-command, user, args
+
+[Filters]
+
+# linuxbridge-agent
+# unclear whether both variants are necessary, but I'm transliterating
+# from the old mechanism
+brctl: CommandFilter, brctl, root
+bridge: CommandFilter, bridge, root
+
+# ip_lib
+ip: IpFilter, ip, root
+ip_exec: IpNetnsExecFilter, ip, root
diff --git a/openstack/etc/neutron/rootwrap.d/nec-plugin.filters b/openstack/etc/neutron/rootwrap.d/nec-plugin.filters
new file mode 100644
index 00000000..89c4cfe3
--- /dev/null
+++ b/openstack/etc/neutron/rootwrap.d/nec-plugin.filters
@@ -0,0 +1,12 @@
+# neutron-rootwrap command filters for nodes on which neutron is
+# expected to control network
+#
+# This file should be owned by (and only-writeable by) the root user
+
+# format seems to be
+# cmd-name: filter-name, raw-command, user, args
+
+[Filters]
+
+# nec_neutron_agent
+ovs-vsctl: CommandFilter, ovs-vsctl, root
diff --git a/openstack/etc/neutron/rootwrap.d/ofagent.filters b/openstack/etc/neutron/rootwrap.d/ofagent.filters
new file mode 100644
index 00000000..11e42564
--- /dev/null
+++ b/openstack/etc/neutron/rootwrap.d/ofagent.filters
@@ -0,0 +1,16 @@
+# neutron-rootwrap command filters for nodes on which
+# neutron-ofagent-agent is expected to control network
+#
+# This file should be owned by (and only-writeable by) the root user
+
+# format seems to be
+# cmd-name: filter-name, raw-command, user, args
+
+[Filters]
+
+# ovs_lib
+ovs-vsctl: CommandFilter, ovs-vsctl, root
+
+# ip_lib
+ip: IpFilter, ip, root
+ip_exec: IpNetnsExecFilter, ip, root
diff --git a/openstack/etc/neutron/rootwrap.d/openvswitch-plugin.filters b/openstack/etc/neutron/rootwrap.d/openvswitch-plugin.filters
new file mode 100644
index 00000000..b63a83b9
--- /dev/null
+++ b/openstack/etc/neutron/rootwrap.d/openvswitch-plugin.filters
@@ -0,0 +1,22 @@
+# neutron-rootwrap command filters for nodes on which neutron is
+# expected to control network
+#
+# This file should be owned by (and only-writeable by) the root user
+
+# format seems to be
+# cmd-name: filter-name, raw-command, user, args
+
+[Filters]
+
+# openvswitch-agent
+# unclear whether both variants are necessary, but I'm transliterating
+# from the old mechanism
+ovs-vsctl: CommandFilter, ovs-vsctl, root
+ovs-ofctl: CommandFilter, ovs-ofctl, root
+kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
+ovsdb-client: CommandFilter, ovsdb-client, root
+xe: CommandFilter, xe, root
+
+# ip_lib
+ip: IpFilter, ip, root
+ip_exec: IpNetnsExecFilter, ip, root
diff --git a/openstack/etc/neutron/rootwrap.d/vpnaas.filters b/openstack/etc/neutron/rootwrap.d/vpnaas.filters
new file mode 100644
index 00000000..7848136b
--- /dev/null
+++ b/openstack/etc/neutron/rootwrap.d/vpnaas.filters
@@ -0,0 +1,13 @@
+# neutron-rootwrap command filters for nodes on which neutron is
+# expected to control network
+#
+# This file should be owned by (and only-writeable by) the root user
+
+# format seems to be
+# cmd-name: filter-name, raw-command, user, args
+
+[Filters]
+
+ip: IpFilter, ip, root
+ip_exec: IpNetnsExecFilter, ip, root
+openswan: CommandFilter, ipsec, root