author     Tiago Gomes <tiago.gomes@codethink.co.uk>    2015-05-19 14:13:07 +0000
committer  Pedro Alvarez <pedro.alvarez@codethink.co.uk>    2015-05-22 23:22:09 +0000
commit     4f6f5c5b2fa6cdcdc6ca038ddaa22688fe02244a (patch)
tree       61f1e488507c2fc359607b248815a123264a5aaf
parent     44a2822afbdd2cc8264551dc2ed5427110afc6cc (diff)
download   definitions-4f6f5c5b2fa6cdcdc6ca038ddaa22688fe02244a.tar.gz
Neutron: update configuration files to Kilo
This commit updates the Neutron configuration files to be the factory versions for the Kilo release. Our custom configuration will be re-added in a following commit. The configuration files which are not going to be modified in that commit are now installed from the post-install-commands of the chunk, as opposed to having them lying around in the repo and installing them with the install-files configuration extension.
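
For illustration only, installing the unmodified factory files from the chunk build could look roughly like the fragment below. The post-install-commands key is the one named in the commit message, but the build-system, the source paths under etc/ and the destination directory are assumptions made for this sketch, not the actual contents of strata/openstack-services/neutron.morph.

    name: neutron
    kind: chunk
    build-system: python-distutils
    post-install-commands:
    # Hypothetical: copy the factory configuration shipped in the neutron
    # source tree into the staging area, so these files no longer need to be
    # kept in this repo and installed via the install-files extension.
    - mkdir -p "$DESTDIR"/usr/share/openstack/neutron
    - install -m 644 etc/api-paste.ini "$DESTDIR"/usr/share/openstack/neutron/api-paste.ini
    - install -m 644 etc/policy.json "$DESTDIR"/usr/share/openstack/neutron/policy.json
    - install -m 644 etc/fwaas_driver.ini "$DESTDIR"/usr/share/openstack/neutron/fwaas_driver.ini
    - cp -a etc/neutron/plugins "$DESTDIR"/usr/share/openstack/neutron/plugins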
-rw-r--r--  openstack/manifest | 61
-rw-r--r--  openstack/usr/share/openstack/neutron/api-paste.ini | 30
-rw-r--r--  openstack/usr/share/openstack/neutron/dhcp_agent.ini | 30
-rw-r--r--  openstack/usr/share/openstack/neutron/fwaas_driver.ini | 3
-rw-r--r--  openstack/usr/share/openstack/neutron/l3_agent.ini | 44
-rw-r--r--  openstack/usr/share/openstack/neutron/lbaas_agent.ini | 42
-rw-r--r--  openstack/usr/share/openstack/neutron/metadata_agent.ini | 26
-rw-r--r--  openstack/usr/share/openstack/neutron/metering_agent.ini | 18
-rw-r--r--  openstack/usr/share/openstack/neutron/neutron.conf | 589
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini | 114
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README | 3
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README | 6
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini | 29
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini | 15
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini | 100
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini | 76
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini | 26
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini | 41
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini | 63
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini | 50
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini | 78
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini | 31
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini | 19
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini | 53
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini | 100
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini | 15
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini | 118
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini | 52
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini | 4
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini | 28
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini | 30
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini | 13
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini | 31
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini | 79
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/nec/nec.ini | 60
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini | 41
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini | 35
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini | 26
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini | 190
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini | 14
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini | 44
-rw-r--r--  openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini | 200
-rw-r--r--  openstack/usr/share/openstack/neutron/policy.json | 138
-rw-r--r--  openstack/usr/share/openstack/neutron/vpn_agent.ini | 14
-rw-r--r--  strata/openstack-services/neutron.morph | 24
45 files changed, 596 insertions, 2207 deletions
diff --git a/openstack/manifest b/openstack/manifest
index 8193309c..1ecaa5ec 100644
--- a/openstack/manifest
+++ b/openstack/manifest
@@ -43,70 +43,13 @@ template 0100644 0 0 /etc/tempest/tempest.conf
0040755 0 0 /usr/share/openstack/neutron
0100644 0 0 /usr/share/openstack/neutron-config.yml
0100644 0 0 /usr/share/openstack/neutron-db.yml
-0100644 0 0 /usr/share/openstack/neutron/neutron.conf
-0100644 0 0 /usr/share/openstack/neutron/api-paste.ini
-0100644 0 0 /usr/share/openstack/neutron/policy.json
-0100644 0 0 /usr/share/openstack/neutron/l3_agent.ini
0100644 0 0 /usr/share/openstack/neutron/dhcp_agent.ini
-0100644 0 0 /usr/share/openstack/neutron/lbaas_agent.ini
+0100644 0 0 /usr/share/openstack/neutron/l3_agent.ini
+0100644 0 0 /usr/share/openstack/neutron/neutron.conf
0100644 0 0 /usr/share/openstack/neutron/metadata_agent.ini
-0100644 0 0 /usr/share/openstack/neutron/fwaas_driver.ini
-0100644 0 0 /usr/share/openstack/neutron/metering_agent.ini
-0100644 0 0 /usr/share/openstack/neutron/vpn_agent.ini
0040755 0 0 /usr/share/openstack/neutron/plugins/
-0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch
-0100644 0 0 /usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl
-0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs
-0040755 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs
-0100644 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README
-0100644 0 0 /usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README
-0040755 0 0 /usr/share/openstack/neutron/plugins/brocade
-0100644 0 0 /usr/share/openstack/neutron/plugins/brocade/brocade.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/cisco
-0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/embrane
-0100644 0 0 /usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/hyperv
-0100644 0 0 /usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/ibm
-0100644 0 0 /usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/linuxbridge
-0100644 0 0 /usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/metaplugin
-0100644 0 0 /usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/midonet
-0100644 0 0 /usr/share/openstack/neutron/plugins/midonet/midonet.ini
0040755 0 0 /usr/share/openstack/neutron/plugins/ml2
0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini
-0100644 0 0 /usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/mlnx
-0100644 0 0 /usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/nec
-0100644 0 0 /usr/share/openstack/neutron/plugins/nec/nec.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/nuage
-0100644 0 0 /usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/oneconvergence
-0100644 0 0 /usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/opencontrail
-0100644 0 0 /usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/openvswitch
-0100644 0 0 /usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/plumgrid
-0100644 0 0 /usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini
-0040755 0 0 /usr/share/openstack/neutron/plugins/vmware
-0100644 0 0 /usr/share/openstack/neutron/plugins/vmware/nsx.ini
0040755 0 0 /usr/share/openstack/nova
0100644 0 0 /usr/share/openstack/nova-config.yml
0100644 0 0 /usr/share/openstack/nova-db.yml
diff --git a/openstack/usr/share/openstack/neutron/api-paste.ini b/openstack/usr/share/openstack/neutron/api-paste.ini
deleted file mode 100644
index bbcd4152..00000000
--- a/openstack/usr/share/openstack/neutron/api-paste.ini
+++ /dev/null
@@ -1,30 +0,0 @@
-[composite:neutron]
-use = egg:Paste#urlmap
-/: neutronversions
-/v2.0: neutronapi_v2_0
-
-[composite:neutronapi_v2_0]
-use = call:neutron.auth:pipeline_factory
-noauth = request_id catch_errors extensions neutronapiapp_v2_0
-keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0
-
-[filter:request_id]
-paste.filter_factory = neutron.openstack.common.middleware.request_id:RequestIdMiddleware.factory
-
-[filter:catch_errors]
-paste.filter_factory = neutron.openstack.common.middleware.catch_errors:CatchErrorsMiddleware.factory
-
-[filter:keystonecontext]
-paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
-
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-
-[filter:extensions]
-paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
-
-[app:neutronversions]
-paste.app_factory = neutron.api.versions:Versions.factory
-
-[app:neutronapiapp_v2_0]
-paste.app_factory = neutron.api.v2.router:APIRouter.factory
diff --git a/openstack/usr/share/openstack/neutron/dhcp_agent.ini b/openstack/usr/share/openstack/neutron/dhcp_agent.ini
index c6c2b9a7..a0adccaa 100644
--- a/openstack/usr/share/openstack/neutron/dhcp_agent.ini
+++ b/openstack/usr/share/openstack/neutron/dhcp_agent.ini
@@ -1,7 +1,6 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
-use_syslog = True
# The DHCP agent will resync its state with Neutron to recover from any
# transient notification or rpc errors. The interval is number of
@@ -14,7 +13,7 @@ use_syslog = True
# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
# BigSwitch/Floodlight)
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Name of Open vSwitch bridge to use
# ovs_integration_bridge = br-int
@@ -29,18 +28,20 @@ interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
# no additional setup of the DHCP server.
-dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+# dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
-# iproute2 package that supports namespaces).
-use_namespaces = True
+# iproute2 package that supports namespaces). This option is deprecated and
+# will be removed in a future release, at which point the old behavior of
+# use_namespaces = True will be enforced.
+# use_namespaces = True
# The DHCP server can assist with providing metadata support on isolated
# networks. Setting this value to True will cause the DHCP server to append
# specific host routes to the DHCP request. The metadata service will only
# be activated when the subnet does not contain any router port. The guest
# instance must be configured to request host routes via DHCP (Option 121).
-enable_isolated_metadata = True
+# enable_isolated_metadata = False
# Allows for serving metadata requests coming from a dedicated metadata
# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
@@ -73,16 +74,15 @@ enable_isolated_metadata = True
# Location to DHCP lease relay UNIX domain socket
# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
+# Use broadcast in DHCP replies
+# dhcp_broadcast_reply = False
-# dhcp_delete_namespaces, which is false by default, can be set to True if
-# namespaces can be deleted cleanly on the host running the dhcp agent.
-# Do not enable this until you understand the problem with the Linux iproute
-# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
-# you are sure that your version of iproute does not suffer from the problem.
-# If True, namespaces will be deleted when a dhcp server is disabled.
-# dhcp_delete_namespaces = False
+# dhcp_delete_namespaces, which is True by default, can be set to False if
+# namespaces can't be deleted cleanly on the host running the DHCP agent.
+# Disable this if you hit the issue in
+# https://bugs.launchpad.net/neutron/+bug/1052535 or if
+# you are sure that your version of iproute suffers from the problem.
+# dhcp_delete_namespaces = True
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
diff --git a/openstack/usr/share/openstack/neutron/fwaas_driver.ini b/openstack/usr/share/openstack/neutron/fwaas_driver.ini
deleted file mode 100644
index 41f761ab..00000000
--- a/openstack/usr/share/openstack/neutron/fwaas_driver.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[fwaas]
-#driver = neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
-#enabled = True
diff --git a/openstack/usr/share/openstack/neutron/l3_agent.ini b/openstack/usr/share/openstack/neutron/l3_agent.ini
index 000cd997..0d56436b 100644
--- a/openstack/usr/share/openstack/neutron/l3_agent.ini
+++ b/openstack/usr/share/openstack/neutron/l3_agent.ini
@@ -1,7 +1,6 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
-use_syslog = True
# L3 requires that an interface driver be set. Choose the one that best
# matches your plugin.
@@ -9,7 +8,7 @@ use_syslog = True
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
# that supports L3 agent
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
@@ -20,8 +19,10 @@ interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
-# iproute2 package that supports namespaces).
-use_namespaces = True
+# iproute2 package that supports namespaces). This option is deprecated and
+# will be removed in a future release, at which point the old behavior of
+# use_namespaces = True will be enforced.
+# use_namespaces = True
# If use_namespaces is set as False then the agent can only configure one router.
@@ -35,6 +36,20 @@ use_namespaces = True
# must be left empty.
# gateway_external_network_id =
+# With IPv6, the network used for the external gateway does not need
+# to have an associated subnet, since the automatically assigned
+# link-local address (LLA) can be used. However, an IPv6 gateway address
+# is needed for use as the next-hop for the default route. If no IPv6
+# gateway address is configured here, (and only then) the neutron router
+# will be configured to get its default route from router advertisements (RAs)
+# from the upstream router; in which case the upstream router must also be
+# configured to send these RAs.
+# The ipv6_gateway, when configured, should be the LLA of the interface
+# on the upstream router. If a next-hop using a global unique address (GUA)
+# is desired, it needs to be done via a subnet allocated to the network
+# and not through this parameter.
+# ipv6_gateway =
+
# Indicates that this L3 agent should also handle routers that do not have
# an external network gateway configured. This option should be True only
# for a single agent in a Neutron deployment, and may be False for all agents
@@ -44,7 +59,7 @@ use_namespaces = True
# Name of bridge used for external network traffic. This should be set to
# empty value for the linux bridge. when this parameter is set, each L3 agent
# can be associated with no more than one external network.
-external_network_bridge = br-ex
+# external_network_bridge = br-ex
# TCP Port used by Neutron metadata server
# metadata_port = 9697
@@ -64,16 +79,19 @@ external_network_bridge = br-ex
# if the Nova metadata server is not available
# enable_metadata_proxy = True
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
+# Iptables mangle mark used to mark metadata valid requests
+# metadata_access_mark = 0x1
+
+# Iptables mangle mark used to mark ingress from external network
+# external_ingress_mark = 0x2
-# router_delete_namespaces, which is false by default, can be set to True if
-# namespaces can be deleted cleanly on the host running the L3 agent.
-# Do not enable this until you understand the problem with the Linux iproute
-# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
-# you are sure that your version of iproute does not suffer from the problem.
+# router_delete_namespaces, which is True by default, can be set to False if
+# namespaces can't be deleted cleanly on the host running the L3 agent.
+# Disable this if you hit the issue in
+# https://bugs.launchpad.net/neutron/+bug/1052535 or if
+# you are sure that your version of iproute suffers from the problem.
# If True, namespaces will be deleted when a router is destroyed.
-# router_delete_namespaces = False
+# router_delete_namespaces = True
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
diff --git a/openstack/usr/share/openstack/neutron/lbaas_agent.ini b/openstack/usr/share/openstack/neutron/lbaas_agent.ini
deleted file mode 100644
index 68a2759e..00000000
--- a/openstack/usr/share/openstack/neutron/lbaas_agent.ini
+++ /dev/null
@@ -1,42 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output).
-# debug = False
-
-# The LBaaS agent will resync its state with Neutron to recover from any
-# transient notification or rpc errors. The interval is number of
-# seconds between attempts.
-# periodic_interval = 10
-
-# LBaas requires an interface driver be set. Choose the one that best
-# matches your plugin.
-# interface_driver =
-
-# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
-# BigSwitch/Floodlight)
-# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-
-# Use veth for an OVS interface or not.
-# Support kernels with limited namespace support
-# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
-# ovs_use_veth = False
-
-# Example of interface_driver option for LinuxBridge
-# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-
-# The agent requires drivers to manage the loadbalancer. HAProxy is the opensource version.
-# Multiple device drivers reflecting different service providers could be specified:
-# device_driver = path.to.provider1.driver.Driver
-# device_driver = path.to.provider2.driver.Driver
-# Default is:
-# device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver
-
-[haproxy]
-# Location to store config and state files
-# loadbalancer_state_path = $state_path/lbaas
-
-# The user group
-# user_group = nogroup
-
-# When delete and re-add the same vip, send this many gratuitous ARPs to flush
-# the ARP cache in the Router. Set it below or equal to 0 to disable this feature.
-# send_gratuitous_arp = 3
diff --git a/openstack/usr/share/openstack/neutron/metadata_agent.ini b/openstack/usr/share/openstack/neutron/metadata_agent.ini
index ed238770..4a0331ee 100644
--- a/openstack/usr/share/openstack/neutron/metadata_agent.ini
+++ b/openstack/usr/share/openstack/neutron/metadata_agent.ini
@@ -1,24 +1,23 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = True
-use_syslog = True
# The Neutron user information for accessing the Neutron API.
-auth_url = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
-auth_region = regionOne
+auth_url = http://localhost:5000/v2.0
+auth_region = RegionOne
# Turn off verification of the certificate for ssl
# auth_insecure = False
# Certificate Authority public key (CA cert) file for ssl
# auth_ca_cert =
-admin_tenant_name = service
-admin_user = {{ NEUTRON_SERVICE_USER }}
-admin_password = {{ NEUTRON_SERVICE_PASSWORD }}
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USER%
+admin_password = %SERVICE_PASSWORD%
# Network service endpoint type to pull from the keystone catalog
# endpoint_type = adminURL
# IP address used by Nova metadata server
-nova_metadata_ip = {{ CONTROLLER_HOST_ADDRESS }}
+# nova_metadata_ip = 127.0.0.1
# TCP Port used by Nova metadata server
# nova_metadata_port = 8775
@@ -40,12 +39,21 @@ nova_metadata_ip = {{ CONTROLLER_HOST_ADDRESS }}
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it must match here and in the configuration used by the Nova Metadata
-# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
-metadata_proxy_shared_secret = {{ METADATA_PROXY_SHARED_SECRET }}
+# Server. NOTE: Nova uses the same config key, but in [neutron] section.
+# metadata_proxy_shared_secret =
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
+# Metadata Proxy UNIX domain socket mode, 3 values allowed:
+# 'deduce': deduce mode from metadata_proxy_user/group values,
+# 'user': set metadata proxy socket mode to 0o644, to use when
+# metadata_proxy_user is agent effective user or root,
+# 'group': set metadata proxy socket mode to 0o664, to use when
+# metadata_proxy_group is agent effective group,
+# 'all': set metadata proxy socket mode to 0o666, to use otherwise.
+# metadata_proxy_socket_mode = deduce
+
# Number of separate worker processes for metadata server. Defaults to
# half the number of CPU cores
# metadata_workers =
diff --git a/openstack/usr/share/openstack/neutron/metering_agent.ini b/openstack/usr/share/openstack/neutron/metering_agent.ini
deleted file mode 100644
index 88826ce7..00000000
--- a/openstack/usr/share/openstack/neutron/metering_agent.ini
+++ /dev/null
@@ -1,18 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = True
-
-# Default driver:
-# driver = neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver
-# Example of non-default driver
-# driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver
-
-# Interval between two metering measures
-# measure_interval = 30
-
-# Interval between two metering reports
-# report_interval = 300
-
-# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-
-# use_namespaces = True
diff --git a/openstack/usr/share/openstack/neutron/neutron.conf b/openstack/usr/share/openstack/neutron/neutron.conf
index 51de7464..ee42954b 100644
--- a/openstack/usr/share/openstack/neutron/neutron.conf
+++ b/openstack/usr/share/openstack/neutron/neutron.conf
@@ -17,10 +17,7 @@
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
+# state_path = /var/lib/neutron
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
@@ -32,8 +29,7 @@ lock_path = $state_path/lock
# (not user_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
-use_syslog = True
-
+# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
@@ -61,7 +57,7 @@ use_syslog = True
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
-core_plugin = ml2
+# core_plugin =
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
@@ -70,15 +66,22 @@ core_plugin = ml2
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
-service_plugins = router
+# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
# Paste configuration file
-api_paste_config = api-paste.ini
+# api_paste_config = api-paste.ini
+
+# (StrOpt) Hostname to be used by the neutron server, agents and services
+# running on this machine. All the agents and services running on this machine
+# must use the same host value.
+# The default value is hostname of the machine.
+#
+# host =
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
+# auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4h octet is not 00, it will also be used. The others will be
@@ -115,7 +118,7 @@ auth_strategy = keystone
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
-allow_overlapping_ips = True
+# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet. For IPv6, validate only if
# gateway is not a link local address. Deprecated, to be removed during the
# K release, at which point the check will be mandatory.
@@ -140,6 +143,29 @@ allow_overlapping_ips = True
# Maximum number of routes per router
# max_routes = 30
+# Default Subnet Pool to be used for IPv4 subnet-allocation.
+# Specifies by UUID the pool to be used in case of subnet-create being called
+# without a subnet-pool ID. The default of None means that no pool will be
+# used unless passed explicitly to subnet create. If no pool is used, then a
+# CIDR must be passed to create a subnet and that subnet will not be allocated
+# from any pool; it will be considered part of the tenant's private address
+# space.
+# default_ipv4_subnet_pool =
+
+# Default Subnet Pool to be used for IPv6 subnet-allocation.
+# Specifies by UUID the pool to be used in case of subnet-create being
+# called without a subnet-pool ID. Set to "prefix_delegation"
+# to enable IPv6 Prefix Delegation in a PD-capable environment.
+# See the description for default_ipv4_subnet_pool for more information.
+# default_ipv6_subnet_pool =
+
+# =========== items for MTU selection and advertisement =============
+# Advertise MTU. If True, effort is made to advertise MTU
+# settings to VMs via network methods (ie. DHCP and RA MTU options)
+# when the network's preferred MTU is known.
+# advertise_mtu = False
+# ======== end of items for MTU selection and advertisement =========
+
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
@@ -154,6 +180,23 @@ allow_overlapping_ips = True
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+# (StrOpt) Representing the resource type whose load is being reported by
+# the agent.
+# This can be 'networks','subnets' or 'ports'. When specified (Default is networks),
+# the server will extract particular load sent as part of its agent configuration object
+# from the agent report state, which is the number of resources being consumed, at
+# every report_interval.
+# dhcp_load_type can be used in combination with network_scheduler_driver =
+# neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
+# When the network_scheduler_driver is WeightScheduler, dhcp_load_type can
+# be configured to represent the choice for the resource being balanced.
+# Example: dhcp_load_type = networks
+# Values:
+# networks - number of networks hosted on the agent
+# subnets - number of subnets associated with the networks hosted on the agent
+# ports - number of ports associated with the networks hosted on the agent
+# dhcp_load_type = networks
+
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
@@ -167,10 +210,25 @@ allow_overlapping_ips = True
# admin_state_up set to True to alive agents.
# allow_automatic_l3agent_failover = False
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
+# Allow automatic removal of networks from dead DHCP agents with
+# admin_state_up set to True.
+# Networks could then be rescheduled if network_auto_schedule is True
+# allow_automatic_dhcp_failover = True
+
+# Number of DHCP agents scheduled to host a tenant network.
+# If this number is greater than 1, the scheduler automatically
+# assigns multiple DHCP agents for a given tenant network,
+# providing high availability for DHCP service.
# dhcp_agents_per_network = 1
+# Enable services on agents with admin_state_up False.
+# If this option is False, when admin_state_up of an agent is turned to
+# False, services on it will be disabled. If this option is True, services
+# on agents with admin_state_up False keep available and manual scheduling
+# to such agents is available. Agents with admin_state_up False are not
+# selected for automatic scheduling regardless of this option.
+# enable_services_on_agents_with_admin_state_down = False
+
# =========== end of items for agent scheduler extension =====
# =========== items for l3 extension ==============
@@ -187,8 +245,39 @@ allow_overlapping_ips = True
#
# CIDR of the administrative network if HA mode is enabled
# l3_ha_net_cidr = 169.254.192.0/18
+#
+# Enable snat by default on external gateway when available
+# enable_snat_by_default = True
# =========== end of items for l3 extension =======
+# =========== items for metadata proxy configuration ==============
+# User (uid or name) running metadata proxy after its initialization
+# (if empty: agent effective user)
+# metadata_proxy_user =
+
+# Group (gid or name) running metadata proxy after its initialization
+# (if empty: agent effective group)
+# metadata_proxy_group =
+
+# Enable/Disable log watch by metadata proxy, it should be disabled when
+# metadata_proxy_user/group is not allowed to read/write its log file and
+# 'copytruncate' logrotate option must be used if logrotate is enabled on
+# metadata proxy log files. Option default value is deduced from
+# metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent
+# effective user id/name.
+# metadata_proxy_watch_log =
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+# =========== end of items for metadata proxy configuration ==============
+
+# ========== items for VLAN trunking networks ==========
+# Setting this flag to True will allow plugins that support it to
+# create VLAN transparent networks. This flag has no effect for
+# plugins that do not support VLAN transparent networks.
+# vlan_transparent = False
+# ========== end of items for VLAN trunking networks ==========
+
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
@@ -202,6 +291,18 @@ allow_overlapping_ips = True
# enabled for various plugins for compatibility.
# rpc_workers = 0
+# Timeout for client connections socket operations. If an
+# incoming connection is idle for this number of seconds it
+# will be closed. A value of '0' means wait forever. (integer
+# value)
+# client_socket_timeout = 900
+
+# wsgi keepalive option. Determines if connections are allowed to be held open
+# by clients after a request is fulfilled. A value of False will ensure that
+# the socket connection will be explicitly closed once a response has been
+# sent to the client.
+# wsgi_keep_alive = True
+
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
@@ -231,32 +332,36 @@ allow_overlapping_ips = True
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
-
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
+# notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update it's cache.
-notify_nova_on_port_data_changes = True
+# notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ CONTROLLER_HOST_ADDRESS }}:8774/v2
+# nova_url = http://127.0.0.1:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = regionOne
+# nova_region_name =
# Username for connection to nova in admin context
-nova_admin_username = {{ NOVA_SERVICE_USER }}
+# nova_admin_username =
# The uuid of the admin nova tenant
-nova_admin_tenant_id = {{ SERVICE_TENANT_ID }}
+# nova_admin_tenant_id =
+
+# The name of the admin nova tenant. If the uuid of the admin nova tenant
+# is set, this is optional. Useful for cases where the uuid of the admin
+# nova tenant is not available when configuration is being done.
+# nova_admin_tenant_name =
# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_SERVICE_PASSWORD }}
+# nova_admin_password =
# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
+# nova_admin_auth_url =
# CA file for novaclient to verify server certificates
# nova_ca_certificates_file =
@@ -275,42 +380,42 @@ nova_admin_auth_url = http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
# Use durable queues in amqp. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
-#amqp_durable_queues=false
+# amqp_durable_queues=false
# Auto-delete queues in amqp. (boolean value)
-#amqp_auto_delete=false
+# amqp_auto_delete=false
# Size of RPC connection pool. (integer value)
-#rpc_conn_pool_size=30
+# rpc_conn_pool_size=30
# Qpid broker hostname. (string value)
-#qpid_hostname=localhost
+# qpid_hostname=localhost
# Qpid broker port. (integer value)
-#qpid_port=5672
+# qpid_port=5672
# Qpid HA cluster host:port pairs. (list value)
-#qpid_hosts=$qpid_hostname:$qpid_port
+# qpid_hosts=$qpid_hostname:$qpid_port
# Username for Qpid connection. (string value)
-#qpid_username=
+# qpid_username=
# Password for Qpid connection. (string value)
-#qpid_password=
+# qpid_password=
# Space separated list of SASL mechanisms to use for auth.
# (string value)
-#qpid_sasl_mechanisms=
+# qpid_sasl_mechanisms=
# Seconds between connection keepalive heartbeats. (integer
# value)
-#qpid_heartbeat=60
+# qpid_heartbeat=60
# Transport to use, either 'tcp' or 'ssl'. (string value)
-#qpid_protocol=tcp
+# qpid_protocol=tcp
# Whether to disable the Nagle algorithm. (boolean value)
-#qpid_tcp_nodelay=true
+# qpid_tcp_nodelay=true
# The qpid topology version to use. Version 1 is what was
# originally used by impl_qpid. Version 2 includes some
@@ -318,136 +423,136 @@ nova_admin_auth_url = http://{{ CONTROLLER_HOST_ADDRESS }}:35357/v2.0
# to work. Users should update to version 2 when they are
# able to take everything down, as it requires a clean break.
# (integer value)
-#qpid_topology_version=1
+# qpid_topology_version=1
# SSL version to use (valid only if SSL enabled). valid values
# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
# distributions. (string value)
-#kombu_ssl_version=
+# kombu_ssl_version=
# SSL key file (valid only if SSL enabled). (string value)
-#kombu_ssl_keyfile=
+# kombu_ssl_keyfile=
# SSL cert file (valid only if SSL enabled). (string value)
-#kombu_ssl_certfile=
+# kombu_ssl_certfile=
# SSL certification authority file (valid only if SSL
# enabled). (string value)
-#kombu_ssl_ca_certs=
+# kombu_ssl_ca_certs=
# How long to wait before reconnecting in response to an AMQP
# consumer cancel notification. (floating point value)
-#kombu_reconnect_delay=1.0
+# kombu_reconnect_delay=1.0
# The RabbitMQ broker address where a single node is used.
# (string value)
-rabbit_host={{ RABBITMQ_HOST }}
+# rabbit_host=localhost
# The RabbitMQ broker port where a single node is used.
# (integer value)
-rabbit_port={{ RABBITMQ_PORT }}
+# rabbit_port=5672
# RabbitMQ HA cluster host:port pairs. (list value)
-#rabbit_hosts=$rabbit_host:$rabbit_port
+# rabbit_hosts=$rabbit_host:$rabbit_port
# Connect over SSL for RabbitMQ. (boolean value)
-#rabbit_use_ssl=false
+# rabbit_use_ssl=false
# The RabbitMQ userid. (string value)
-rabbit_userid={{ RABBITMQ_USER }}
+# rabbit_userid=guest
# The RabbitMQ password. (string value)
-rabbit_password={{ RABBITMQ_PASSWORD }}
+# rabbit_password=guest
# the RabbitMQ login method (string value)
-#rabbit_login_method=AMQPLAIN
+# rabbit_login_method=AMQPLAIN
# The RabbitMQ virtual host. (string value)
-#rabbit_virtual_host=/
+# rabbit_virtual_host=/
# How frequently to retry connecting with RabbitMQ. (integer
# value)
-#rabbit_retry_interval=1
+# rabbit_retry_interval=1
# How long to backoff for between retries when connecting to
# RabbitMQ. (integer value)
-#rabbit_retry_backoff=2
+# rabbit_retry_backoff=2
# Maximum number of RabbitMQ connection retries. Default is 0
# (infinite retry count). (integer value)
-#rabbit_max_retries=0
+# rabbit_max_retries=0
# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
# this option, you must wipe the RabbitMQ database. (boolean
# value)
-#rabbit_ha_queues=false
+# rabbit_ha_queues=false
# If passed, use a fake RabbitMQ provider. (boolean value)
-#fake_rabbit=false
+# fake_rabbit=false
# ZeroMQ bind address. Should be a wildcard (*), an ethernet
# interface, or IP. The "host" option should point or resolve
# to this address. (string value)
-#rpc_zmq_bind_address=*
+# rpc_zmq_bind_address=*
# MatchMaker driver. (string value)
-#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+# rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
# ZeroMQ receiver listening port. (integer value)
-#rpc_zmq_port=9501
+# rpc_zmq_port=9501
# Number of ZeroMQ contexts, defaults to 1. (integer value)
-#rpc_zmq_contexts=1
+# rpc_zmq_contexts=1
# Maximum number of ingress messages to locally buffer per
# topic. Default is unlimited. (integer value)
-#rpc_zmq_topic_backlog=<None>
+# rpc_zmq_topic_backlog=
# Directory for holding IPC sockets. (string value)
-#rpc_zmq_ipc_dir=/var/run/openstack
+# rpc_zmq_ipc_dir=/var/run/openstack
# Name of this node. Must be a valid hostname, FQDN, or IP
# address. Must match "host" option, if running Nova. (string
# value)
-#rpc_zmq_host=oslo
+# rpc_zmq_host=oslo
# Seconds to wait before a cast expires (TTL). Only supported
# by impl_zmq. (integer value)
-#rpc_cast_timeout=30
+# rpc_cast_timeout=30
# Heartbeat frequency. (integer value)
-#matchmaker_heartbeat_freq=300
+# matchmaker_heartbeat_freq=300
# Heartbeat time-to-live. (integer value)
-#matchmaker_heartbeat_ttl=600
+# matchmaker_heartbeat_ttl=600
# Size of RPC greenthread pool. (integer value)
-#rpc_thread_pool_size=64
+# rpc_thread_pool_size=64
# Driver or drivers to handle sending notifications. (multi
# valued)
-notification_driver=neutron.openstack.common.notifier.rpc_notifier
+# notification_driver=
# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
-#notification_topics=notifications
+# notification_topics=notifications
# Seconds to wait for a response from a call. (integer value)
-#rpc_response_timeout=60
+# rpc_response_timeout=60
# A URL representing the messaging driver to use and its full
# configuration. If not set, we fall back to the rpc_backend
# option and driver specific configuration. (string value)
-#transport_url=<None>
+# transport_url=
# The messaging driver to use, defaults to rabbit. Other
# drivers include qpid and zmq. (string value)
-rpc_backend=rabbit
+# rpc_backend=rabbit
# The default exchange under which topics are scoped. May be
# overridden by an exchange name specified in the
# transport_url option. (string value)
-#control_exchange=openstack
+# control_exchange=openstack
[matchmaker_redis]
@@ -457,13 +562,13 @@ rpc_backend=rabbit
#
# Host to locate redis. (string value)
-#host=127.0.0.1
+# host=127.0.0.1
# Use this port to connect to redis host. (integer value)
-#port=6379
+# port=6379
# Password for Redis server (optional). (string value)
-#password=<None>
+# password=
[matchmaker_ring]
@@ -474,13 +579,14 @@ rpc_backend=rabbit
# Matchmaker ring file (JSON). (string value)
# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
-#ringfile=/etc/oslo/matchmaker_ring.json
+# ringfile=/etc/oslo/matchmaker_ring.json
[quotas]
# Default driver to use for quota checks
# quota_driver = neutron.db.quota_db.DbQuotaDriver
# Resource name(s) that are supported in quota features
+# This option is deprecated for removal in the M release, please refrain from using it
# quota_items = network,subnet,port
# Default number of resource allowed per tenant. A negative value means
@@ -523,6 +629,16 @@ rpc_backend=rabbit
# and that is the reason why quota is possible.
# quota_health_monitor = -1
+# Number of loadbalancers allowed per tenant. A negative value means unlimited.
+# quota_loadbalancer = 10
+
+# Number of listeners allowed per tenant. A negative value means unlimited.
+# quota_listener = -1
+
+# Number of v2 health monitors allowed per tenant. A negative value means
+# unlimited. These health monitors exist under the lbaas v2 API
+# quota_healthmonitor = -1
+
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
@@ -543,9 +659,29 @@ rpc_backend=rabbit
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
+# Change to "sudo" to skip the filtering and just run the command directly
# root_helper = sudo
-root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
+
+# Set to true to add comments to generated iptables rules that describe
+# each rule's purpose. (System must support the iptables comments module.)
+# comment_iptables_rules = True
+
+# Root helper daemon application to use when possible.
+# root_helper_daemon =
+
+# Use the root helper when listing the namespaces on a system. This may not
+# be required depending on the security configuration. If the root helper is
+# not required, set this to False for a performance improvement.
+# use_helper_for_ns_read = True
+
+# The interval to check external processes for failure in seconds (0=disabled)
+# check_child_processes_interval = 60
+
+# Action to take when an external process spawned by an agent dies
+# Values:
+# respawn - Respawns the external process
+# exit - Exits the agent
+# check_child_processes_action = respawn
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
@@ -555,11 +691,11 @@ root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
# =========== end of items for agent management extension =====
[keystone_authtoken]
-auth_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:5000/v2.0
-identity_uri = http://{{ CONTROLLER_HOST_ADDRESS }}:35357
-admin_tenant_name = service
-admin_user = {{ NEUTRON_SERVICE_USER }}
-admin_password = {{ NEUTRON_SERVICE_PASSWORD }}
+auth_uri = http://127.0.0.1:35357/v2.0/
+identity_uri = http://127.0.0.1:5000
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USER%
+admin_password = %SERVICE_PASSWORD%
[database]
# This line MUST be changed to actually run the plugin.
@@ -572,8 +708,6 @@ admin_password = {{ NEUTRON_SERVICE_PASSWORD }}
# be set in the corresponding core plugin '.ini' file. However, it is suggested
# to put the [database] section and its connection attribute in this
# configuration file.
-#connection=sqlite:////var/lib/neutron/neutron.sqlite
-connection=postgresql://{{ NEUTRON_DB_USER }}:{{ NEUTRON_DB_PASSWORD }}@{{ CONTROLLER_HOST_ADDRESS }}/neutron
# Database engine for which script will be generated when using offline
# migration
@@ -611,30 +745,265 @@ connection=postgresql://{{ NEUTRON_DB_USER }}:{{ NEUTRON_DB_PASSWORD }}@{{ CONTR
# If set, use this value for pool_timeout with sqlalchemy
# pool_timeout = 10
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
-# Uncomment the line below to use the A10 Networks LBaaS driver. Requires 'pip install a10-neutron-lbaas'.
-#service_provider = LOADBALANCER:A10Networks:neutron.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default
-# Uncomment the following line to test the LBaaS v2 API _WITHOUT_ a real backend
-# service_provider = LOADBALANCER:LoggingNoop:neutron.services.loadbalancer.drivers.logging_noop.driver.LoggingNoopLoadBalancerDriver:default
+[nova]
+# Name of the plugin to load
+# auth_plugin =
+
+# Config Section from which to load plugin specific options
+# auth_section =
+
+# PEM encoded Certificate Authority to use when verifying HTTPs connections.
+# cafile =
+
+# PEM encoded client certificate cert file
+# certfile =
+
+# Verify HTTPS connections.
+# insecure = False
+
+# PEM encoded client certificate key file
+# keyfile =
+
+# Name of nova region to use. Useful if keystone manages more than one region.
+# region_name =
+
+# Timeout value for http requests
+# timeout =
+
+[oslo_concurrency]
+
+# Directory to use for lock files. For security, the specified directory should
+# only be writable by the user running the processes that need locking.
+# Defaults to environment variable OSLO_LOCK_PATH. If external locks are used,
+# a lock path must be set.
+lock_path = $state_path/lock
+
+# Enables or disables inter-process locks.
+# disable_process_locking = False
+
+[oslo_policy]
+
+# The JSON file that defines policies.
+# policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found.
+# policy_default_rule = default
+
+# Directories where policy configuration files are stored.
+# They can be relative to any directory in the search path defined by the
+# config_dir option, or absolute paths. The file defined by policy_file
+# must exist for these directories to be searched. Missing or empty
+# directories are ignored.
+# policy_dirs = policy.d
+
+[oslo_messaging_amqp]
+
+#
+# From oslo.messaging
+#
+
+# Address prefix used when sending to a specific server (string value)
+# Deprecated group/name - [amqp1]/server_request_prefix
+# server_request_prefix = exclusive
+
+# Address prefix used when broadcasting to all servers (string value)
+# Deprecated group/name - [amqp1]/broadcast_prefix
+# broadcast_prefix = broadcast
+
+# Address prefix when sending to any server in group (string value)
+# Deprecated group/name - [amqp1]/group_request_prefix
+# group_request_prefix = unicast
+
+# Name for the AMQP container (string value)
+# Deprecated group/name - [amqp1]/container_name
+# container_name =
+
+# Timeout for inactive connections (in seconds) (integer value)
+# Deprecated group/name - [amqp1]/idle_timeout
+# idle_timeout = 0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+# Deprecated group/name - [amqp1]/trace
+# trace = false
+
+# CA certificate PEM file for verifing server certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_ca_file
+# ssl_ca_file =
+
+# Identifying certificate PEM file to present to clients (string value)
+# Deprecated group/name - [amqp1]/ssl_cert_file
+# ssl_cert_file =
+
+# Private key PEM file used to sign cert_file certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_key_file
+# ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted) (string value)
+# Deprecated group/name - [amqp1]/ssl_key_password
+# ssl_key_password =
+
+# Accept clients using either SSL or plain TCP (boolean value)
+# Deprecated group/name - [amqp1]/allow_insecure_clients
+# allow_insecure_clients = false
+
+
+[oslo_messaging_qpid]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+# amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_auto_delete
+# amqp_auto_delete = false
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+# rpc_conn_pool_size = 30
+
+# Qpid broker hostname. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_hostname
+# qpid_hostname = localhost
+
+# Qpid broker port. (integer value)
+# Deprecated group/name - [DEFAULT]/qpid_port
+# qpid_port = 5672
+
+# Qpid HA cluster host:port pairs. (list value)
+# Deprecated group/name - [DEFAULT]/qpid_hosts
+# qpid_hosts = $qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_username
+# qpid_username =
+
+# Password for Qpid connection. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_password
+# qpid_password =
+
+# Space separated list of SASL mechanisms to use for auth. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms
+# qpid_sasl_mechanisms =
+
+# Seconds between connection keepalive heartbeats. (integer value)
+# Deprecated group/name - [DEFAULT]/qpid_heartbeat
+# qpid_heartbeat = 60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_protocol
+# qpid_protocol = tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+# Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay
+# qpid_tcp_nodelay = true
+
+# The number of prefetched messages held by receiver. (integer value)
+# Deprecated group/name - [DEFAULT]/qpid_receiver_capacity
+# qpid_receiver_capacity = 1
+
+# The qpid topology version to use. Version 1 is what was originally used by
+# impl_qpid. Version 2 includes some backwards-incompatible changes that allow
+# broker federation to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break. (integer value)
+# Deprecated group/name - [DEFAULT]/qpid_topology_version
+# qpid_topology_version = 1
+
+
+[oslo_messaging_rabbit]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+# amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_auto_delete
+# amqp_auto_delete = false
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+# rpc_conn_pool_size = 30
+
+# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
+# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
+# distributions. (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_version
+# kombu_ssl_version =
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
+# kombu_ssl_keyfile =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
+# kombu_ssl_certfile =
+
+# SSL certification authority file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
+# kombu_ssl_ca_certs =
+
+# How long to wait before reconnecting in response to an AMQP consumer cancel
+# notification. (floating point value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
+# kombu_reconnect_delay = 1.0
+
+# The RabbitMQ broker address where a single node is used. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_host
+# rabbit_host = localhost
+
+# The RabbitMQ broker port where a single node is used. (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_port
+# rabbit_port = 5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+# Deprecated group/name - [DEFAULT]/rabbit_hosts
+# rabbit_hosts = $rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
+# rabbit_use_ssl = false
+
+# The RabbitMQ userid. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_userid
+# rabbit_userid = guest
+
+# The RabbitMQ password. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_password
+# rabbit_password = guest
+
+# The RabbitMQ login method. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_login_method
+# rabbit_login_method = AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
+# rabbit_virtual_host = /
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+# rabbit_retry_interval = 1
+
+# How long to backoff for between retries when connecting to RabbitMQ. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
+# rabbit_retry_backoff = 2
+
+# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry
+# count). (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_max_retries
+# rabbit_max_retries = 0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you
+# must wipe the RabbitMQ database. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
+# rabbit_ha_queues = false
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
+# Deprecated group/name - [DEFAULT]/fake_rabbit
+# fake_rabbit = false
diff --git a/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini b/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini
deleted file mode 100644
index 256f7855..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/bigswitch/restproxy.ini
+++ /dev/null
@@ -1,114 +0,0 @@
-# Config file for neutron-proxy-plugin.
-
-[restproxy]
-# All configuration for this plugin is in section '[restproxy]'
-#
-# The following parameters are supported:
-# servers : <host:port>[,<host:port>]* (Error if not set)
-# server_auth : <username:password> (default: no auth)
-# server_ssl : True | False (default: True)
-# ssl_cert_directory : <path> (default: /etc/neutron/plugins/bigswitch/ssl)
-# no_ssl_validation : True | False (default: False)
-# ssl_sticky : True | False (default: True)
-# sync_data : True | False (default: False)
-# auto_sync_on_failure : True | False (default: True)
-# consistency_interval : <integer> (default: 60 seconds)
-# server_timeout : <integer> (default: 10 seconds)
-# neutron_id : <string> (default: neutron-<hostname>)
-# add_meta_server_route : True | False (default: True)
-# thread_pool_size : <int> (default: 4)
-
-# A comma separated list of BigSwitch or Floodlight servers and port numbers.
-# The plugin proxies the requests to the BigSwitch/Floodlight server, which
-# performs the networking configuration. Note that only one server is needed
-# per deployment, but you may wish to deploy multiple servers to support
-# failover.
-servers=localhost:8080
-
-# The username and password for authenticating against the BigSwitch or Floodlight controller.
-# server_auth=username:password
-
-# Use SSL when connecting to the BigSwitch or Floodlight controller.
-# server_ssl=True
-
-# Directory which contains the ca_certs and host_certs to be used to validate
-# controller certificates.
-# ssl_cert_directory=/etc/neutron/plugins/bigswitch/ssl/
-
-# If a certificate does not exist for a controller, trust and store the first
-# certificate received for that controller and use it to validate future
-# connections to that controller.
-# ssl_sticky=True
-
-# Do not validate the controller certificates for SSL
-# Warning: This will not provide protection against man-in-the-middle attacks
-# no_ssl_validation=False
-
-# Sync data on connect
-# sync_data=False
-
-# If neutron fails to create a resource because the backend controller
-# doesn't know of a dependency, automatically trigger a full data
-# synchronization to the controller.
-# auto_sync_on_failure=True
-
-# Time between verifications that the backend controller
-# database is consistent with Neutron. (0 to disable)
-# consistency_interval = 60
-
-# Maximum number of seconds to wait for proxy request to connect and complete.
-# server_timeout=10
-
-# User defined identifier for this Neutron deployment
-# neutron_id =
-
-# Flag to decide if a route to the metadata server should be injected into the VM
-# add_meta_server_route = True
-
-# Number of threads to use to handle large volumes of port creation requests
-# thread_pool_size = 4
-
-[nova]
-# Specify the VIF_TYPE that will be controlled on the Nova compute instances
-# options: ivs or ovs
-# default: ovs
-# vif_type = ovs
-
-# Overrides for vif types based on nova compute node host IDs
-# Comma separated list of host IDs to fix to a specific VIF type
-# The VIF type is taken from the end of the configuration item
-# node_override_vif_<vif_type>
-# For example, the following would set the VIF type to IVS for
-# host-id1 and host-id2
-# node_override_vif_ivs=host-id1,host-id2
-
-[router]
-# Specify the default router rules installed in newly created tenant routers
-# Specify multiple times for multiple rules
-# Format is <tenant>:<source>:<destination>:<action>
-# Optionally, a comma-separated list of nexthops may be included after <action>
-# Use an * to specify default for all tenants
-# Default is any any allow for all tenants
-# tenant_default_router_rule=*:any:any:permit
-
-# Maximum number of rules that a single router may have
-# Default is 200
-# max_router_rules=200
-
-[restproxyagent]
-
-# Specify the name of the bridge used on compute nodes
-# for attachment.
-# Default: br-int
-# integration_bridge=br-int
-
-# Change the frequency of polling by the restproxy agent.
-# Value is seconds
-# Default: 5
-# polling_interval=5
-
-# Virtual switch type on the compute node.
-# Options: ovs or ivs
-# Default: ovs
-# virtual_switch_type = ovs
-
-[securitygroup]
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
-# enable_security_group = True
diff --git a/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README b/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README
deleted file mode 100644
index e7e47a27..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/ca_certs/README
+++ /dev/null
@@ -1,3 +0,0 @@
-Certificates in this folder will be used to
-verify signatures for any controllers the plugin
-connects to.
diff --git a/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README b/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README
deleted file mode 100644
index 8f5f5e77..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/bigswitch/ssl/host_certs/README
+++ /dev/null
@@ -1,6 +0,0 @@
-Certificates in this folder must match the name
-of the controller they should be used to authenticate
-with a .pem extension.
-
-For example, the certificate for the controller
-"192.168.0.1" should be named "192.168.0.1.pem".
diff --git a/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini b/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini
deleted file mode 100644
index 916e9e5d..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/brocade/brocade.ini
+++ /dev/null
@@ -1,29 +0,0 @@
-[switch]
-# username = The SSH username to use
-# password = The SSH password to use
-# address = The address of the host to SSH to
-# ostype = Should be NOS, but is unused otherwise
-#
-# Example:
-# username = admin
-# password = password
-# address = 10.24.84.38
-# ostype = NOS
-
-[physical_interface]
-# physical_interface = The network interface to use when creating a port
-#
-# Example:
-# physical_interface = physnet1
-
-[vlans]
-# network_vlan_ranges = <physical network name>:nnnn:mmmm
-#
-# Example:
-# network_vlan_ranges = physnet1:1000:2999
-
-[linux_bridge]
-# physical_interface_mappings = <physical network name>:<local interface>
-#
-# Example:
-# physical_interface_mappings = physnet1:em1
diff --git a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini b/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini
deleted file mode 100644
index d99e8382..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_cfg_agent.ini
+++ /dev/null
@@ -1,15 +0,0 @@
-[cfg_agent]
-# (IntOpt) Interval in seconds for processing of service updates.
-# That is when the config agent's process_services() loop executes
-# and lets each service helper process its service resources.
-# rpc_loop_interval = 10
-
-# (StrOpt) Period-separated module path to the routing service helper class.
-# routing_svc_helper_class = neutron.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper.RoutingServiceHelper
-
-# (IntOpt) Timeout value in seconds for connecting to a hosting device.
-# device_connection_timeout = 30
-
-# (IntOpt) The time in seconds until a backlogged hosting device is
-# presumed dead or booted to an error state.
-# hosting_device_dead_timeout = 300
diff --git a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini b/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini
deleted file mode 100644
index 17eae737..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_plugins.ini
+++ /dev/null
@@ -1,100 +0,0 @@
-[cisco]
-
-# (StrOpt) A short prefix to prepend to the VLAN number when creating a
-# VLAN interface. For example, if an interface is being created for
-# VLAN 2001 it will be named 'q-2001' using the default prefix.
-#
-# vlan_name_prefix = q-
-# Example: vlan_name_prefix = vnet-
-
-# (StrOpt) A short prefix to prepend to the VLAN number when creating a
-# provider VLAN interface. For example, if an interface is being created
-# for provider VLAN 3003 it will be named 'p-3003' using the default prefix.
-#
-# provider_vlan_name_prefix = p-
-# Example: provider_vlan_name_prefix = PV-
-
-# (BoolOpt) A flag indicating whether Openstack networking should manage the
-# creation and removal of VLAN interfaces for provider networks on the Nexus
-# switches. If the flag is set to False then Openstack will not create or
-# remove VLAN interfaces for provider networks, and the administrator needs
-# to manage these interfaces manually or by external orchestration.
-#
-# provider_vlan_auto_create = True
-
-# (BoolOpt) A flag indicating whether Openstack networking should manage
-# the adding and removing of provider VLANs from trunk ports on the Nexus
-# switches. If the flag is set to False then Openstack will not add or
-# remove provider VLANs from trunk ports, and the administrator needs to
-# manage these operations manually or by external orchestration.
-#
-# provider_vlan_auto_trunk = True
-
-# (StrOpt) Period-separated module path to the model class to use for
-# the Cisco neutron plugin.
-#
-# model_class = neutron.plugins.cisco.models.virt_phy_sw_v2.VirtualPhysicalSwitchModelV2
-
-# (BoolOpt) A flag to enable Layer 3 support on the Nexus switches.
-# Note: This feature is not supported on all models/versions of Cisco
-# Nexus switches. To use this feature, all of the Nexus switches in the
-# deployment must support it.
-# nexus_l3_enable = False
-
-# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
-# svi_round_robin = False
-
-# Cisco Nexus Switch configurations.
-# Each switch to be managed by Openstack Neutron must be configured here.
-#
-# N1KV Format.
-# [N1KV:<IP address of VSM>]
-# username=<credential username>
-# password=<credential password>
-#
-# Example:
-# [N1KV:2.2.2.2]
-# username=admin
-# password=mySecretPassword
-
-[cisco_n1k]
-
-# (StrOpt) Specify the name of the integration bridge to which the VIFs are
-# attached.
-# Default value: br-int
-# integration_bridge = br-int
-
-# (StrOpt) Name of the policy profile to be associated with a port when no
-# policy profile is specified during port creates.
-# Default value: service_profile
-# default_policy_profile = service_profile
-
-# (StrOpt) Name of the policy profile to be associated with a port owned by
-# network node (dhcp, router).
-# Default value: dhcp_pp
-# network_node_policy_profile = dhcp_pp
-
-# (StrOpt) Name of the network profile to be associated with a network when no
-# network profile is specified during network creates. Admin should pre-create
-# a network profile with this name.
-# Default value: default_network_profile
-# default_network_profile = network_pool
-
-# (IntOpt) Time in seconds for which the plugin polls the VSM for updates in
-# policy profiles.
-# Default value: 60
-# poll_duration = 60
-
-# (BoolOpt) Specify whether tenants are restricted from accessing all the
-# policy profiles.
-# Default value: False, indicating all tenants can access all policy profiles.
-#
-# restrict_policy_profiles = False
-
-# (IntOpt) Number of threads to use to make HTTP requests to the VSM.
-# Default value: 4
-# http_pool_size = 4
-
-# (IntOpt) Timeout duration in seconds for the http request
-# Default value: 15
-# http_timeout = 15
diff --git a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini b/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini
deleted file mode 100644
index 3ef271d2..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_router_plugin.ini
+++ /dev/null
@@ -1,76 +0,0 @@
-[general]
-#(IntOpt) Time in seconds between renewed scheduling attempts of non-scheduled routers
-# backlog_processing_interval = 10
-
-#(StrOpt) Name of the L3 admin tenant
-# l3_admin_tenant = L3AdminTenant
-
-#(StrOpt) Name of management network for hosting device configuration
-# management_network = osn_mgmt_nw
-
-#(StrOpt) Default security group applied on management port
-# default_security_group = mgmt_sec_grp
-
-#(IntOpt) Seconds of no status update until a cfg agent is considered down
-# cfg_agent_down_time = 60
-
-#(StrOpt) Path to templates for hosting devices
-# templates_path = /opt/stack/data/neutron/cisco/templates
-
-#(StrOpt) Path to config drive files for service VM instances
-# service_vm_config_path = /opt/stack/data/neutron/cisco/config_drive
-
-#(BoolOpt) Ensure that Nova is running before attempting to create any VM
-# ensure_nova_running = True
-
-[hosting_devices]
-# Settings coupled to CSR1kv VM devices
-# -------------------------------------
-#(StrOpt) Name of Glance image for CSR1kv
-# csr1kv_image = csr1kv_openstack_img
-
-#(StrOpt) UUID of Nova flavor for CSR1kv
-# csr1kv_flavor = 621
-
-#(StrOpt) Plugging driver for CSR1kv
-# csr1kv_plugging_driver = neutron.plugins.cisco.l3.plugging_drivers.n1kv_trunking_driver.N1kvTrunkingPlugDriver
-
-#(StrOpt) Hosting device driver for CSR1kv
-# csr1kv_device_driver = neutron.plugins.cisco.l3.hosting_device_drivers.csr1kv_hd_driver.CSR1kvHostingDeviceDriver
-
-#(StrOpt) Config agent router service driver for CSR1kv
-# csr1kv_cfgagent_router_driver = neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv.csr1kv_routing_driver.CSR1kvRoutingDriver
-
-#(StrOpt) Configdrive template file for CSR1kv
-# csr1kv_configdrive_template = csr1kv_cfg_template
-
-#(IntOpt) Booting time in seconds before a CSR1kv becomes operational
-# csr1kv_booting_time = 420
-
-#(StrOpt) Username to use for CSR1kv configurations
-# csr1kv_username = stack
-
-#(StrOpt) Password to use for CSR1kv configurations
-# csr1kv_password = cisco
-
-[n1kv]
-# Settings coupled to inter-working with N1kv plugin
-# --------------------------------------------------
-#(StrOpt) Name of N1kv port profile for management ports
-# management_port_profile = osn_mgmt_pp
-
-#(StrOpt) Name of N1kv port profile for T1 ports (i.e., ports carrying traffic
-# from VXLAN segmented networks).
-# t1_port_profile = osn_t1_pp
-
-#(StrOpt) Name of N1kv port profile for T2 ports (i.e., ports carrying traffic
-# from VLAN segmented networks).
-# t2_port_profile = osn_t2_pp
-
-#(StrOpt) Name of N1kv network profile for T1 networks (i.e., trunk networks
-# for VXLAN segmented traffic).
-# t1_network_profile = osn_t1_np
-
-#(StrOpt) Name of N1kv network profile for T2 networks (i.e., trunk networks
-# for VLAN segmented traffic).
-# t2_network_profile = osn_t2_np
diff --git a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini b/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini
deleted file mode 100644
index 0aee17eb..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/cisco/cisco_vpn_agent.ini
+++ /dev/null
@@ -1,26 +0,0 @@
-[cisco_csr_ipsec]
-# Status check interval in seconds, for VPNaaS IPSec connections used on CSR
-# status_check_interval = 60
-
-# Cisco CSR management port information for REST access used by VPNaaS
-# TODO(pcm): Remove once CSR is integrated in as a Neutron router.
-#
-# Format is:
-# [cisco_csr_rest:<public IP>]
-# rest_mgmt = <mgmt port IP>
-# tunnel_ip = <tunnel IP>
-# username = <user>
-# password = <password>
-# timeout = <timeout>
-# host = <hostname>
-# tunnel_if = <tunnel I/F>
-#
-# where:
-# public IP ----- Public IP address of router used with a VPN service (1:1 with CSR)
-# tunnel IP ----- Public IP address of the CSR used for the IPSec tunnel
-# mgmt port IP -- IP address of CSR for REST API access
-# user ---------- Username for REST management port access to Cisco CSR
-# password ------ Password for REST management port access to Cisco CSR
-# timeout ------- REST request timeout to Cisco CSR (optional)
-# hostname ------ Name of host where CSR is running as a VM
-# tunnel I/F ---- CSR port name used for tunnels' IP address
diff --git a/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini b/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini
deleted file mode 100644
index 0ca9b46f..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/embrane/heleos_conf.ini
+++ /dev/null
@@ -1,41 +0,0 @@
-[heleos]
-#configure the ESM management address
-#in the first version of this plugin, only one ESM can be specified
-#Example:
-#esm_mgmt=
-
-#configure admin username and password
-#admin_username=
-#admin_password=
-
-#router image id
-#Example:
-#router_image=932ce713-e210-3d54-a0a5-518b0b5ee1b0
-
-#mgmt shared security zone id
-#defines the shared management security zone. Each tenant can have a private one configured through the ESM
-#Example:
-#mgmt_id=c0bc9b6c-f110-46cf-bb01-733bfe4b5a1a
-
-#in-band shared security zone id
-#defines the shared in-band security zone. Each tenant can have a private one configured through the ESM
-#Example:
-#inband_id=a6b7999d-3806-4b04-81f6-e0c5c8271afc
-
-#oob-band shared security zone id
-#defines the shared out-of-band security zone. Each tenant can have a private one configured through the ESM
-#Example:
-#oob_id=e7eda5cc-b977-46cb-9c14-cab43c1b7871
-
-#dummy security zone id
-#defines the dummy security zone ID. this security zone will be used by the DVAs with no neutron interfaces
-#Example:
-#dummy_utif_id=d9911310-25fc-4733-a2e0-c0eda024ef08
-
-#resource pool id
-#define the shared resource pool. Each tenant can have a private one configured through the ESM
-#Example
-#resource_pool_id=
-
-#define if the requests have to be executed asynchronously by the plugin or not
-#async_requests=
diff --git a/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini b/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini
deleted file mode 100644
index 5eeec570..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/hyperv/hyperv_neutron_plugin.ini
+++ /dev/null
@@ -1,63 +0,0 @@
-[hyperv]
-# (StrOpt) Type of network to allocate for tenant networks. The
-# default value 'local' is useful only for single-box testing and
-# provides no connectivity between hosts. You MUST either change this
-# to 'vlan' and configure network_vlan_ranges below or to 'flat'.
-# Set to 'none' to disable creation of tenant networks.
-#
-# tenant_network_type = local
-# Example: tenant_network_type = vlan
-
-# (ListOpt) Comma-separated list of
-# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
-# of VLAN IDs on named physical networks that are available for
-# allocation. All physical networks listed are available for flat and
-# VLAN provider network creation. Specified ranges of VLAN IDs are
-# available for tenant network allocation if tenant_network_type is
-# 'vlan'. If empty, only gre and local networks may be created.
-#
-# network_vlan_ranges =
-# Example: network_vlan_ranges = physnet1:1000:2999
-
-[agent]
-# Agent's polling interval in seconds
-# polling_interval = 2
-
-# (ListOpt) Comma separated list of <physical_network>:<vswitch>
-# where the physical networks can be expressed with wildcards,
-# e.g.: "*:external".
-# The referred external virtual switches need to be already present on
-# the Hyper-V server.
-# If a given physical network name does not match any value in the list,
-# the plugin will look for a virtual switch with the same name.
-#
-# physical_network_vswitch_mappings = *:external
-# Example: physical_network_vswitch_mappings = net1:external1,net2:external2
-
-# (StrOpt) Private virtual switch name used for local networking.
-#
-# local_network_vswitch = private
-# Example: local_network_vswitch = custom_vswitch
-
-# (BoolOpt) Enables metrics collections for switch ports by using Hyper-V's
-# metric APIs. Collected data can be retrieved by other apps and services,
-# e.g.: Ceilometer. Requires Hyper-V / Windows Server 2012 and above.
-#
-# enable_metrics_collection = False
-
-#-----------------------------------------------------------------------------
-# Sample Configurations.
-#-----------------------------------------------------------------------------
-#
-# Neutron server:
-#
-# [HYPERV]
-# tenant_network_type = vlan
-# network_vlan_ranges = default:2000:3999
-#
-# Agent running on Hyper-V node:
-#
-# [AGENT]
-# polling_interval = 2
-# physical_network_vswitch_mappings = *:external
-# local_network_vswitch = private
diff --git a/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini b/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini
deleted file mode 100644
index 0fab5070..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/ibm/sdnve_neutron_plugin.ini
+++ /dev/null
@@ -1,50 +0,0 @@
-[sdnve]
-# (ListOpt) The IP address of one (or more) SDN-VE controllers
-# Default value is: controller_ips = 127.0.0.1
-# Example: controller_ips = 127.0.0.1,127.0.0.2
-# (StrOpt) The integration bridge for OF based implementation
-# The default value for integration_bridge is None
-# Example: integration_bridge = br-int
-# (ListOpt) The interface mapping connecting the integration
-# bridge to external network as a list of physical network names and
-# interfaces: <physical_network_name>:<interface_name>
-# Example: interface_mappings = default:eth2
-# (BoolOpt) Used to reset the integration bridge, if exists
-# The default value for reset_bridge is True
-# Example: reset_bridge = False
-# (BoolOpt) Used to set the OVS controller as out-of-band
-# The default value for out_of_band is True
-# Example: out_of_band = False
-#
-# (BoolOpt) The fake controller for testing purposes
-# Default value is: use_fake_controller = False
-# (StrOpt) The port number for use with controller
-# The default value for the port is 8443
-# Example: port = 8443
-# (StrOpt) The userid for use with controller
-# The default value for the userid is admin
-# Example: userid = sdnve_user
-# (StrOpt) The password for use with controller
-# The default value for the password is admin
-# Example: password = sdnve_password
-#
-# (StrOpt) The default type of tenants (and associated resources)
-# Available choices are: OVERLAY or OF
-# The default value for tenant type is OVERLAY
-# Example: default_tenant_type = OVERLAY
-# (StrOpt) The string in tenant description that indicates
-# Default value for OF tenants: of_signature = SDNVE-OF
-# (StrOpt) The string in tenant description that indicates
-# Default value for OVERLAY tenants: overlay_signature = SDNVE-OVERLAY
-
-[sdnve_agent]
-# (IntOpt) Agent's polling interval in seconds
-# polling_interval = 2
-# (StrOpt) What to use for root helper
-# The default value: root_helper = 'sudo'
-# (BoolOpt) Whether to use rpc or not
-# The default value: rpc = True
-
-[securitygroup]
-# The security group is not supported:
-# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
diff --git a/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini b/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini
deleted file mode 100644
index 94fe9803..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/linuxbridge/linuxbridge_conf.ini
+++ /dev/null
@@ -1,78 +0,0 @@
-[vlans]
-# (StrOpt) Type of network to allocate for tenant networks. The
-# default value 'local' is useful only for single-box testing and
-# provides no connectivity between hosts. You MUST change this to
-# 'vlan' and configure network_vlan_ranges below in order for tenant
-# networks to provide connectivity between hosts. Set to 'none' to
-# disable creation of tenant networks.
-#
-# tenant_network_type = local
-# Example: tenant_network_type = vlan
-
-# (ListOpt) Comma-separated list of
-# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
-# of VLAN IDs on named physical networks that are available for
-# allocation. All physical networks listed are available for flat and
-# VLAN provider network creation. Specified ranges of VLAN IDs are
-# available for tenant network allocation if tenant_network_type is
-# 'vlan'. If empty, only local networks may be created.
-#
-# network_vlan_ranges =
-# Example: network_vlan_ranges = physnet1:1000:2999
-
-[linux_bridge]
-# (ListOpt) Comma-separated list of
-# <physical_network>:<physical_interface> tuples mapping physical
-# network names to the agent's node-specific physical network
-# interfaces to be used for flat and VLAN networks. All physical
-# networks listed in network_vlan_ranges on the server should have
-# mappings to appropriate interfaces on each agent.
-#
-# physical_interface_mappings =
-# Example: physical_interface_mappings = physnet1:eth1
-
-[vxlan]
-# (BoolOpt) enable VXLAN on the agent
-# VXLAN support can be enabled when agent is managed by ml2 plugin using
-# linuxbridge mechanism driver. Useless if set while using linuxbridge plugin.
-# enable_vxlan = False
-#
-# (IntOpt) use specific TTL for vxlan interface protocol packets
-# ttl =
-#
-# (IntOpt) use specific TOS for vxlan interface protocol packets
-# tos =
-#
-# (StrOpt) multicast group to use for broadcast emulation.
-# This group must be the same on all the agents.
-# vxlan_group = 224.0.0.1
-#
-# (StrOpt) Local IP address to use for VXLAN endpoints (required)
-# local_ip =
-#
-# (BoolOpt) Flag to enable l2population extension. This option should be used
-# in conjunction with ml2 plugin l2population mechanism driver (in that case,
-# both linuxbridge and l2population mechanism drivers should be loaded).
-# It enables plugin to populate VXLAN forwarding table, in order to limit
-# the use of broadcast emulation (multicast will be turned off if kernel and
-# iproute2 support unicast flooding - requires 3.11 kernel and iproute2 3.10)
-# l2_population = False
-
-[agent]
-# Agent's polling interval in seconds
-# polling_interval = 2
-
-# (BoolOpt) Enable server RPC compatibility with old (pre-havana)
-# agents.
-#
-# rpc_support_old_agents = False
-# Example: rpc_support_old_agents = True
-
-[securitygroup]
-# Firewall driver for realizing neutron security group function
-# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
-# Example: firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
-
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
-# enable_security_group = True
diff --git a/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini b/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini
deleted file mode 100644
index 2b9bfa5e..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/metaplugin/metaplugin.ini
+++ /dev/null
@@ -1,31 +0,0 @@
-# Config file for Metaplugin
-
-[meta]
-# Comma separated list of flavor:neutron_plugin for plugins to load.
-# Extension method is searched in the list order and the first one is used.
-plugin_list = 'ml2:neutron.plugins.ml2.plugin.Ml2Plugin,nvp:neutron.plugins.vmware.plugin.NsxPluginV2'
-
-# Comma separated list of flavor:neutron_plugin for L3 service plugins
-# to load.
-# This is intended for specifying L2 plugins which support L3 functions.
-# If you use a router service plugin, set this blank.
-l3_plugin_list =
-
-# Default flavor to use, when flavor:network is not specified at network
-# creation.
-default_flavor = 'nvp'
-
-# Default L3 flavor to use, when flavor:router is not specified at router
-# creation.
-# Ignored if 'l3_plugin_list' is blank.
-default_l3_flavor =
-
-# Comma separated list of supported extension aliases.
-supported_extension_aliases = 'provider,binding,agent,dhcp_agent_scheduler'
-
-# Comma separated list of method:flavor to select specific plugin for a method.
-# This has priority over method search order based on 'plugin_list'.
-extension_map = 'get_port_stats:nvp'
-
-# Specifies flavor for plugin to handle 'q-plugin' RPC requests.
-rpc_flavor = 'ml2'
diff --git a/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini b/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini
deleted file mode 100644
index f2e94052..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/midonet/midonet.ini
+++ /dev/null
@@ -1,19 +0,0 @@
-
-[midonet]
-# MidoNet API server URI
-# midonet_uri = http://localhost:8080/midonet-api
-
-# MidoNet admin username
-# username = admin
-
-# MidoNet admin password
-# password = passw0rd
-
-# ID of the project that MidoNet admin user belongs to
-# project_id = 77777777-7777-7777-7777-777777777777
-
-# Virtual provider router ID
-# provider_router_id = 00112233-0011-0011-0011-001122334455
-
-# Path to midonet host uuid file
-# midonet_host_uuid_path = /etc/midolman/host_uuid.properties
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini
index b8097ce2..ac9a3d0d 100644
--- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini
+++ b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf.ini
@@ -4,7 +4,6 @@
#
# type_drivers = local,flat,vlan,gre,vxlan
# Example: type_drivers = flat,vlan,gre,vxlan
-type_drivers = flat,gre
# (ListOpt) Ordered list of network_types to allocate as tenant
# networks. The default value 'local' is useful for single-box testing
@@ -12,7 +11,6 @@ type_drivers = flat,gre
#
# tenant_network_types = local
# Example: tenant_network_types = vlan,gre,vxlan
-tenant_network_types = gre
# (ListOpt) Ordered list of networking mechanism driver entrypoints
# to be loaded from the neutron.ml2.mechanism_drivers namespace.
@@ -22,13 +20,44 @@ tenant_network_types = gre
# Example: mechanism_drivers = cisco,logger
# Example: mechanism_drivers = openvswitch,brocade
# Example: mechanism_drivers = linuxbridge,brocade
-mechanism_drivers = openvswitch
# (ListOpt) Ordered list of extension driver entrypoints
# to be loaded from the neutron.ml2.extension_drivers namespace.
# extension_drivers =
# Example: extension_drivers = anewextensiondriver
+# =========== items for MTU selection and advertisement =============
+# (IntOpt) Path MTU. The maximum permissible size of an unfragmented
+# packet travelling from and to addresses where encapsulated Neutron
+# traffic is sent. Drivers calculate maximum viable MTU for
+# validating tenant requests based on this value (typically,
+# path_mtu - max encap header size). If <=0, the path MTU is
+# indeterminate and no calculation takes place.
+# path_mtu = 0
+
+# (IntOpt) Segment MTU. The maximum permissible size of an
+# unfragmented packet travelling a L2 network segment. If <=0,
+# the segment MTU is indeterminate and no calculation takes place.
+# segment_mtu = 0
+
+# (ListOpt) Physical network MTUs. List of mappings of physical
+# network to MTU value. The format of the mapping is
+# <physnet>:<mtu val>. This mapping allows specifying a
+# physical network MTU value that differs from the default
+# segment_mtu value.
+# physical_network_mtus =
+# Example: physical_network_mtus = physnet1:1550, physnet2:1500
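+
+# Illustrative example only (not part of the factory defaults): advertise a
+# 1500-byte MTU to tenants while one physical network carries jumbo frames.
+# The physnet name below is a placeholder.
+# path_mtu = 1500
+# segment_mtu = 1500
+# physical_network_mtus = physnet-jumbo:9000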
+# ======== end of items for MTU selection and advertisement =========
+
+# (StrOpt) Default network type for external networks when no provider
+# attributes are specified. By default it is None, which means that if
+# provider attributes are not specified while creating external networks
+# then they will have the same type as tenant networks.
+# Allowed values for external_network_type config option depend on the
+# network type values configured in type_drivers config option.
+# external_network_type =
+# Example: external_network_type = local
+
[ml2_type_flat]
# (ListOpt) List of physical_network names with which flat networks
# can be created. Use * to allow flat networks with arbitrary
@@ -37,7 +66,6 @@ mechanism_drivers = openvswitch
# flat_networks =
# Example:flat_networks = physnet1,physnet2
# Example:flat_networks = *
-flat_networks = External
[ml2_type_vlan]
# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
@@ -47,11 +75,10 @@ flat_networks = External
#
# network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
-#network_vlan_ranges = Physnet1:100:200
[ml2_type_gre]
# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
-tunnel_id_ranges = 1:1000
+# tunnel_id_ranges =
[ml2_type_vxlan]
# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
@@ -69,18 +96,8 @@ tunnel_id_ranges = 1:1000
[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
-enable_security_group = True
+# enable_security_group = True
# Use ipset to speed-up the iptables security groups. Enabling ipset support
# requires that ipset is installed on L2 agent node.
-enable_ipset = True
-
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-
-[ovs]
-local_ip = {{ MANAGEMENT_INTERFACE_IP_ADDRESS }}
-enable_tunneling = True
-bridge_mappings=External:br-ex
-
-[agent]
-tunnel_types = gre
+# enable_ipset = True
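+
+# Illustrative example only (not part of the factory defaults): the previous
+# deployment-specific settings removed above enabled security groups with the
+# OVS hybrid iptables driver and ipset; re-enabling them would look like this.
+# enable_security_group = True
+# enable_ipset = True
+# firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver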
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini
deleted file mode 100644
index abaf5bc7..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_arista.ini
+++ /dev/null
@@ -1,100 +0,0 @@
-# Defines configuration options specific for Arista ML2 Mechanism driver
-
-[ml2_arista]
-# (StrOpt) EOS IP address. This is required field. If not set, all
-# communications to Arista EOS will fail
-#
-# eapi_host =
-# Example: eapi_host = 192.168.0.1
-#
-# (StrOpt) EOS command API username. This is required field.
-# if not set, all communications to Arista EOS will fail.
-#
-# eapi_username =
-# Example: arista_eapi_username = admin
-#
-# (StrOpt) EOS command API password. This is required field.
-# if not set, all communications to Arista EOS will fail.
-#
-# eapi_password =
-# Example: eapi_password = my_password
-#
-# (StrOpt) Defines if hostnames are sent to Arista EOS as FQDNs
-# ("node1.domain.com") or as short names ("node1"). This is
-# optional. If not set, a value of "True" is assumed.
-#
-# use_fqdn =
-# Example: use_fqdn = True
-#
-# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
-# This field defines how often the synchronization is performed.
-# This is an optional field. If not set, a value of 180 seconds
-# is assumed.
-#
-# sync_interval =
-# Example: sync_interval = 60
-#
-# (StrOpt) Defines Region Name that is assigned to this OpenStack Controller.
-# This is useful when multiple OpenStack/Neutron controllers are
-# managing the same Arista HW clusters. Note that this name must
-# match with the region name registered (or known) to keystone
-# service. Authentication with Keystone is performed by EOS.
-# This is optional. If not set, a value of "RegionOne" is assumed.
-#
-# region_name =
-# Example: region_name = RegionOne
-
-
-[l3_arista]
-
-# (StrOpt) primary host IP address. This is required field. If not set, all
-# communications to Arista EOS will fail. This is the host where
-# primary router is created.
-#
-# primary_l3_host =
-# Example: primary_l3_host = 192.168.10.10
-#
-# (StrOpt) Primary host username. This is required field.
-# if not set, all communications to Arista EOS will fail.
-#
-# primary_l3_host_username =
-# Example: arista_primary_l3_username = admin
-#
-# (StrOpt) Primary host password. This is required field.
-# if not set, all communications to Arista EOS will fail.
-#
-# primary_l3_host_password =
-# Example: primary_l3_password = my_password
-#
-# (StrOpt) IP address of the second Arista switch paired as
-# MLAG (Multi-chassis Link Aggregation) with the first.
-# This is optional field, however, if mlag_config flag is set,
-# then this is a required field. If not set, all
-# communications to Arista EOS will fail. If mlag_config is set
-# to False, then this field is ignored
-#
-# seconadary_l3_host =
-# Example: seconadary_l3_host = 192.168.10.20
-#
-# (BoolOpt) Defines if Arista switches are configured in MLAG mode
-# If yes, all L3 configuration is pushed to both switches
-# automatically. If this flag is set, ensure that secondary_l3_host
-# is set to the second switch's IP.
-# This flag is Optional. If not set, a value of "False" is assumed.
-#
-# mlag_config =
-# Example: mlag_config = True
-#
-# (BoolOpt) Defines if the router is created in the default VRF or a
-# specific VRF. This is optional.
-# If not set, a value of "False" is assumed.
-#
-# Example: use_vrf = True
-#
-# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
-# This field defines how often the synchronization is performed.
-# This is an optional field. If not set, a value of 180 seconds
-# is assumed.
-#
-# l3_sync_interval =
-# Example: l3_sync_interval = 60
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini
deleted file mode 100644
index 67574110..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_brocade.ini
+++ /dev/null
@@ -1,15 +0,0 @@
-[ml2_brocade]
-# username = <mgmt admin username>
-# password = <mgmt admin password>
-# address = <switch mgmt ip address>
-# ostype = NOS
-# osversion = autodetect | n.n.n
-# physical_networks = physnet1,physnet2
-#
-# Example:
-# username = admin
-# password = password
-# address = 10.24.84.38
-# ostype = NOS
-# osversion = 4.1.1
-# physical_networks = physnet1,physnet2
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini
deleted file mode 100644
index 1b69100e..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_cisco.ini
+++ /dev/null
@@ -1,118 +0,0 @@
-[ml2_cisco]
-
-# (StrOpt) A short prefix to prepend to the VLAN number when creating a
-# VLAN interface. For example, if an interface is being created for
-# VLAN 2001 it will be named 'q-2001' using the default prefix.
-#
-# vlan_name_prefix = q-
-# Example: vlan_name_prefix = vnet-
-
-# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
-# svi_round_robin = False
-
-#
-# (StrOpt) The name of the physical_network managed via the Cisco Nexus Switch.
-# This string value must be present in the ml2_conf.ini network_vlan_ranges
-# variable.
-#
-# managed_physical_network =
-# Example: managed_physical_network = physnet1
-
-# Cisco Nexus Switch configurations.
-# Each switch to be managed by Openstack Neutron must be configured here.
-#
-# Cisco Nexus Switch Format.
-# [ml2_mech_cisco_nexus:<IP address of switch>]
-# <hostname>=<intf_type:port> (1)
-# ssh_port=<ssh port> (2)
-# username=<credential username> (3)
-# password=<credential password> (4)
-#
-# (1) For each host connected to a port on the switch, specify the hostname
-# and the Nexus physical port (interface) it is connected to.
-# Valid intf_type's are 'ethernet' and 'port-channel'.
-# The default setting for <intf_type:> is 'ethernet' and need not be
-# added to this setting.
-# (2) The TCP port for connecting via SSH to manage the switch. This is
-# port number 22 unless the switch has been configured otherwise.
-# (3) The username for logging into the switch to manage it.
-# (4) The password for logging into the switch to manage it.
-#
-# Example:
-# [ml2_mech_cisco_nexus:1.1.1.1]
-# compute1=1/1
-# compute2=ethernet:1/2
-# compute3=port-channel:1
-# ssh_port=22
-# username=admin
-# password=mySecretPassword
-
-[ml2_cisco_apic]
-
-# Hostname:port list of APIC controllers
-# apic_hosts = 1.1.1.1:80, 1.1.1.2:8080, 1.1.1.3:80
-
-# Username for the APIC controller
-# apic_username = user
-
-# Password for the APIC controller
-# apic_password = password
-
-# Whether to use SSL for connecting to the APIC controller or not
-# apic_use_ssl = True
-
-# How to map names to APIC: use_uuid or use_name
-# apic_name_mapping = use_name
-
-# Names for APIC objects used by Neutron
-# Note: When deploying multiple clouds against one APIC,
-# these names must be unique between the clouds.
-# apic_vmm_domain = openstack
-# apic_vlan_ns_name = openstack_ns
-# apic_node_profile = openstack_profile
-# apic_entity_profile = openstack_entity
-# apic_function_profile = openstack_function
-# apic_app_profile_name = openstack_app
-# Agent timers for State reporting and topology discovery
-# apic_sync_interval = 30
-# apic_agent_report_interval = 30
-# apic_agent_poll_interval = 2
-
-# Specify your network topology.
-# This section indicates how your compute nodes are connected to the fabric's
-# switches and ports. The format is as follows:
-#
-# [apic_switch:<switch_id_from_the_apic>]
-# <compute_host>,<compute_host> = <switchport_the_host(s)_are_connected_to>
-#
-# You can have multiple sections, one for each switch in your fabric that is
-# participating in Openstack. e.g.
-#
-# [apic_switch:17]
-# ubuntu,ubuntu1 = 1/10
-# ubuntu2,ubuntu3 = 1/11
-#
-# [apic_switch:18]
-# ubuntu5,ubuntu6 = 1/1
-# ubuntu7,ubuntu8 = 1/2
-
-# Describe external connectivity.
-# In this section you can specify the external network configuration in order
-# for the plugin to be able to teach the fabric how to route the internal
-# traffic to the outside world. The external connectivity configuration
-# format is as follows:
-#
-# [apic_external_network:<externalNetworkName>]
-# switch = <switch_id_from_the_apic>
-# port = <switchport_the_external_router_is_connected_to>
-# encap = <encapsulation>
-# cidr_exposed = <cidr_exposed_to_the_external_router>
-# gateway_ip = <ip_of_the_external_gateway>
-#
-# An example follows:
-# [apic_external_network:network_ext]
-# switch=203
-# port=1/34
-# encap=vlan-100
-# cidr_exposed=10.10.40.2/16
-# gateway_ip=10.10.40.1
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini
deleted file mode 100644
index 6ee4a4e0..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_fslsdn.ini
+++ /dev/null
@@ -1,52 +0,0 @@
-# Defines Configuration options for FSL SDN OS Mechanism Driver
-# Cloud Resource Discovery (CRD) authorization credentials
-[ml2_fslsdn]
-#(StrOpt) User name for authentication to CRD.
-# e.g.: user12
-#
-# crd_user_name =
-
-#(StrOpt) Password for authentication to CRD.
-# e.g.: secret
-#
-# crd_password =
-
-#(StrOpt) Tenant name for CRD service.
-# e.g.: service
-#
-# crd_tenant_name =
-
-#(StrOpt) CRD auth URL.
-# e.g.: http://127.0.0.1:5000/v2.0/
-#
-# crd_auth_url =
-
-#(StrOpt) URL for connecting to CRD Service.
-# e.g.: http://127.0.0.1:9797
-#
-# crd_url=
-
-#(IntOpt) Timeout value for connecting to CRD service
-# in seconds, e.g.: 30
-#
-# crd_url_timeout=
-
-#(StrOpt) Region name for connecting to CRD in
-# admin context, e.g.: RegionOne
-#
-# crd_region_name=
-
-#(BoolOpt)If set, ignore any SSL validation issues (boolean value)
-# e.g.: False
-#
-# crd_api_insecure=
-
-#(StrOpt)Authorization strategy for connecting to CRD in admin
-# context, e.g.: keystone
-#
-# crd_auth_strategy=
-
-#(StrOpt)Location of CA certificates file to use for CRD client
-# requests.
-#
-# crd_ca_certificates_file=
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini
deleted file mode 100644
index 46139aed..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_mlnx.ini
+++ /dev/null
@@ -1,4 +0,0 @@
-[eswitch]
-# (StrOpt) Type of Network Interface to allocate for VM:
-# mlnx_direct or hostdev according to libvirt terminology
-# vnic_type = mlnx_direct
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini
deleted file mode 100644
index dbbfcbd2..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ncs.ini
+++ /dev/null
@@ -1,28 +0,0 @@
-# Defines configuration options specific to the Tail-f NCS Mechanism Driver
-
-[ml2_ncs]
-# (StrOpt) Tail-f NCS HTTP endpoint for REST access to the OpenStack
-# subtree.
-# If this is not set then no HTTP requests will be made.
-#
-# url =
-# Example: url = http://ncs/api/running/services/openstack
-
-# (StrOpt) Username for HTTP basic authentication to NCS.
-# This is an optional parameter. If unspecified then no authentication is used.
-#
-# username =
-# Example: username = admin
-
-# (StrOpt) Password for HTTP basic authentication to NCS.
-# This is an optional parameter. If unspecified then no authentication is used.
-#
-# password =
-# Example: password = admin
-
-# (IntOpt) Timeout in seconds to wait for NCS HTTP request completion.
-# This is an optional parameter, default value is 10 seconds.
-#
-# timeout =
-# Example: timeout = 15
-
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini
deleted file mode 100644
index 9e88c1bb..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_odl.ini
+++ /dev/null
@@ -1,30 +0,0 @@
-# Configuration for the OpenDaylight MechanismDriver
-
-[ml2_odl]
-# (StrOpt) OpenDaylight REST URL
-# If this is not set then no HTTP requests will be made.
-#
-# url =
-# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron
-
-# (StrOpt) Username for HTTP basic authentication to ODL.
-#
-# username =
-# Example: username = admin
-
-# (StrOpt) Password for HTTP basic authentication to ODL.
-#
-# password =
-# Example: password = admin
-
-# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion.
-# This is an optional parameter, default value is 10 seconds.
-#
-# timeout = 10
-# Example: timeout = 15
-
-# (IntOpt) Timeout in minutes to wait for a Tomcat session timeout.
-# This is an optional parameter, default value is 30 minutes.
-#
-# session_timeout = 30
-# Example: session_timeout = 60
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini
deleted file mode 100644
index 4a94b987..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_ofa.ini
+++ /dev/null
@@ -1,13 +0,0 @@
-# Defines configuration options specific to the OpenFlow Agent Mechanism Driver
-
-[ovs]
-# Please refer to the Open vSwitch configuration options.
-
-[agent]
-# (IntOpt) Number of seconds to retry acquiring an Open vSwitch datapath.
-# This is an optional parameter, default value is 60 seconds.
-#
-# get_datapath_retry_times =
-# Example: get_datapath_retry_times = 30
-
-# For options other than the above, please refer to the Open vSwitch
-# configuration options.
diff --git a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini b/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini
deleted file mode 100644
index 9566f54c..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/ml2/ml2_conf_sriov.ini
+++ /dev/null
@@ -1,31 +0,0 @@
-# Defines configuration options for SRIOV NIC Switch MechanismDriver
-# and Agent
-
-[ml2_sriov]
-# (ListOpt) Comma-separated list of
-# supported Vendor PCI Devices, in format vendor_id:product_id
-#
-# supported_pci_vendor_devs = 15b3:1004, 8086:10c9
-# Example: supported_pci_vendor_devs = 15b3:1004
-#
-# (BoolOpt) Requires running SRIOV neutron agent for port binding
-# agent_required = True
-
-[sriov_nic]
-# (ListOpt) Comma-separated list of <physical_network>:<network_device>
-# tuples mapping physical network names to the agent's node-specific
-# physical network device interfaces of SR-IOV physical function to be used
-# for VLAN networks. All physical networks listed in network_vlan_ranges on
-# the server should have mappings to appropriate interfaces on each agent.
-#
-# physical_device_mappings =
-# Example: physical_device_mappings = physnet1:eth1
-#
-# (ListOpt) Comma-separated list of <network_device>:<vfs_to_exclude>
-# tuples, mapping network_device to the agent's node-specific list of virtual
-# functions that should not be used for virtual networking.
-# vfs_to_exclude is a semicolon-separated list of virtual
-# functions to exclude from network_device. The network_device in the
-# mapping should appear in the physical_device_mappings list.
-# exclude_devices =
-# Example: exclude_devices = eth1:0000:07:00.2; 0000:07:00.3
diff --git a/openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini b/openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini
deleted file mode 100644
index b1225111..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/mlnx/mlnx_conf.ini
+++ /dev/null
@@ -1,79 +0,0 @@
-[mlnx]
-# (StrOpt) Type of network to allocate for tenant networks. The
-# default value is 'vlan'. You MUST configure network_vlan_ranges below
-# in order for tenant networks to provide connectivity between hosts.
-# Set to 'none' to disable creation of tenant networks.
-#
-# tenant_network_type = vlan
-# Example: tenant_network_type = vlan
-
-# (ListOpt) Comma-separated list of
-# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
-# of VLAN IDs on named physical networks that are available for
-# allocation. All physical networks listed are available for flat and
-# VLAN provider network creation. Specified ranges of VLAN IDs are
-# available for tenant network allocation if tenant_network_type is
-# 'vlan'. If empty, only local networks may be created.
-#
-# network_vlan_ranges =
-# Example: network_vlan_ranges = default:1:100
-
-# (ListOpt) Comma-separated list of
-# <physical_network>:<physical_network_type> tuples mapping physical
-# network names to physical network types. All physical
-# networks listed in network_vlan_ranges should have
-# mappings to appropriate physical network type.
-# Type of the physical network can be either eth (Ethernet) or
-# ib (InfiniBand). If empty, physical network eth type is assumed.
-#
-# physical_network_type_mappings =
-# Example: physical_network_type_mappings = default:eth
-
-# (StrOpt) Type of the physical network, can be either 'eth' or 'ib'
-# The default value is 'eth'
-# physical_network_type = eth
-
-[eswitch]
-# (ListOpt) Comma-separated list of
-# <physical_network>:<physical_interface> tuples mapping physical
-# network names to the agent's node-specific physical network
-# interfaces to be used for flat and VLAN networks. All physical
-# networks listed in network_vlan_ranges on the server should have
-# mappings to appropriate interfaces on each agent.
-#
-# physical_interface_mappings =
-# Example: physical_interface_mappings = default:eth2
-
-# (StrOpt) Type of Network Interface to allocate for VM:
-# direct or hostdev according to libvirt terminology
-# vnic_type = mlnx_direct
-
-# (StrOpt) Eswitch daemon end point connection url
-# daemon_endpoint = 'tcp://127.0.0.1:60001'
-
-# The number of milliseconds the agent will wait for
-# response on request to daemon
-# request_timeout = 3000
-
-# The number of retries the agent will send request
-# to daemon before giving up
-# retries = 3
-
-# The backoff rate multiplier for waiting period between retries
-# on request to daemon, i.e. value of 2 will double
-# the request timeout each retry
-# backoff_rate = 2
-
-[agent]
-# Agent's polling interval in seconds
-# polling_interval = 2
-
-# (BoolOpt) Enable server RPC compatibility with old (pre-havana)
-# agents.
-#
-# rpc_support_old_agents = False
-
-[securitygroup]
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
-# enable_security_group = True
diff --git a/openstack/usr/share/openstack/neutron/plugins/nec/nec.ini b/openstack/usr/share/openstack/neutron/plugins/nec/nec.ini
deleted file mode 100644
index aa4171da..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/nec/nec.ini
+++ /dev/null
@@ -1,60 +0,0 @@
-# Sample Configurations
-
-[ovs]
-# Do not change this parameter unless you have a good reason to.
-# This is the name of the OVS integration bridge. There is one per hypervisor.
-# The integration bridge acts as a virtual "patch port". All VM VIFs are
-# attached to this bridge and then "patched" according to their network
-# connectivity.
-# integration_bridge = br-int
-
-[agent]
-# Agent's polling interval in seconds
-# polling_interval = 2
-
-[securitygroup]
-# Firewall driver for realizing neutron security group function
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
-# enable_security_group = True
-
-[ofc]
-# Specify OpenFlow Controller Host, Port and Driver to connect.
-# host = 127.0.0.1
-# port = 8888
-
-# Base URL of OpenFlow Controller REST API.
-# It is prepended to a path of each API request.
-# path_prefix =
-
-# Drivers are in neutron/plugins/nec/drivers/ .
-# driver = trema
-
-# PacketFilter is available when it's enabled in this configuration
-# and supported by the driver.
-# enable_packet_filter = true
-
-# Use SSL to connect
-# use_ssl = false
-
-# Key file
-# key_file =
-
-# Certificate file
-# cert_file =
-
-# Disable SSL certificate verification
-# insecure_ssl = false
-
-# Maximum attempts per OFC API request. NEC plugin retries
-# API request to OFC when OFC returns ServiceUnavailable (503).
-# The value must be greater than 0.
-# api_max_attempts = 3
-
-[provider]
-# Default router provider to use.
-# default_router_provider = l3-agent
-# List of enabled router providers.
-# router_providers = l3-agent,openflow
diff --git a/openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini b/openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini
deleted file mode 100644
index aad37bd5..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/nuage/nuage_plugin.ini
+++ /dev/null
@@ -1,41 +0,0 @@
-# Please fill in the correct data for all the keys below and uncomment key-value pairs
-[restproxy]
-# (StrOpt) Default Network partition in which VSD will
-# orchestrate network resources using openstack
-#
-#default_net_partition_name = <default-net-partition-name>
-
-# (StrOpt) Nuage provided uri for initial authorization to
-# access VSD
-#
-#auth_resource = /auth
-
-# (StrOpt) IP Address and Port of VSD
-#
-#server = ip:port
-
-# (StrOpt) Organization name in which VSD will orchestrate
-# network resources using openstack
-#
-#organization = org
-
-# (StrOpt) Username and password of VSD for authentication
-#
-#serverauth = uname:pass
-
-# (BoolOpt) Boolean for SSL connection with VSD server
-#
-#serverssl = True
-
-# (StrOpt) Nuage provided base uri to reach out to VSD
-#
-#base_uri = /base
-
-[syncmanager]
-# (BoolOpt) Boolean to enable sync between openstack and VSD
-#
-#enable_sync = False
-
-# (IntOpt) Sync interval in seconds between openstack and VSD
-#
-#sync_interval = 0 \ No newline at end of file
diff --git a/openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini b/openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini
deleted file mode 100644
index a1c05d97..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/oneconvergence/nvsdplugin.ini
+++ /dev/null
@@ -1,35 +0,0 @@
-[nvsd]
-# Configure the NVSD controller. The plugin proxies the API calls to the
-# NVSD controller, which implements the required functionality.
-
-# IP address of NVSD controller api server
-# nvsd_ip = <ip address of nvsd controller>
-
-# Port number of NVSD controller api server
-# nvsd_port = 8082
-
-# Authentication credentials to access the api server
-# nvsd_user = <nvsd controller username>
-# nvsd_passwd = <password>
-
-# API request timeout in seconds
-# request_timeout = <default request timeout>
-
-# Maximum number of retry attempts to login to the NVSD controller
-# Specify 0 to retry until success (default)
-# nvsd_retries = 0
-
-[securitygroup]
-# Specify the firewall_driver option: if neutron security groups are disabled,
-# use NoopFirewallDriver; otherwise use OVSHybridIptablesFirewallDriver.
-# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
-
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
-# enable_security_group = True
-
-[agent]
-# root_helper = sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
-
-[database]
-# connection = mysql://root:<passwd>@127.0.0.1/<neutron_db>?charset=utf8
diff --git a/openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini b/openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini
deleted file mode 100644
index 629f1fc4..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/opencontrail/contrailplugin.ini
+++ /dev/null
@@ -1,26 +0,0 @@
-# OpenContrail is an Apache 2.0-licensed project that is built using
-# standards-based protocols and provides all the necessary components for
-# network virtualization: SDN controller, virtual router, analytics engine,
-# and published northbound APIs.
-# For more information visit: http://opencontrail.org
-
-# Opencontrail plugin specific configuration
-[CONTRAIL]
-# (StrOpt) IP address to connect to the OpenContrail controller.
-# Uncomment this line to specify the IP address of the OpenContrail
-# API server.
-# Default value is localhost (127.0.0.1).
-# api_server_ip='127.0.0.1'
-
-# (IntOpt) Port to connect to the OpenContrail controller.
-# Uncomment this line to specify the port of the OpenContrail
-# API server.
-# Default value is 8082.
-# api_server_port=8082
-
-# (DictOpt) Enable OpenContrail extensions.
-# OpenContrail may in the future support extensions such as ipam and policy;
-# these extensions can be configured as shown below. The plugin will then
-# load the specified extensions.
-# Default value is None; no extensions will be loaded.
-# contrail_extensions=ipam:<classpath>,policy:<classpath>
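
A minimal sketch (plain Python, hypothetical class paths) of how a contrail_extensions value in the format shown above maps to a dictionary of extension name to class path:

    def parse_contrail_extensions(raw):
        """Turn 'ipam:<classpath>,policy:<classpath>' into a dict, or None."""
        if not raw:
            return None  # default: no extensions are loaded
        return dict(item.split(":", 1) for item in raw.split(","))

    # Hypothetical class paths, purely for illustration.
    print(parse_contrail_extensions("ipam:example.Ipam,policy:example.Policy"))
    # {'ipam': 'example.Ipam', 'policy': 'example.Policy'}
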
diff --git a/openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini b/openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
deleted file mode 100644
index 9c8e6b58..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
+++ /dev/null
@@ -1,190 +0,0 @@
-[ovs]
-# (StrOpt) Type of network to allocate for tenant networks. The
-# default value 'local' is useful only for single-box testing and
-# provides no connectivity between hosts. You MUST either change this
-# to 'vlan' and configure network_vlan_ranges below or change this to
-# 'gre' or 'vxlan' and configure tunnel_id_ranges below in order for
-# tenant networks to provide connectivity between hosts. Set to 'none'
-# to disable creation of tenant networks.
-#
-# tenant_network_type = local
-# Example: tenant_network_type = gre
-# Example: tenant_network_type = vxlan
-
-# (ListOpt) Comma-separated list of
-# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
-# of VLAN IDs on named physical networks that are available for
-# allocation. All physical networks listed are available for flat and
-# VLAN provider network creation. Specified ranges of VLAN IDs are
-# available for tenant network allocation if tenant_network_type is
-# 'vlan'. If empty, only gre, vxlan and local networks may be created.
-#
-# network_vlan_ranges =
-# Example: network_vlan_ranges = physnet1:1000:2999
-
-# (BoolOpt) Set to True in the server and the agents to enable support
-# for GRE or VXLAN networks. Requires kernel support for OVS patch ports and
-# GRE or VXLAN tunneling.
-#
-# WARNING: This option will be deprecated in the Icehouse release, at which
-# point setting tunnel_type below will be required to enable
-# tunneling.
-#
-# enable_tunneling = False
-
-# (StrOpt) The type of tunnel network, if any, supported by the plugin. If
-# this is set, it will cause tunneling to be enabled. If this is not set and
-# the option enable_tunneling is set, this will default to 'gre'.
-#
-# tunnel_type =
-# Example: tunnel_type = gre
-# Example: tunnel_type = vxlan
-
-# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples
-# enumerating ranges of GRE or VXLAN tunnel IDs that are available for
-# tenant network allocation if tenant_network_type is 'gre' or 'vxlan'.
-#
-# tunnel_id_ranges =
-# Example: tunnel_id_ranges = 1:1000
-
-# Do not change this parameter unless you have a good reason to.
-# This is the name of the OVS integration bridge. There is one per hypervisor.
-# The integration bridge acts as a virtual "patch bay". All VM VIFs are
-# attached to this bridge and then "patched" according to their network
-# connectivity.
-#
-# integration_bridge = br-int
-
-# Only used for the agent if tunnel_id_ranges (above) is not empty for
-# the server. In most cases, the default value should be fine.
-#
-# tunnel_bridge = br-tun
-
-# Peer patch port in integration bridge for tunnel bridge
-# int_peer_patch_port = patch-tun
-
-# Peer patch port in tunnel bridge for integration bridge
-# tun_peer_patch_port = patch-int
-
-# Uncomment this line for the agent if tunnel_id_ranges (above) is not
-# empty for the server. Set local-ip to be the local IP address of
-# this hypervisor.
-#
-# local_ip =
-
-# (ListOpt) Comma-separated list of <physical_network>:<bridge> tuples
-# mapping physical network names to the agent's node-specific OVS
-# bridge names to be used for flat and VLAN networks. Bridge names should
-# be no more than 11 characters long. Each bridge must
-# exist, and should have a physical network interface configured as a
-# port. All physical networks listed in network_vlan_ranges on the
-# server should have mappings to appropriate bridges on each agent.
-#
-# bridge_mappings =
-# Example: bridge_mappings = physnet1:br-eth1
-
-# (BoolOpt) Use veths instead of patch ports to interconnect the integration
-# bridge to physical networks. Supports kernels without OVS patch port
-# support as long as it is set to True.
-# use_veth_interconnection = False
-
-[agent]
-# Agent's polling interval in seconds
-# polling_interval = 2
-
-# Minimize polling by monitoring ovsdb for interface changes
-# minimize_polling = True
-
-# When minimize_polling = True, the number of seconds to wait before
-# respawning the ovsdb monitor after losing communication with it
-# ovsdb_monitor_respawn_interval = 30
-
-# (ListOpt) The types of tenant network tunnels supported by the agent.
-# Setting this will enable tunneling support in the agent. This can be set to
-# either 'gre' or 'vxlan'. If this is unset, it will default to [] and
-# disable tunneling support in the agent. When running the agent with the OVS
-# plugin, this value must be the same as "tunnel_type" in the "[ovs]" section.
-# When running the agent with ML2, you can specify as many values here as
-# your compute hosts support.
-#
-# tunnel_types =
-# Example: tunnel_types = gre
-# Example: tunnel_types = vxlan
-# Example: tunnel_types = vxlan, gre
-
-# (IntOpt) The UDP port number to use if tunnel_types includes 'vxlan'. If
-# not specified, this will make use of the Open vSwitch default value of
-# '4789'.
-#
-# vxlan_udp_port =
-# Example: vxlan_udp_port = 8472
-
-# (IntOpt) This is the MTU size of veth interfaces.
-# Do not change unless you have a good reason to.
-# The default MTU size of veth interfaces is 1500.
-# This option has no effect if use_veth_interconnection is False
-# veth_mtu =
-# Example: veth_mtu = 1504
-
-# (BoolOpt) Flag to enable l2-population extension. This option should only be
-# used in conjunction with the ml2 plugin and the l2population mechanism
-# driver. It enables the plugin to populate remote port MACs and IPs (using
-# fdb_add/remove RPC callbacks instead of tunnel_sync/update) on OVS agents
-# in order to optimize tunnel management.
-#
-# l2_population = False
-
-# Enable local ARP responder. Requires OVS 2.1. This is only used by the l2
-# population ML2 MechanismDriver.
-#
-# arp_responder = False
-
-# (BoolOpt) Set or un-set the don't fragment (DF) bit on outgoing IP packets
-# carrying GRE/VXLAN tunnel traffic. The default value is True.
-#
-# dont_fragment = True
-
-# (BoolOpt) Set to True on L2 agents to enable support
-# for distributed virtual routing.
-#
-# enable_distributed_routing = False
-
-[securitygroup]
-# Firewall driver for realizing neutron security group function.
-# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
-# Example: firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-
-# Controls whether neutron security groups are enabled or not.
-# It should be False when you use nova security groups.
-# enable_security_group = True
-
-#-----------------------------------------------------------------------------
-# Sample Configurations.
-#-----------------------------------------------------------------------------
-#
-# 1. With VLANs on eth1.
-# [ovs]
-# network_vlan_ranges = default:2000:3999
-# tunnel_id_ranges =
-# integration_bridge = br-int
-# bridge_mappings = default:br-eth1
-#
-# 2. With GRE tunneling.
-# [ovs]
-# network_vlan_ranges =
-# tunnel_id_ranges = 1:1000
-# integration_bridge = br-int
-# tunnel_bridge = br-tun
-# local_ip = 10.0.0.3
-#
-# 3. With VXLAN tunneling.
-# [ovs]
-# network_vlan_ranges =
-# tenant_network_type = vxlan
-# tunnel_type = vxlan
-# tunnel_id_ranges = 1:1000
-# integration_bridge = br-int
-# tunnel_bridge = br-tun
-# local_ip = 10.0.0.3
-# [agent]
-# tunnel_types = vxlan
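
To make the VLAN sample above concrete, a rough validation sketch (plain Python, not the plugin's real parsing) for network_vlan_ranges entries of the form <physical_network>[:<vlan_min>:<vlan_max>]:

    def parse_vlan_ranges(raw):
        """Parse 'physnet1:1000:2999,physnet2' style values into a dict."""
        ranges = {}
        for entry in filter(None, (e.strip() for e in raw.split(","))):
            parts = entry.split(":")
            physnet = parts[0]
            ranges.setdefault(physnet, [])
            if len(parts) == 3:
                vlan_min, vlan_max = int(parts[1]), int(parts[2])
                if not 1 <= vlan_min <= vlan_max <= 4094:
                    raise ValueError("bad VLAN range: %s" % entry)
                ranges[physnet].append((vlan_min, vlan_max))
        return ranges

    print(parse_vlan_ranges("default:2000:3999"))  # {'default': [(2000, 3999)]}
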
diff --git a/openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini b/openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini
deleted file mode 100644
index bfe8062a..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/plumgrid/plumgrid.ini
+++ /dev/null
@@ -1,14 +0,0 @@
-# Config file for Neutron PLUMgrid Plugin
-
-[plumgriddirector]
-# These settings should point to the PLUMgrid Director
-# for the PLUMgrid platform.
-# director_server=<director-ip-address>
-# director_server_port=<director-port>
-# Authentication parameters for the Director.
-# These are the admin credentials to manage and control
-# the PLUMgrid Director server.
-# username=<director-admin-username>
-# password=<director-admin-password>
-# servertimeout=5
-# driver=<plugin-driver>
diff --git a/openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini b/openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini
deleted file mode 100644
index 9d9cfa25..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/ryu/ryu.ini
+++ /dev/null
@@ -1,44 +0,0 @@
-[ovs]
-# integration_bridge = br-int
-
-# openflow_rest_api = <host IP address of ofp rest api service>:<port: 8080>
-# openflow_rest_api = 127.0.0.1:8080
-
-# tunnel key range: 0 < tunnel_key_min < tunnel_key_max
-# VLAN: 12bits, GRE, VXLAN: 24bits
-# tunnel_key_min = 1
-# tunnel_key_max = 0xffffff
-
-# tunnel_ip = <ip address for tunneling>
-# tunnel_interface = interface for tunneling
-# when tunnel_ip is NOT specified, ip address is read
-# from this interface
-# tunnel_ip =
-# tunnel_interface =
-tunnel_interface = eth0
-
-# ovsdb_port = port number on which ovsdb is listening
-# ryu-agent uses this parameter to set up ovsdb.
-# ovs-vsctl set-manager ptcp:<ovsdb_port>
-# See set-manager section of man ovs-vsctl for details.
-# currently only ptcp is supported.
-# ovsdb_ip = <host IP address on which ovsdb is listening>
-# ovsdb_interface = interface for ovsdb
-# when ovsdb_ip is NOT specified, the IP address is read
-# from this interface
-# ovsdb_port = 6634
-# ovsdb_ip =
-# ovsdb_interface =
-ovsdb_interface = eth0
-
-[securitygroup]
-# Firewall driver for realizing neutron security group function
-# firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-
-# Controls whether neutron security groups are enabled or not.
-# It should be False when you use nova security groups.
-# enable_security_group = True
-
-[agent]
-# Agent's polling interval in seconds
-# polling_interval = 2
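
A small sketch of the tunnel key constraint noted in the [ovs] section above (0 < tunnel_key_min < tunnel_key_max, with a 24-bit key space for GRE/VXLAN); illustrative only:

    MAX_TUNNEL_KEY = 0xffffff  # 24-bit GRE/VXLAN key space

    def check_tunnel_key_range(tunnel_key_min=1, tunnel_key_max=0xffffff):
        """Validate the range and return the number of allocatable keys."""
        if not 0 < tunnel_key_min < tunnel_key_max <= MAX_TUNNEL_KEY:
            raise ValueError("invalid tunnel key range %d..%d"
                             % (tunnel_key_min, tunnel_key_max))
        return tunnel_key_max - tunnel_key_min + 1

    print(check_tunnel_key_range())  # 16777215 keys with the defaults
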
diff --git a/openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini b/openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini
deleted file mode 100644
index baca73b8..00000000
--- a/openstack/usr/share/openstack/neutron/plugins/vmware/nsx.ini
+++ /dev/null
@@ -1,200 +0,0 @@
-[DEFAULT]
-# User name for NSX controller
-# nsx_user = admin
-
-# Password for NSX controller
-# nsx_password = admin
-
-# Time before aborting a request on an unresponsive controller (Seconds)
-# http_timeout = 75
-
-# Maximum number of times a particular request should be retried
-# retries = 2
-
-# Maximum number of times a redirect response should be followed
-# redirects = 2
-
-# Comma-separated list of NSX controller endpoints (<ip>:<port>). When port
-# is omitted, 443 is assumed. This option MUST be specified, e.g.:
-# nsx_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh.ee:80
-
-# UUID of the pre-existing default NSX Transport zone to be used for creating
-# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.:
-# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
-
-# (Optional) UUID for the default l3 gateway service to use with this cluster.
-# To be specified if planning to use logical routers with external gateways.
-# default_l3_gw_service_uuid =
-
-# (Optional) UUID for the default l2 gateway service to use with this cluster.
-# To be specified to provide tenants with a predefined gateway for connecting their networks.
-# default_l2_gw_service_uuid =
-
-# (Optional) UUID for the default service cluster. A service cluster is introduced to
-# represent a group of gateways and it is needed in order to use Logical Services like
-# dhcp and metadata in the logical space. NOTE: If agent_mode is set to 'agentless' this
-# config parameter *MUST BE* set to a valid pre-existent service cluster uuid.
-# default_service_cluster_uuid =
-
-# Default interface name to be used on network-gateway. This value
-# will be used for any device associated with a network gateway for which an
-# interface name was not specified
-# default_interface_name = breth0
-
-[quotas]
-# number of network gateways allowed per tenant, -1 means unlimited
-# quota_network_gateway = 5
-
-[vcns]
-# URL for VCNS manager
-# manager_uri = https://management_ip
-
-# User name for VCNS manager
-# user = admin
-
-# Password for VCNS manager
-# password = default
-
-# (Optional) Datacenter ID for Edge deployment
-# datacenter_moid =
-
-# (Optional) Deployment Container ID for NSX Edge deployment
-# If not specified, either a default global container will be used, or
-# the resource pool and datastore specified below will be used
-# deployment_container_id =
-
-# (Optional) Resource pool ID for NSX Edge deployment
-# resource_pool_id =
-
-# (Optional) Datastore ID for NSX Edge deployment
-# datastore_id =
-
-# (Required) UUID of logic switch for physical network connectivity
-# external_network =
-
-# (Optional) Asynchronous task status check interval
-# default is 2000 (milliseconds)
-# task_status_check_interval = 2000
-
-[nsx]
-# Maximum number of ports for each bridged logical switch
-# The recommended value for this parameter varies with NSX version
-# Please use:
-# NSX 2.x -> 64
-# NSX 3.0, 3.1 -> 5000
-# NSX 3.2 -> 10000
-# max_lp_per_bridged_ls = 5000
-
-# Maximum number of ports for each overlay (stt, gre) logical switch
-# max_lp_per_overlay_ls = 256
-
-# Number of connections to each controller node.
-# default is 10
-# concurrent_connections = 10
-
-# Number of seconds a generation id should be valid for (default -1 meaning do not time out)
-# nsx_gen_timeout = -1
-
-# Acceptable values for 'metadata_mode' are:
-# - 'access_network': this enables a dedicated connection to the metadata
-# proxy for metadata server access via Neutron router.
-# - 'dhcp_host_route': this enables host route injection via the dhcp agent.
-# This option is only useful if running on a host that does not support
-# namespaces; otherwise access_network should be used.
-# metadata_mode = access_network
-
-# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt)
-# default_transport_type = stt
-
-# Specifies in which mode the plugin needs to operate in order to provide DHCP and
-# metadata proxy services to tenant instances. If 'agent' is chosen (default)
-# the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to
-# provide such services. In this mode, the plugin supports API extensions 'agent'
-# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse),
-# the plugin will use NSX logical services for DHCP and metadata proxy. This
-# simplifies the deployment model for Neutron, in that the plugin no longer requires
-# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode
-# becomes ineffective. The 'agentless' mode is supported from NSX 4.2 or above.
-# Furthermore, a 'combined' mode is also provided and is used to support existing
-# deployments that want to adopt the agentless mode going forward. With this mode,
-# existing networks keep being served by the existing infrastructure (thus preserving
-# backward compatibility), whereas new networks will be served by the new infrastructure.
-# Migration tools are provided to 'move' one network from one model to another; with
-# agent_mode set to 'combined', option 'network_auto_schedule' in neutron.conf is
-# ignored, as new networks will no longer be scheduled to existing dhcp agents.
-# agent_mode = agent
-
-# Specifies in which mode packet replication should be done. If set to service,
-# a service node is required in order to perform packet replication. This can
-# also be set to source if one wants replication to be performed locally (NOTE:
-# usually only useful for testing if one does not want to deploy a service node).
-# In order to leverage distributed routers, replication_mode should be set to
-# "service".
-# replication_mode = service
-
-[nsx_sync]
-# Interval in seconds between runs of the status synchronization task.
-# The plugin will aim at resynchronizing operational status for all
-# resources in this interval, and it should therefore be large enough
-# to ensure the task is feasible. Otherwise the plugin will be
-# constantly synchronizing resource status, i.e. a new task is started
-# as soon as the previous is completed.
-# If this value is set to 0, the state synchronization thread for this
-# Neutron instance will be disabled.
-# state_sync_interval = 10
-
-# Random additional delay between two runs of the state synchronization task.
-# An additional wait time between 0 and max_random_sync_delay seconds
-# will be added on top of state_sync_interval.
-# max_random_sync_delay = 0
-
-# Minimum delay, in seconds, between two status synchronization requests for NSX.
-# Depending on chunk size, controller load, and other factors, state
-# synchronization requests might be pretty heavy. This means the
-# controller might take time to respond, and its load might increase
-# considerably. This parameter allows you to specify a minimum
-# interval between two subsequent requests.
-# The value for this parameter must never exceed state_sync_interval.
-# If it does, an error will be raised at startup.
-# min_sync_req_delay = 1
-
-# Minimum number of resources to be retrieved from NSX in a single status
-# synchronization request.
-# The actual size of the chunk will increase if the number of resources is such
-# that using the minimum chunk size will cause the interval between two
-# requests to be less than min_sync_req_delay
-# min_chunk_size = 500
-
-# Enable this option to allow punctual state synchronization on show
-# operations. In this way, show operations will always fetch the operational
-# status of the resource from the NSX backend, and this might have
-# a considerable impact on overall performance.
-# always_read_status = False
-
-[nsx_lsn]
-# Pull LSN information from NSX in case it is missing from the local
-# data store. This is useful to rebuild the local store in case of
-# server recovery
-# sync_on_missing_data = False
-
-[nsx_dhcp]
-# (Optional) Comma separated list of additional dns servers. Default is an empty list
-# extra_domain_name_servers =
-
-# Domain to use for building the hostnames
-# domain_name = openstacklocal
-
-# Default DHCP lease time
-# default_lease_time = 43200
-
-[nsx_metadata]
-# IP address used by Metadata server
-# metadata_server_address = 127.0.0.1
-
-# TCP Port used by Metadata server
-# metadata_server_port = 8775
-
-# When proxying metadata requests, Neutron signs the Instance-ID header with a
-# shared secret to prevent spoofing. You may select any string for a secret,
-# but it MUST match the configuration used by the Metadata server.
-# metadata_shared_secret =
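
As an illustration of the nsx_controllers format described in [DEFAULT] above (comma-separated <ip>:<port> endpoints, with 443 assumed when the port is omitted), a minimal parsing sketch in plain Python:

    def parse_nsx_controllers(raw):
        """Split a controller list into (host, port) pairs, defaulting to 443."""
        endpoints = []
        for item in (c.strip() for c in raw.split(",") if c.strip()):
            host, _, port = item.partition(":")
            endpoints.append((host, int(port) if port else 443))
        return endpoints

    # Documentation-range addresses, used purely as placeholders.
    print(parse_nsx_controllers("192.0.2.1:443, 192.0.2.2, 192.0.2.3:80"))
    # [('192.0.2.1', 443), ('192.0.2.2', 443), ('192.0.2.3', 80)]
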
diff --git a/openstack/usr/share/openstack/neutron/policy.json b/openstack/usr/share/openstack/neutron/policy.json
deleted file mode 100644
index e7db4357..00000000
--- a/openstack/usr/share/openstack/neutron/policy.json
+++ /dev/null
@@ -1,138 +0,0 @@
-{
- "context_is_admin": "role:admin",
- "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
- "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
- "admin_only": "rule:context_is_admin",
- "regular_user": "",
- "shared": "field:networks:shared=True",
- "shared_firewalls": "field:firewalls:shared=True",
- "external": "field:networks:router:external=True",
- "default": "rule:admin_or_owner",
-
- "create_subnet": "rule:admin_or_network_owner",
- "get_subnet": "rule:admin_or_owner or rule:shared",
- "update_subnet": "rule:admin_or_network_owner",
- "delete_subnet": "rule:admin_or_network_owner",
-
- "create_network": "",
- "get_network": "rule:admin_or_owner or rule:shared or rule:external",
- "get_network:router:external": "rule:regular_user",
- "get_network:segments": "rule:admin_only",
- "get_network:provider:network_type": "rule:admin_only",
- "get_network:provider:physical_network": "rule:admin_only",
- "get_network:provider:segmentation_id": "rule:admin_only",
- "get_network:queue_id": "rule:admin_only",
- "create_network:shared": "rule:admin_only",
- "create_network:router:external": "rule:admin_only",
- "create_network:segments": "rule:admin_only",
- "create_network:provider:network_type": "rule:admin_only",
- "create_network:provider:physical_network": "rule:admin_only",
- "create_network:provider:segmentation_id": "rule:admin_only",
- "update_network": "rule:admin_or_owner",
- "update_network:segments": "rule:admin_only",
- "update_network:shared": "rule:admin_only",
- "update_network:provider:network_type": "rule:admin_only",
- "update_network:provider:physical_network": "rule:admin_only",
- "update_network:provider:segmentation_id": "rule:admin_only",
- "update_network:router:external": "rule:admin_only",
- "delete_network": "rule:admin_or_owner",
-
- "create_port": "",
- "create_port:mac_address": "rule:admin_or_network_owner",
- "create_port:fixed_ips": "rule:admin_or_network_owner",
- "create_port:port_security_enabled": "rule:admin_or_network_owner",
- "create_port:binding:host_id": "rule:admin_only",
- "create_port:binding:profile": "rule:admin_only",
- "create_port:mac_learning_enabled": "rule:admin_or_network_owner",
- "get_port": "rule:admin_or_owner",
- "get_port:queue_id": "rule:admin_only",
- "get_port:binding:vif_type": "rule:admin_only",
- "get_port:binding:vif_details": "rule:admin_only",
- "get_port:binding:host_id": "rule:admin_only",
- "get_port:binding:profile": "rule:admin_only",
- "update_port": "rule:admin_or_owner",
- "update_port:fixed_ips": "rule:admin_or_network_owner",
- "update_port:port_security_enabled": "rule:admin_or_network_owner",
- "update_port:binding:host_id": "rule:admin_only",
- "update_port:binding:profile": "rule:admin_only",
- "update_port:mac_learning_enabled": "rule:admin_or_network_owner",
- "delete_port": "rule:admin_or_owner",
-
- "get_router:ha": "rule:admin_only",
- "create_router": "rule:regular_user",
- "create_router:external_gateway_info:enable_snat": "rule:admin_only",
- "create_router:distributed": "rule:admin_only",
- "create_router:ha": "rule:admin_only",
- "get_router": "rule:admin_or_owner",
- "get_router:distributed": "rule:admin_only",
- "update_router:external_gateway_info:enable_snat": "rule:admin_only",
- "update_router:distributed": "rule:admin_only",
- "update_router:ha": "rule:admin_only",
- "delete_router": "rule:admin_or_owner",
-
- "add_router_interface": "rule:admin_or_owner",
- "remove_router_interface": "rule:admin_or_owner",
-
- "create_firewall": "",
- "get_firewall": "rule:admin_or_owner",
- "create_firewall:shared": "rule:admin_only",
- "get_firewall:shared": "rule:admin_only",
- "update_firewall": "rule:admin_or_owner",
- "update_firewall:shared": "rule:admin_only",
- "delete_firewall": "rule:admin_or_owner",
-
- "create_firewall_policy": "",
- "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls",
- "create_firewall_policy:shared": "rule:admin_or_owner",
- "update_firewall_policy": "rule:admin_or_owner",
- "delete_firewall_policy": "rule:admin_or_owner",
-
- "create_firewall_rule": "",
- "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
- "update_firewall_rule": "rule:admin_or_owner",
- "delete_firewall_rule": "rule:admin_or_owner",
-
- "create_qos_queue": "rule:admin_only",
- "get_qos_queue": "rule:admin_only",
-
- "update_agent": "rule:admin_only",
- "delete_agent": "rule:admin_only",
- "get_agent": "rule:admin_only",
-
- "create_dhcp-network": "rule:admin_only",
- "delete_dhcp-network": "rule:admin_only",
- "get_dhcp-networks": "rule:admin_only",
- "create_l3-router": "rule:admin_only",
- "delete_l3-router": "rule:admin_only",
- "get_l3-routers": "rule:admin_only",
- "get_dhcp-agents": "rule:admin_only",
- "get_l3-agents": "rule:admin_only",
- "get_loadbalancer-agent": "rule:admin_only",
- "get_loadbalancer-pools": "rule:admin_only",
-
- "create_floatingip": "rule:regular_user",
- "update_floatingip": "rule:admin_or_owner",
- "delete_floatingip": "rule:admin_or_owner",
- "get_floatingip": "rule:admin_or_owner",
-
- "create_network_profile": "rule:admin_only",
- "update_network_profile": "rule:admin_only",
- "delete_network_profile": "rule:admin_only",
- "get_network_profiles": "",
- "get_network_profile": "",
- "update_policy_profiles": "rule:admin_only",
- "get_policy_profiles": "",
- "get_policy_profile": "",
-
- "create_metering_label": "rule:admin_only",
- "delete_metering_label": "rule:admin_only",
- "get_metering_label": "rule:admin_only",
-
- "create_metering_label_rule": "rule:admin_only",
- "delete_metering_label_rule": "rule:admin_only",
- "get_metering_label_rule": "rule:admin_only",
-
- "get_service_provider": "rule:regular_user",
- "get_lsn": "rule:admin_only",
- "create_lsn": "rule:admin_only"
-}
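
To illustrate how a rule such as "rule:admin_or_owner" above is intended to read, a tiny sketch of its semantics in plain Python (not Neutron's actual policy engine, which evaluates these rule expressions generically):

    def context_is_admin(creds):
        """Mirror of the "role:admin" check."""
        return "admin" in creds.get("roles", [])

    def admin_or_owner(target, creds):
        """Allow admins, or callers whose tenant owns the target resource."""
        return (context_is_admin(creds)
                or target.get("tenant_id") == creds.get("tenant_id"))

    creds = {"roles": ["member"], "tenant_id": "tenant-a"}
    print(admin_or_owner({"tenant_id": "tenant-a"}, creds))  # True: owner
    print(admin_or_owner({"tenant_id": "tenant-b"}, creds))  # False: neither
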
diff --git a/openstack/usr/share/openstack/neutron/vpn_agent.ini b/openstack/usr/share/openstack/neutron/vpn_agent.ini
deleted file mode 100644
index c3089df9..00000000
--- a/openstack/usr/share/openstack/neutron/vpn_agent.ini
+++ /dev/null
@@ -1,14 +0,0 @@
-[DEFAULT]
-# VPN-Agent configuration file
-# Note that vpn-agent inherits from l3-agent, so l3-agent options can also be used here
-
-[vpnagent]
-# VPN device drivers which the VPN agent will use.
-# To use multiple drivers, define this option multiple times.
-# vpn_device_driver=neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver
-# vpn_device_driver=neutron.services.vpn.device_drivers.cisco_ipsec.CiscoCsrIPsecDriver
-# vpn_device_driver=another_driver
-
-[ipsec]
-# Status check interval
-# ipsec_status_check_interval=60
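
A rough sketch of the "define this option multiple times" behaviour noted above: every uncommented vpn_device_driver line contributes one entry to a list (presumably handled by oslo.config's multi-valued option support in the agent itself); illustrative only:

    def collect_vpn_device_drivers(lines):
        """Accumulate every uncommented vpn_device_driver=... line into a list."""
        drivers = []
        for line in lines:
            line = line.strip()
            if line.startswith("vpn_device_driver") and "=" in line:
                drivers.append(line.split("=", 1)[1].strip())
        return drivers

    sample = [
        "vpn_device_driver=neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver",
        "vpn_device_driver="
        "neutron.services.vpn.device_drivers.cisco_ipsec.CiscoCsrIPsecDriver",
    ]
    print(collect_vpn_device_drivers(sample))  # two drivers, in file order
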
diff --git a/strata/openstack-services/neutron.morph b/strata/openstack-services/neutron.morph
index b79a089b..6e203922 100644
--- a/strata/openstack-services/neutron.morph
+++ b/strata/openstack-services/neutron.morph
@@ -2,14 +2,24 @@ name: neutron
kind: chunk
build-system: python-distutils
post-install-commands:
-# Move rootwrap files to a proper location
-- mkdir -p "$DESTDIR"/etc/neutron
-- mv "$DESTDIR$PREFIX"/etc/neutron/rootwrap.d "$DESTDIR"/etc/neutron/
-- mv "$DESTDIR$PREFIX"/etc/neutron/rootwrap.conf "$DESTDIR"/etc/neutron/
-# Add neutron to sudoers controlling which commands is running as a root
-# using the openstack rootwrap.
-- mkdir -p "$DESTDIR"/etc/sudoers.d
- |
+ # Move the configuration files to a proper location
+ mkdir "$DESTDIR"/etc
+ mv "$DESTDIR/$PREFIX"/etc/neutron "$DESTDIR"/etc
+
+ # Remove unused start/stop script
+ rm "$DESTDIR/$PREFIX"/etc/init.d/neutron-server
+
+ # Remove configuration files which will be added by Ansible
+ rm "$DESTDIR"/etc/neutron/neutron.conf
+ rm "$DESTDIR"/etc/neutron/metadata_agent.ini
+ rm "$DESTDIR"/etc/neutron/plugins/ml2/ml2_conf.ini
+ rm "$DESTDIR"/etc/neutron/dhcp_agent.ini
+ rm "$DESTDIR"/etc/neutron/l3_agent.ini
+
+  # Add neutron to sudoers, controlling which commands are run as root
+  # using the openstack rootwrap.
+ mkdir -p "$DESTDIR"/etc/sudoers.d
install -D -m 0440 /proc/self/fd/0 <<'EOF' "$DESTDIR"/etc/sudoers.d/neutron-rootwrap
Defaults:neutron !requiretty