author     Mark McClain <mmcclain@yahoo-inc.com>          2014-09-24 04:00:54 +0000
committer  armando-migliaccio <armamig@gmail.com>         2014-10-01 21:16:27 -0700
commit     205162f58050fcb94db53cd51b674d2093dfe700 (patch)
tree       decc29f1168470147af36e02c99eeb97b74a95bf
parent     d3d0460a8d843077dbdeb9b46034bfe28e0ce2a4 (diff)
download   neutron-205162f58050fcb94db53cd51b674d2093dfe700.tar.gz
remove openvswitch plugin (tag: 2014.2.rc1)

This changeset removes the openvswitch plugin, but retains the agent for ML2. The database models were not removed since operators will need to migrate the data.

Change-Id: I8b519cb2bbebcbec2c78bb0ec9325716970736cf
Closes-Bug: 1323729
-rw-r--r--  neutron/plugins/openvswitch/README                                |    8
-rw-r--r--  neutron/plugins/openvswitch/ovs_db_v2.py                          |  396
-rw-r--r--  neutron/plugins/openvswitch/ovs_neutron_plugin.py                 |  651
-rw-r--r--  neutron/tests/unit/nec/test_agent_scheduler.py                    |    2
-rw-r--r--  neutron/tests/unit/openvswitch/test_agent_scheduler.py            |   30
-rw-r--r--  neutron/tests/unit/openvswitch/test_openvswitch_plugin.py         |  159
-rw-r--r--  neutron/tests/unit/openvswitch/test_ovs_db.py                     |  315
-rw-r--r--  neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py          |    3
-rw-r--r--  neutron/tests/unit/openvswitch/test_ovs_rpcapi.py                 |  134
-rw-r--r--  neutron/tests/unit/openvswitch/test_ovs_security_group.py         |  101
-rw-r--r--  neutron/tests/unit/services/loadbalancer/test_agent_scheduler.py  |    3
-rw-r--r--  neutron/tests/unit/test_extension_extended_attribute.py           |    4
-rw-r--r--  neutron/tests/unit/test_l3_plugin.py                              |    8
13 files changed, 33 insertions, 1781 deletions
diff --git a/neutron/plugins/openvswitch/README b/neutron/plugins/openvswitch/README
index b8991ad0a2..005aca36fd 100644
--- a/neutron/plugins/openvswitch/README
+++ b/neutron/plugins/openvswitch/README
@@ -1,6 +1,4 @@
-The Open vSwitch (OVS) Neutron plugin is a simple plugin to manage OVS
-features using a local agent running on each hypervisor.
+The Open vSwitch (OVS) Neutron plugin has been removed and replaced by ML2. You
+must run the migration manually to upgrade to Juno.
-For details on how to configure and use the plugin, see:
-
-http://openvswitch.org/openstack/documentation/
+See neutron/db/migration/migrate_to_ml2.py
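The new README points operators at neutron/db/migration/migrate_to_ml2.py but does not show how to invoke it. As a minimal sketch only, assuming the migration script takes the source plugin name and the database connection URL as positional arguments plus a --tunnel-type option for tunnelled deployments (these names are assumptions, not taken from this change; check the script's --help), an offline run could be wrapped like this:

    import subprocess
    import sys

    def migrate_ovs_to_ml2(connection_url, tunnel_type="gre"):
        """Convert an openvswitch-plugin database for use with ML2.

        Run offline, with neutron-server stopped and the database backed up.
        """
        cmd = [
            sys.executable, "-m", "neutron.db.migration.migrate_to_ml2",
            "--tunnel-type", tunnel_type,   # assumed option name
            "openvswitch",                  # source plugin whose data is converted
            connection_url,                 # from the [database] section of neutron.conf
        ]
        subprocess.check_call(cmd)

    if __name__ == "__main__":
        # Hypothetical connection URL; substitute the real one.
        migrate_ovs_to_ml2("mysql://neutron:secret@127.0.0.1/neutron")

The database models are deliberately left in place by this changeset so that the conversion can still read them.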
diff --git a/neutron/plugins/openvswitch/ovs_db_v2.py b/neutron/plugins/openvswitch/ovs_db_v2.py
deleted file mode 100644
index c9820ce3bb..0000000000
--- a/neutron/plugins/openvswitch/ovs_db_v2.py
+++ /dev/null
@@ -1,396 +0,0 @@
-# Copyright 2011 VMware, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.db import exception as db_exc
-from six import moves
-from sqlalchemy import func
-from sqlalchemy.orm import exc
-
-from neutron.common import exceptions as n_exc
-import neutron.db.api as db
-from neutron.db import models_v2
-from neutron.db import securitygroups_db as sg_db
-from neutron.extensions import securitygroup as ext_sg
-from neutron import manager
-from neutron.openstack.common import log as logging
-from neutron.plugins.openvswitch.common import constants
-from neutron.plugins.openvswitch import ovs_models_v2
-
-LOG = logging.getLogger(__name__)
-
-
-def get_network_binding(session, network_id):
- session = session or db.get_session()
- try:
- binding = (session.query(ovs_models_v2.NetworkBinding).
- filter_by(network_id=network_id).
- one())
- return binding
- except exc.NoResultFound:
- return
-
-
-def add_network_binding(session, network_id, network_type,
- physical_network, segmentation_id):
- with session.begin(subtransactions=True):
- binding = ovs_models_v2.NetworkBinding(network_id, network_type,
- physical_network,
- segmentation_id)
- session.add(binding)
- return binding
-
-
-def sync_vlan_allocations(network_vlan_ranges):
- """Synchronize vlan_allocations table with configured VLAN ranges."""
-
- session = db.get_session()
- with session.begin():
- # get existing allocations for all physical networks
- allocations = dict()
- allocs = (session.query(ovs_models_v2.VlanAllocation).
- all())
- for alloc in allocs:
- if alloc.physical_network not in allocations:
- allocations[alloc.physical_network] = set()
- allocations[alloc.physical_network].add(alloc)
-
- # process vlan ranges for each configured physical network
- for physical_network, vlan_ranges in network_vlan_ranges.iteritems():
- # determine current configured allocatable vlans for this
- # physical network
- vlan_ids = set()
- for vlan_range in vlan_ranges:
- vlan_ids |= set(moves.xrange(vlan_range[0], vlan_range[1] + 1))
-
- # remove from table unallocated vlans not currently allocatable
- if physical_network in allocations:
- for alloc in allocations[physical_network]:
- try:
- # see if vlan is allocatable
- vlan_ids.remove(alloc.vlan_id)
- except KeyError:
- # it's not allocatable, so check if its allocated
- if not alloc.allocated:
- # it's not, so remove it from table
- LOG.debug(_("Removing vlan %(vlan_id)s on "
- "physical network "
- "%(physical_network)s from pool"),
- {'vlan_id': alloc.vlan_id,
- 'physical_network': physical_network})
- session.delete(alloc)
- del allocations[physical_network]
-
- # add missing allocatable vlans to table
- for vlan_id in sorted(vlan_ids):
- alloc = ovs_models_v2.VlanAllocation(physical_network, vlan_id)
- session.add(alloc)
-
- # remove from table unallocated vlans for any unconfigured physical
- # networks
- for allocs in allocations.itervalues():
- for alloc in allocs:
- if not alloc.allocated:
- LOG.debug(_("Removing vlan %(vlan_id)s on physical "
- "network %(physical_network)s from pool"),
- {'vlan_id': alloc.vlan_id,
- 'physical_network': alloc.physical_network})
- session.delete(alloc)
-
-
-def get_vlan_allocation(physical_network, vlan_id):
- session = db.get_session()
- try:
- alloc = (session.query(ovs_models_v2.VlanAllocation).
- filter_by(physical_network=physical_network,
- vlan_id=vlan_id).
- one())
- return alloc
- except exc.NoResultFound:
- return
-
-
-def reserve_vlan(session):
- with session.begin(subtransactions=True):
- alloc = (session.query(ovs_models_v2.VlanAllocation).
- filter_by(allocated=False).
- with_lockmode('update').
- first())
- if alloc:
- LOG.debug(_("Reserving vlan %(vlan_id)s on physical network "
- "%(physical_network)s from pool"),
- {'vlan_id': alloc.vlan_id,
- 'physical_network': alloc.physical_network})
- alloc.allocated = True
- return (alloc.physical_network, alloc.vlan_id)
- raise n_exc.NoNetworkAvailable()
-
-
-def reserve_specific_vlan(session, physical_network, vlan_id):
- with session.begin(subtransactions=True):
- try:
- alloc = (session.query(ovs_models_v2.VlanAllocation).
- filter_by(physical_network=physical_network,
- vlan_id=vlan_id).
- with_lockmode('update').
- one())
- if alloc.allocated:
- if vlan_id == constants.FLAT_VLAN_ID:
- raise n_exc.FlatNetworkInUse(
- physical_network=physical_network)
- else:
- raise n_exc.VlanIdInUse(vlan_id=vlan_id,
- physical_network=physical_network)
- LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical "
- "network %(physical_network)s from pool"),
- {'vlan_id': vlan_id,
- 'physical_network': physical_network})
- alloc.allocated = True
- except exc.NoResultFound:
- LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical "
- "network %(physical_network)s outside pool"),
- {'vlan_id': vlan_id,
- 'physical_network': physical_network})
- alloc = ovs_models_v2.VlanAllocation(physical_network, vlan_id)
- alloc.allocated = True
- session.add(alloc)
-
-
-def release_vlan(session, physical_network, vlan_id, network_vlan_ranges):
- with session.begin(subtransactions=True):
- try:
- alloc = (session.query(ovs_models_v2.VlanAllocation).
- filter_by(physical_network=physical_network,
- vlan_id=vlan_id).
- with_lockmode('update').
- one())
- alloc.allocated = False
- inside = False
- for vlan_range in network_vlan_ranges.get(physical_network, []):
- if vlan_id >= vlan_range[0] and vlan_id <= vlan_range[1]:
- inside = True
- break
- if not inside:
- session.delete(alloc)
- LOG.debug(_("Releasing vlan %(vlan_id)s on physical network "
- "%(physical_network)s outside pool"),
- {'vlan_id': vlan_id,
- 'physical_network': physical_network})
- else:
- LOG.debug(_("Releasing vlan %(vlan_id)s on physical network "
- "%(physical_network)s to pool"),
- {'vlan_id': vlan_id,
- 'physical_network': physical_network})
- except exc.NoResultFound:
- LOG.warning(_("vlan_id %(vlan_id)s on physical network "
- "%(physical_network)s not found"),
- {'vlan_id': vlan_id,
- 'physical_network': physical_network})
-
-
-def sync_tunnel_allocations(tunnel_id_ranges):
- """Synchronize tunnel_allocations table with configured tunnel ranges."""
-
- # determine current configured allocatable tunnels
- tunnel_ids = set()
- for tunnel_id_range in tunnel_id_ranges:
- tun_min, tun_max = tunnel_id_range
- if tun_max + 1 - tun_min > 1000000:
- LOG.error(_("Skipping unreasonable tunnel ID range "
- "%(tun_min)s:%(tun_max)s"),
- {'tun_min': tun_min, 'tun_max': tun_max})
- else:
- tunnel_ids |= set(moves.xrange(tun_min, tun_max + 1))
-
- session = db.get_session()
- with session.begin():
- # remove from table unallocated tunnels not currently allocatable
- allocs = (session.query(ovs_models_v2.TunnelAllocation).
- all())
- for alloc in allocs:
- try:
- # see if tunnel is allocatable
- tunnel_ids.remove(alloc.tunnel_id)
- except KeyError:
- # it's not allocatable, so check if its allocated
- if not alloc.allocated:
- # it's not, so remove it from table
- LOG.debug(_("Removing tunnel %s from pool"),
- alloc.tunnel_id)
- session.delete(alloc)
-
- # add missing allocatable tunnels to table
- for tunnel_id in sorted(tunnel_ids):
- alloc = ovs_models_v2.TunnelAllocation(tunnel_id)
- session.add(alloc)
-
-
-def get_tunnel_allocation(tunnel_id):
- session = db.get_session()
- try:
- alloc = (session.query(ovs_models_v2.TunnelAllocation).
- filter_by(tunnel_id=tunnel_id).
- with_lockmode('update').
- one())
- return alloc
- except exc.NoResultFound:
- return
-
-
-def reserve_tunnel(session):
- with session.begin(subtransactions=True):
- alloc = (session.query(ovs_models_v2.TunnelAllocation).
- filter_by(allocated=False).
- with_lockmode('update').
- first())
- if alloc:
- LOG.debug(_("Reserving tunnel %s from pool"), alloc.tunnel_id)
- alloc.allocated = True
- return alloc.tunnel_id
- raise n_exc.NoNetworkAvailable()
-
-
-def reserve_specific_tunnel(session, tunnel_id):
- with session.begin(subtransactions=True):
- try:
- alloc = (session.query(ovs_models_v2.TunnelAllocation).
- filter_by(tunnel_id=tunnel_id).
- with_lockmode('update').
- one())
- if alloc.allocated:
- raise n_exc.TunnelIdInUse(tunnel_id=tunnel_id)
- LOG.debug(_("Reserving specific tunnel %s from pool"), tunnel_id)
- alloc.allocated = True
- except exc.NoResultFound:
- LOG.debug(_("Reserving specific tunnel %s outside pool"),
- tunnel_id)
- alloc = ovs_models_v2.TunnelAllocation(tunnel_id)
- alloc.allocated = True
- session.add(alloc)
-
-
-def release_tunnel(session, tunnel_id, tunnel_id_ranges):
- with session.begin(subtransactions=True):
- try:
- alloc = (session.query(ovs_models_v2.TunnelAllocation).
- filter_by(tunnel_id=tunnel_id).
- with_lockmode('update').
- one())
- alloc.allocated = False
- inside = False
- for tunnel_id_range in tunnel_id_ranges:
- if (tunnel_id >= tunnel_id_range[0]
- and tunnel_id <= tunnel_id_range[1]):
- inside = True
- break
- if not inside:
- session.delete(alloc)
- LOG.debug(_("Releasing tunnel %s outside pool"), tunnel_id)
- else:
- LOG.debug(_("Releasing tunnel %s to pool"), tunnel_id)
- except exc.NoResultFound:
- LOG.warning(_("tunnel_id %s not found"), tunnel_id)
-
-
-def get_port(port_id):
- session = db.get_session()
- try:
- port = session.query(models_v2.Port).filter_by(id=port_id).one()
- except exc.NoResultFound:
- port = None
- return port
-
-
-def get_port_from_device(port_id):
- """Get port from database."""
- LOG.debug(_("get_port_with_securitygroups() called:port_id=%s"), port_id)
- session = db.get_session()
- sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
-
- query = session.query(models_v2.Port,
- sg_db.SecurityGroupPortBinding.security_group_id)
- query = query.outerjoin(sg_db.SecurityGroupPortBinding,
- models_v2.Port.id == sg_binding_port)
- query = query.filter(models_v2.Port.id == port_id)
- port_and_sgs = query.all()
- if not port_and_sgs:
- return None
- port = port_and_sgs[0][0]
- plugin = manager.NeutronManager.get_plugin()
- port_dict = plugin._make_port_dict(port)
- port_dict[ext_sg.SECURITYGROUPS] = [
- sg_id for port_, sg_id in port_and_sgs if sg_id]
- port_dict['security_group_rules'] = []
- port_dict['security_group_source_groups'] = []
- port_dict['fixed_ips'] = [ip['ip_address']
- for ip in port['fixed_ips']]
- return port_dict
-
-
-def set_port_status(port_id, status):
- session = db.get_session()
- try:
- port = session.query(models_v2.Port).filter_by(id=port_id).one()
- port['status'] = status
- session.merge(port)
- session.flush()
- except exc.NoResultFound:
- raise n_exc.PortNotFound(port_id=port_id)
-
-
-def get_tunnel_endpoints():
- session = db.get_session()
-
- tunnels = session.query(ovs_models_v2.TunnelEndpoint)
- return [{'id': tunnel.id,
- 'ip_address': tunnel.ip_address} for tunnel in tunnels]
-
-
-def _generate_tunnel_id(session):
- max_tunnel_id = session.query(
- func.max(ovs_models_v2.TunnelEndpoint.id)).scalar() or 0
- return max_tunnel_id + 1
-
-
-def add_tunnel_endpoint(ip, max_retries=10):
- """Return the endpoint of the given IP address or generate a new one."""
-
- # NOTE(rpodolyaka): generation of a new tunnel endpoint must be put into a
- # repeatedly executed transactional block to ensure it
- # doesn't conflict with any other concurrently executed
- # DB transactions in spite of the specified transactions
- # isolation level value
- for i in moves.xrange(max_retries):
- LOG.debug(_('Adding a tunnel endpoint for %s'), ip)
- try:
- session = db.get_session()
- with session.begin(subtransactions=True):
- tunnel = (session.query(ovs_models_v2.TunnelEndpoint).
- filter_by(ip_address=ip).with_lockmode('update').
- first())
-
- if tunnel is None:
- tunnel_id = _generate_tunnel_id(session)
- tunnel = ovs_models_v2.TunnelEndpoint(ip, tunnel_id)
- session.add(tunnel)
-
- return tunnel
- except db_exc.DBDuplicateEntry:
- # a concurrent transaction has been committed, try again
- LOG.debug(_('Adding a tunnel endpoint failed due to a concurrent'
- 'transaction had been committed (%s attempts left)'),
- max_retries - (i + 1))
-
- raise n_exc.NeutronException(
- message=_('Unable to generate a new tunnel id'))
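For reviewers skimming the 396 deleted lines above: the core of ovs_db_v2.py was pool bookkeeping for VLAN and tunnel allocations. The following purely in-memory sketch restates what sync_vlan_allocations() did (illustrative only; it deliberately ignores the SQLAlchemy sessions, row locking, and logging of the deleted code):

    def sync_vlan_allocations(allocations, network_vlan_ranges):
        """allocations maps physical_network -> {vlan_id: allocated_flag}."""
        for physical_network, vlan_ranges in network_vlan_ranges.items():
            configured = set()
            for vlan_min, vlan_max in vlan_ranges:
                configured |= set(range(vlan_min, vlan_max + 1))
            existing = allocations.setdefault(physical_network, {})
            # drop unallocated entries that are no longer configurable
            for vlan_id in list(existing):
                if vlan_id not in configured and not existing[vlan_id]:
                    del existing[vlan_id]
            # add missing allocatable entries as unallocated
            for vlan_id in configured:
                existing.setdefault(vlan_id, False)
        # drop unallocated entries for physical networks no longer configured
        for physical_network in list(allocations):
            if physical_network not in network_vlan_ranges:
                existing = allocations[physical_network]
                for vlan_id in list(existing):
                    if not existing[vlan_id]:
                        del existing[vlan_id]
                if not existing:
                    del allocations[physical_network]
        return allocations

    pool = sync_vlan_allocations({}, {"physnet1": [(10, 19)]})
    assert pool["physnet1"][10] is False  # VLAN 10 is allocatable but not yet reserved

ML2's type drivers carry equivalent pool-synchronization logic, which is why this module can be dropped once the data has been migrated.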
diff --git a/neutron/plugins/openvswitch/ovs_neutron_plugin.py b/neutron/plugins/openvswitch/ovs_neutron_plugin.py
deleted file mode 100644
index 7dad9b66d9..0000000000
--- a/neutron/plugins/openvswitch/ovs_neutron_plugin.py
+++ /dev/null
@@ -1,651 +0,0 @@
-# Copyright 2011 VMware, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sys
-
-from oslo.config import cfg
-
-from neutron.agent import securitygroups_rpc as sg_rpc
-from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
-from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
-from neutron.api.rpc.handlers import dhcp_rpc
-from neutron.api.rpc.handlers import l3_rpc
-from neutron.api.rpc.handlers import securitygroups_rpc
-from neutron.api.v2 import attributes
-from neutron.common import constants as q_const
-from neutron.common import exceptions as n_exc
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.common import utils
-from neutron.db import agents_db
-from neutron.db import agentschedulers_db
-from neutron.db import allowedaddresspairs_db as addr_pair_db
-from neutron.db import db_base_plugin_v2
-from neutron.db import external_net_db
-from neutron.db import extradhcpopt_db
-from neutron.db import extraroute_db
-from neutron.db import l3_agentschedulers_db
-from neutron.db import l3_gwmode_db
-from neutron.db import portbindings_db
-from neutron.db import quota_db # noqa
-from neutron.db import securitygroups_rpc_base as sg_db_rpc
-from neutron.extensions import allowedaddresspairs as addr_pair
-from neutron.extensions import extra_dhcp_opt as edo_ext
-from neutron.extensions import portbindings
-from neutron.extensions import providernet as provider
-from neutron.extensions import securitygroup as ext_sg
-from neutron import manager
-from neutron.openstack.common import importutils
-from neutron.openstack.common import log as logging
-from neutron.plugins.common import constants as svc_constants
-from neutron.plugins.common import utils as plugin_utils
-from neutron.plugins.openvswitch.common import config # noqa
-from neutron.plugins.openvswitch.common import constants
-from neutron.plugins.openvswitch import ovs_db_v2
-
-
-LOG = logging.getLogger(__name__)
-
-
-class OVSRpcCallbacks(n_rpc.RpcCallback):
-
- # history
- # 1.0 Initial version
- # 1.1 Support Security Group RPC
- # 1.2 Support get_devices_details_list
-
- RPC_API_VERSION = '1.2'
-
- def __init__(self, notifier, tunnel_type):
- super(OVSRpcCallbacks, self).__init__()
- self.notifier = notifier
- self.tunnel_type = tunnel_type
-
- def get_device_details(self, rpc_context, **kwargs):
- """Agent requests device details."""
- agent_id = kwargs.get('agent_id')
- device = kwargs.get('device')
- LOG.debug(_("Device %(device)s details requested from %(agent_id)s"),
- {'device': device, 'agent_id': agent_id})
- port = ovs_db_v2.get_port(device)
- if port:
- binding = ovs_db_v2.get_network_binding(None, port['network_id'])
- entry = {'device': device,
- 'network_id': port['network_id'],
- 'port_id': port['id'],
- 'admin_state_up': port['admin_state_up'],
- 'network_type': binding.network_type,
- 'segmentation_id': binding.segmentation_id,
- 'physical_network': binding.physical_network}
- new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up']
- else q_const.PORT_STATUS_DOWN)
- if port['status'] != new_status:
- ovs_db_v2.set_port_status(port['id'], new_status)
- else:
- entry = {'device': device}
- LOG.debug(_("%s can not be found in database"), device)
- return entry
-
- def get_devices_details_list(self, rpc_context, **kwargs):
- return [
- self.get_device_details(
- rpc_context,
- device=device,
- **kwargs
- )
- for device in kwargs.pop('devices', [])
- ]
-
- def update_device_down(self, rpc_context, **kwargs):
- """Device no longer exists on agent."""
- agent_id = kwargs.get('agent_id')
- device = kwargs.get('device')
- host = kwargs.get('host')
- port = ovs_db_v2.get_port(device)
- LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"),
- {'device': device, 'agent_id': agent_id})
- if port:
- entry = {'device': device,
- 'exists': True}
- plugin = manager.NeutronManager.get_plugin()
- if (host and
- not plugin.get_port_host(rpc_context, port['id']) == host):
- LOG.debug(_("Device %(device)s not bound to the"
- " agent host %(host)s"),
- {'device': device, 'host': host})
- elif port['status'] != q_const.PORT_STATUS_DOWN:
- # Set port status to DOWN
- ovs_db_v2.set_port_status(port['id'],
- q_const.PORT_STATUS_DOWN)
- else:
- entry = {'device': device,
- 'exists': False}
- LOG.debug(_("%s can not be found in database"), device)
- return entry
-
- def update_device_up(self, rpc_context, **kwargs):
- """Device is up on agent."""
- agent_id = kwargs.get('agent_id')
- device = kwargs.get('device')
- host = kwargs.get('host')
- port = ovs_db_v2.get_port(device)
- LOG.debug(_("Device %(device)s up on %(agent_id)s"),
- {'device': device, 'agent_id': agent_id})
- plugin = manager.NeutronManager.get_plugin()
- if port:
- if (host and
- not plugin.get_port_host(rpc_context, port['id']) == host):
- LOG.debug(_("Device %(device)s not bound to the"
- " agent host %(host)s"),
- {'device': device, 'host': host})
- return
- elif port['status'] != q_const.PORT_STATUS_ACTIVE:
- ovs_db_v2.set_port_status(port['id'],
- q_const.PORT_STATUS_ACTIVE)
- else:
- LOG.debug(_("%s can not be found in database"), device)
-
- def tunnel_sync(self, rpc_context, **kwargs):
- """Update new tunnel.
-
- Updates the datbase with the tunnel IP. All listening agents will also
- be notified about the new tunnel IP.
- """
- tunnel_ip = kwargs.get('tunnel_ip')
- # Update the database with the IP
- tunnel = ovs_db_v2.add_tunnel_endpoint(tunnel_ip)
- tunnels = ovs_db_v2.get_tunnel_endpoints()
- entry = dict()
- entry['tunnels'] = tunnels
- # Notify all other listening agents
- self.notifier.tunnel_update(rpc_context, tunnel.ip_address,
- tunnel.id, self.tunnel_type)
- # Return the list of tunnels IP's to the agent
- return entry
-
-
-class SecurityGroupServerRpcMixin(sg_db_rpc.SecurityGroupServerRpcMixin):
-
- @classmethod
- def get_port_from_device(cls, device):
- port = ovs_db_v2.get_port_from_device(device)
- if port:
- port['device'] = device
- return port
-
-
-class AgentNotifierApi(n_rpc.RpcProxy,
- sg_rpc.SecurityGroupAgentRpcApiMixin):
- '''Agent side of the openvswitch rpc API.
-
- API version history:
- 1.0 - Initial version.
-
- '''
-
- BASE_RPC_API_VERSION = '1.0'
-
- def __init__(self, topic):
- super(AgentNotifierApi, self).__init__(
- topic=topic, default_version=self.BASE_RPC_API_VERSION)
- self.topic_network_delete = topics.get_topic_name(topic,
- topics.NETWORK,
- topics.DELETE)
- self.topic_port_update = topics.get_topic_name(topic,
- topics.PORT,
- topics.UPDATE)
- self.topic_tunnel_update = topics.get_topic_name(topic,
- constants.TUNNEL,
- topics.UPDATE)
-
- def network_delete(self, context, network_id):
- self.fanout_cast(context,
- self.make_msg('network_delete',
- network_id=network_id),
- topic=self.topic_network_delete)
-
- def port_update(self, context, port, network_type, segmentation_id,
- physical_network):
- self.fanout_cast(context,
- self.make_msg('port_update',
- port=port,
- network_type=network_type,
- segmentation_id=segmentation_id,
- physical_network=physical_network),
- topic=self.topic_port_update)
-
- def tunnel_update(self, context, tunnel_ip, tunnel_id, tunnel_type):
- self.fanout_cast(context,
- self.make_msg('tunnel_update',
- tunnel_ip=tunnel_ip,
- tunnel_id=tunnel_id,
- tunnel_type=tunnel_type),
- topic=self.topic_tunnel_update)
-
-
-class OVSNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
- external_net_db.External_net_db_mixin,
- extraroute_db.ExtraRoute_db_mixin,
- l3_gwmode_db.L3_NAT_db_mixin,
- SecurityGroupServerRpcMixin,
- l3_agentschedulers_db.L3AgentSchedulerDbMixin,
- agentschedulers_db.DhcpAgentSchedulerDbMixin,
- portbindings_db.PortBindingMixin,
- extradhcpopt_db.ExtraDhcpOptMixin,
- addr_pair_db.AllowedAddressPairsMixin):
-
- """Implement the Neutron abstractions using Open vSwitch.
-
- Depending on whether tunneling is enabled, either a GRE, VXLAN tunnel or
- a new VLAN is created for each network. An agent is relied upon to
- perform the actual OVS configuration on each host.
-
- The provider extension is also supported. As discussed in
- https://bugs.launchpad.net/neutron/+bug/1023156, this class could
- be simplified, and filtering on extended attributes could be
- handled, by adding support for extended attributes to the
- NeutronDbPluginV2 base class. When that occurs, this class should
- be updated to take advantage of it.
-
- The port binding extension enables an external application relay
- information to and from the plugin.
- """
-
- # This attribute specifies whether the plugin supports or not
- # bulk/pagination/sorting operations. Name mangling is used in
- # order to ensure it is qualified by class
- __native_bulk_support = True
- __native_pagination_support = True
- __native_sorting_support = True
-
- _supported_extension_aliases = ["provider", "external-net", "router",
- "ext-gw-mode", "binding", "quotas",
- "security-group", "agent", "extraroute",
- "l3_agent_scheduler",
- "dhcp_agent_scheduler",
- "extra_dhcp_opt",
- "allowed-address-pairs"]
-
- @property
- def supported_extension_aliases(self):
- if not hasattr(self, '_aliases'):
- aliases = self._supported_extension_aliases[:]
- sg_rpc.disable_security_group_extension_by_config(aliases)
- self._aliases = aliases
- return self._aliases
-
- db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
- attributes.NETWORKS, ['_extend_network_dict_provider_ovs'])
-
- def __init__(self, configfile=None):
- super(OVSNeutronPluginV2, self).__init__()
- self.base_binding_dict = {
- portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS,
- portbindings.VIF_DETAILS: {
- # TODO(rkukura): Replace with new VIF security details
- portbindings.CAP_PORT_FILTER:
- 'security-group' in self.supported_extension_aliases,
- portbindings.OVS_HYBRID_PLUG: True}}
- self._parse_network_vlan_ranges()
- ovs_db_v2.sync_vlan_allocations(self.network_vlan_ranges)
- self.tenant_network_type = cfg.CONF.OVS.tenant_network_type
- if self.tenant_network_type not in [svc_constants.TYPE_LOCAL,
- svc_constants.TYPE_VLAN,
- svc_constants.TYPE_GRE,
- svc_constants.TYPE_VXLAN,
- svc_constants.TYPE_NONE]:
- LOG.error(_("Invalid tenant_network_type: %s. "
- "Server terminated!"),
- self.tenant_network_type)
- sys.exit(1)
- self.enable_tunneling = cfg.CONF.OVS.enable_tunneling
- self.tunnel_type = None
- if self.enable_tunneling:
- self.tunnel_type = (cfg.CONF.OVS.tunnel_type or
- svc_constants.TYPE_GRE)
- elif cfg.CONF.OVS.tunnel_type:
- self.tunnel_type = cfg.CONF.OVS.tunnel_type
- self.enable_tunneling = True
- self.tunnel_id_ranges = []
- if self.enable_tunneling:
- self._parse_tunnel_id_ranges()
- ovs_db_v2.sync_tunnel_allocations(self.tunnel_id_ranges)
- elif self.tenant_network_type in constants.TUNNEL_NETWORK_TYPES:
- LOG.error(_("Tunneling disabled but tenant_network_type is '%s'. "
- "Server terminated!"), self.tenant_network_type)
- sys.exit(1)
- self.setup_rpc()
- self.network_scheduler = importutils.import_object(
- cfg.CONF.network_scheduler_driver
- )
- self.router_scheduler = importutils.import_object(
- cfg.CONF.router_scheduler_driver
- )
-
- def setup_rpc(self):
- # RPC support
- self.service_topics = {svc_constants.CORE: topics.PLUGIN,
- svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
- self.conn = n_rpc.create_connection(new=True)
- self.notifier = AgentNotifierApi(topics.AGENT)
- self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = (
- dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
- )
- self.agent_notifiers[q_const.AGENT_TYPE_L3] = (
- l3_rpc_agent_api.L3AgentNotifyAPI()
- )
- self.endpoints = [OVSRpcCallbacks(self.notifier, self.tunnel_type),
- securitygroups_rpc.SecurityGroupServerRpcCallback(),
- dhcp_rpc.DhcpRpcCallback(),
- l3_rpc.L3RpcCallback(),
- agents_db.AgentExtRpcCallback()]
- for svc_topic in self.service_topics.values():
- self.conn.create_consumer(svc_topic, self.endpoints, fanout=False)
- # Consume from all consumers in threads
- self.conn.consume_in_threads()
-
- def _parse_network_vlan_ranges(self):
- try:
- self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
- cfg.CONF.OVS.network_vlan_ranges)
- except Exception as ex:
- LOG.error(_("%s. Server terminated!"), ex)
- sys.exit(1)
- LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges)
-
- def _parse_tunnel_id_ranges(self):
- for entry in cfg.CONF.OVS.tunnel_id_ranges:
- entry = entry.strip()
- try:
- tun_min, tun_max = entry.split(':')
- self.tunnel_id_ranges.append((int(tun_min), int(tun_max)))
- except ValueError as ex:
- LOG.error(_("Invalid tunnel ID range: "
- "'%(range)s' - %(e)s. Server terminated!"),
- {'range': entry, 'e': ex})
- sys.exit(1)
- LOG.info(_("Tunnel ID ranges: %s"), self.tunnel_id_ranges)
-
- def _extend_network_dict_provider_ovs(self, network, net_db,
- net_binding=None):
- # this method used in two cases: when binding is provided explicitly
- # and when it is a part of db model object
- binding = net_db.binding if net_db else net_binding
- network[provider.NETWORK_TYPE] = binding.network_type
- if binding.network_type in constants.TUNNEL_NETWORK_TYPES:
- network[provider.PHYSICAL_NETWORK] = None
- network[provider.SEGMENTATION_ID] = binding.segmentation_id
- elif binding.network_type == svc_constants.TYPE_FLAT:
- network[provider.PHYSICAL_NETWORK] = binding.physical_network
- network[provider.SEGMENTATION_ID] = None
- elif binding.network_type == svc_constants.TYPE_VLAN:
- network[provider.PHYSICAL_NETWORK] = binding.physical_network
- network[provider.SEGMENTATION_ID] = binding.segmentation_id
- elif binding.network_type == svc_constants.TYPE_LOCAL:
- network[provider.PHYSICAL_NETWORK] = None
- network[provider.SEGMENTATION_ID] = None
-
- def _process_provider_create(self, context, attrs):
- network_type = attrs.get(provider.NETWORK_TYPE)
- physical_network = attrs.get(provider.PHYSICAL_NETWORK)
- segmentation_id = attrs.get(provider.SEGMENTATION_ID)
-
- network_type_set = attributes.is_attr_set(network_type)
- physical_network_set = attributes.is_attr_set(physical_network)
- segmentation_id_set = attributes.is_attr_set(segmentation_id)
-
- if not (network_type_set or physical_network_set or
- segmentation_id_set):
- return (None, None, None)
-
- if not network_type_set:
- msg = _("provider:network_type required")
- raise n_exc.InvalidInput(error_message=msg)
- elif network_type == svc_constants.TYPE_FLAT:
- if segmentation_id_set:
- msg = _("provider:segmentation_id specified for flat network")
- raise n_exc.InvalidInput(error_message=msg)
- else:
- segmentation_id = constants.FLAT_VLAN_ID
- elif network_type == svc_constants.TYPE_VLAN:
- if not segmentation_id_set:
- msg = _("provider:segmentation_id required")
- raise n_exc.InvalidInput(error_message=msg)
- if not utils.is_valid_vlan_tag(segmentation_id):
- msg = (_("provider:segmentation_id out of range "
- "(%(min_id)s through %(max_id)s)") %
- {'min_id': q_const.MIN_VLAN_TAG,
- 'max_id': q_const.MAX_VLAN_TAG})
- raise n_exc.InvalidInput(error_message=msg)
- elif network_type in constants.TUNNEL_NETWORK_TYPES:
- if not self.enable_tunneling:
- msg = _("%s networks are not enabled") % network_type
- raise n_exc.InvalidInput(error_message=msg)
- if physical_network_set:
- msg = _("provider:physical_network specified for %s "
- "network") % network_type
- raise n_exc.InvalidInput(error_message=msg)
- else:
- physical_network = None
- if not segmentation_id_set:
- msg = _("provider:segmentation_id required")
- raise n_exc.InvalidInput(error_message=msg)
- elif network_type == svc_constants.TYPE_LOCAL:
- if physical_network_set:
- msg = _("provider:physical_network specified for local "
- "network")
- raise n_exc.InvalidInput(error_message=msg)
- else:
- physical_network = None
- if segmentation_id_set:
- msg = _("provider:segmentation_id specified for local "
- "network")
- raise n_exc.InvalidInput(error_message=msg)
- else:
- segmentation_id = None
- else:
- msg = _("provider:network_type %s not supported") % network_type
- raise n_exc.InvalidInput(error_message=msg)
-
- if network_type in [svc_constants.TYPE_VLAN, svc_constants.TYPE_FLAT]:
- if physical_network_set:
- if physical_network not in self.network_vlan_ranges:
- msg = _("Unknown provider:physical_network "
- "%s") % physical_network
- raise n_exc.InvalidInput(error_message=msg)
- elif 'default' in self.network_vlan_ranges:
- physical_network = 'default'
- else:
- msg = _("provider:physical_network required")
- raise n_exc.InvalidInput(error_message=msg)
-
- return (network_type, physical_network, segmentation_id)
-
- def create_network(self, context, network):
- (network_type, physical_network,
- segmentation_id) = self._process_provider_create(context,
- network['network'])
-
- session = context.session
- #set up default security groups
- tenant_id = self._get_tenant_id_for_create(
- context, network['network'])
- self._ensure_default_security_group(context, tenant_id)
-
- with session.begin(subtransactions=True):
- if not network_type:
- # tenant network
- network_type = self.tenant_network_type
- if network_type == svc_constants.TYPE_NONE:
- raise n_exc.TenantNetworksDisabled()
- elif network_type == svc_constants.TYPE_VLAN:
- (physical_network,
- segmentation_id) = ovs_db_v2.reserve_vlan(session)
- elif network_type in constants.TUNNEL_NETWORK_TYPES:
- segmentation_id = ovs_db_v2.reserve_tunnel(session)
- # no reservation needed for TYPE_LOCAL
- else:
- # provider network
- if network_type in [svc_constants.TYPE_VLAN,
- svc_constants.TYPE_FLAT]:
- ovs_db_v2.reserve_specific_vlan(session, physical_network,
- segmentation_id)
- elif network_type in constants.TUNNEL_NETWORK_TYPES:
- ovs_db_v2.reserve_specific_tunnel(session, segmentation_id)
- # no reservation needed for TYPE_LOCAL
- net = super(OVSNeutronPluginV2, self).create_network(context,
- network)
- binding = ovs_db_v2.add_network_binding(session, net['id'],
- network_type,
- physical_network,
- segmentation_id)
-
- self._process_l3_create(context, net, network['network'])
- # passing None as db model to use binding object
- self._extend_network_dict_provider_ovs(net, None, binding)
- # note - exception will rollback entire transaction
- LOG.debug(_("Created network: %s"), net['id'])
- return net
-
- def update_network(self, context, id, network):
- provider._raise_if_updates_provider_attributes(network['network'])
-
- session = context.session
- with session.begin(subtransactions=True):
- net = super(OVSNeutronPluginV2, self).update_network(context, id,
- network)
- self._process_l3_update(context, net, network['network'])
- return net
-
- def delete_network(self, context, id):
- session = context.session
- with session.begin(subtransactions=True):
- binding = ovs_db_v2.get_network_binding(session, id)
- self._process_l3_delete(context, id)
- super(OVSNeutronPluginV2, self).delete_network(context, id)
- if binding.network_type in constants.TUNNEL_NETWORK_TYPES:
- ovs_db_v2.release_tunnel(session, binding.segmentation_id,
- self.tunnel_id_ranges)
- elif binding.network_type in [svc_constants.TYPE_VLAN,
- svc_constants.TYPE_FLAT]:
- ovs_db_v2.release_vlan(session, binding.physical_network,
- binding.segmentation_id,
- self.network_vlan_ranges)
- # the network_binding record is deleted via cascade from
- # the network record, so explicit removal is not necessary
- self.notifier.network_delete(context, id)
-
- def get_network(self, context, id, fields=None):
- session = context.session
- with session.begin(subtransactions=True):
- net = super(OVSNeutronPluginV2, self).get_network(context,
- id, None)
- return self._fields(net, fields)
-
- def get_networks(self, context, filters=None, fields=None,
- sorts=None,
- limit=None, marker=None, page_reverse=False):
- session = context.session
- with session.begin(subtransactions=True):
- nets = super(OVSNeutronPluginV2,
- self).get_networks(context, filters, None, sorts,
- limit, marker, page_reverse)
-
- return [self._fields(net, fields) for net in nets]
-
- def create_port(self, context, port):
- # Set port status as 'DOWN'. This will be updated by agent
- port['port']['status'] = q_const.PORT_STATUS_DOWN
- port_data = port['port']
- session = context.session
- with session.begin(subtransactions=True):
- self._ensure_default_security_group_on_port(context, port)
- sgids = self._get_security_groups_on_port(context, port)
- dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
- port = super(OVSNeutronPluginV2, self).create_port(context, port)
- self._process_portbindings_create_and_update(context,
- port_data, port)
- self._process_port_create_security_group(context, port, sgids)
- self._process_port_create_extra_dhcp_opts(context, port,
- dhcp_opts)
- port[addr_pair.ADDRESS_PAIRS] = (
- self._process_create_allowed_address_pairs(
- context, port,
- port_data.get(addr_pair.ADDRESS_PAIRS)))
- self.notify_security_groups_member_updated(context, port)
- return port
-
- def update_port(self, context, id, port):
- session = context.session
- need_port_update_notify = False
- with session.begin(subtransactions=True):
- original_port = super(OVSNeutronPluginV2, self).get_port(
- context, id)
- updated_port = super(OVSNeutronPluginV2, self).update_port(
- context, id, port)
- if addr_pair.ADDRESS_PAIRS in port['port']:
- need_port_update_notify |= (
- self.update_address_pairs_on_port(context, id, port,
- original_port,
- updated_port))
- need_port_update_notify |= self.update_security_group_on_port(
- context, id, port, original_port, updated_port)
- self._process_portbindings_create_and_update(context,
- port['port'],
- updated_port)
- need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
- context, id, port, updated_port)
-
- secgrp_member_updated = self.is_security_group_member_updated(
- context, original_port, updated_port)
- need_port_update_notify |= secgrp_member_updated
- if original_port['admin_state_up'] != updated_port['admin_state_up']:
- need_port_update_notify = True
-
- if need_port_update_notify:
- binding = ovs_db_v2.get_network_binding(None,
- updated_port['network_id'])
- self.notifier.port_update(context, updated_port,
- binding.network_type,
- binding.segmentation_id,
- binding.physical_network)
-
- if secgrp_member_updated:
- old_set = set(original_port.get(ext_sg.SECURITYGROUPS))
- new_set = set(updated_port.get(ext_sg.SECURITYGROUPS))
- self.notifier.security_groups_member_updated(
- context,
- old_set ^ new_set)
-
- return updated_port
-
- def delete_port(self, context, id, l3_port_check=True):
-
- # if needed, check to see if this is a port owned by
- # and l3-router. If so, we should prevent deletion.
- if l3_port_check:
- self.prevent_l3_port_deletion(context, id)
-
- session = context.session
- with session.begin(subtransactions=True):
- router_ids = self.disassociate_floatingips(
- context, id, do_notify=False)
- port = self.get_port(context, id)
- self._delete_port_security_group_bindings(context, id)
- super(OVSNeutronPluginV2, self).delete_port(context, id)
-
- # now that we've left db transaction, we are safe to notify
- self.notify_routers_updated(context, router_ids)
- self.notify_security_groups_member_updated(context, port)
diff --git a/neutron/tests/unit/nec/test_agent_scheduler.py b/neutron/tests/unit/nec/test_agent_scheduler.py
index 7e1b24db30..70f4f1e7ca 100644
--- a/neutron/tests/unit/nec/test_agent_scheduler.py
+++ b/neutron/tests/unit/nec/test_agent_scheduler.py
@@ -41,6 +41,7 @@ class NecDhcpAgentNotifierTestCase(
test_nec_plugin.NecPluginV2TestCaseBase):
plugin_str = test_nec_plugin.PLUGIN_NAME
+ l3_plugin = None
def setUp(self):
self.setup_nec_plugin_base()
@@ -64,6 +65,7 @@ class NecL3AgentSchedulerWithOpenFlowRouter(
test_nec_plugin.NecPluginV2TestCaseBase):
plugin_str = test_nec_plugin.PLUGIN_NAME
+ l3_plugin = None
def setUp(self):
self.setup_nec_plugin_base()
diff --git a/neutron/tests/unit/openvswitch/test_agent_scheduler.py b/neutron/tests/unit/openvswitch/test_agent_scheduler.py
index ed4f44023b..f73866f4e6 100644
--- a/neutron/tests/unit/openvswitch/test_agent_scheduler.py
+++ b/neutron/tests/unit/openvswitch/test_agent_scheduler.py
@@ -202,9 +202,9 @@ class OvsAgentSchedulerTestCaseBase(test_l3_plugin.L3NatTestCaseMixin,
AgentSchedulerTestMixIn,
test_plugin.NeutronDbPluginV2TestCase):
fmt = 'json'
- plugin_str = ('neutron.plugins.openvswitch.'
- 'ovs_neutron_plugin.OVSNeutronPluginV2')
- l3_plugin = None
+ plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin'
+ l3_plugin = ('neutron.tests.unit.test_l3_plugin.'
+ 'TestL3NatAgentSchedulingServicePlugin')
def setUp(self):
# Save the global RESOURCE_ATTRIBUTE_MAP before loading plugin
@@ -1105,8 +1105,7 @@ class OvsDhcpAgentNotifierTestCase(test_l3_plugin.L3NatTestCaseMixin,
test_agent_ext_plugin.AgentDBTestMixIn,
AgentSchedulerTestMixIn,
test_plugin.NeutronDbPluginV2TestCase):
- plugin_str = ('neutron.plugins.openvswitch.'
- 'ovs_neutron_plugin.OVSNeutronPluginV2')
+ plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin'
def setUp(self):
# Save the global RESOURCE_ATTRIBUTE_MAP before loading plugin
@@ -1271,9 +1270,9 @@ class OvsL3AgentNotifierTestCase(test_l3_plugin.L3NatTestCaseMixin,
test_agent_ext_plugin.AgentDBTestMixIn,
AgentSchedulerTestMixIn,
test_plugin.NeutronDbPluginV2TestCase):
- plugin_str = ('neutron.plugins.openvswitch.'
- 'ovs_neutron_plugin.OVSNeutronPluginV2')
- l3_plugin = None
+ plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin'
+ l3_plugin = ('neutron.tests.unit.test_l3_plugin.'
+ 'TestL3NatAgentSchedulingServicePlugin')
def setUp(self):
self.dhcp_notifier_cls_p = mock.patch(
@@ -1309,8 +1308,9 @@ class OvsL3AgentNotifierTestCase(test_l3_plugin.L3NatTestCaseMixin,
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
def test_router_add_to_l3_agent_notification(self):
- plugin = manager.NeutronManager.get_plugin()
- l3_notifier = plugin.agent_notifiers[constants.AGENT_TYPE_L3]
+ l3_plugin = (manager.NeutronManager.get_service_plugins()
+ [service_constants.L3_ROUTER_NAT])
+ l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
with mock.patch.object(l3_notifier, 'cast') as mock_l3:
with self.router() as router1:
self._register_agent_states()
@@ -1330,8 +1330,9 @@ class OvsL3AgentNotifierTestCase(test_l3_plugin.L3NatTestCaseMixin,
self._assert_notify(notifications, expected_event_type)
def test_router_remove_from_l3_agent_notification(self):
- plugin = manager.NeutronManager.get_plugin()
- l3_notifier = plugin.agent_notifiers[constants.AGENT_TYPE_L3]
+ l3_plugin = (manager.NeutronManager.get_service_plugins()
+ [service_constants.L3_ROUTER_NAT])
+ l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
with mock.patch.object(l3_notifier, 'cast') as mock_l3:
with self.router() as router1:
self._register_agent_states()
@@ -1351,8 +1352,9 @@ class OvsL3AgentNotifierTestCase(test_l3_plugin.L3NatTestCaseMixin,
self._assert_notify(notifications, expected_event_type)
def test_agent_updated_l3_agent_notification(self):
- plugin = manager.NeutronManager.get_plugin()
- l3_notifier = plugin.agent_notifiers[constants.AGENT_TYPE_L3]
+ l3_plugin = (manager.NeutronManager.get_service_plugins()
+ [service_constants.L3_ROUTER_NAT])
+ l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
with mock.patch.object(l3_notifier, 'cast') as mock_l3:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
diff --git a/neutron/tests/unit/openvswitch/test_openvswitch_plugin.py b/neutron/tests/unit/openvswitch/test_openvswitch_plugin.py
deleted file mode 100644
index af1c1d0425..0000000000
--- a/neutron/tests/unit/openvswitch/test_openvswitch_plugin.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo.config import cfg
-
-from neutron import context
-from neutron.extensions import portbindings
-from neutron.extensions import securitygroup as ext_sg
-from neutron.plugins.openvswitch import ovs_neutron_plugin
-from neutron.tests.unit import _test_extension_portbindings as test_bindings
-from neutron.tests.unit import test_db_plugin as test_plugin
-from neutron.tests.unit import test_extension_allowedaddresspairs as test_pair
-from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc
-
-import mock
-
-
-class OpenvswitchPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
-
- _plugin_name = ('neutron.plugins.openvswitch.'
- 'ovs_neutron_plugin.OVSNeutronPluginV2')
-
- def setUp(self):
- super(OpenvswitchPluginV2TestCase, self).setUp(self._plugin_name)
- self.port_create_status = 'DOWN'
-
-
-class TestOpenvswitchBasicGet(test_plugin.TestBasicGet,
- OpenvswitchPluginV2TestCase):
- pass
-
-
-class TestOpenvswitchV2HTTPResponse(test_plugin.TestV2HTTPResponse,
- OpenvswitchPluginV2TestCase):
- pass
-
-
-class TestOpenvswitchPortsV2(test_plugin.TestPortsV2,
- OpenvswitchPluginV2TestCase):
-
- def test_update_port_status_build(self):
- with self.port() as port:
- self.assertEqual(port['port']['status'], 'DOWN')
- self.assertEqual(self.port_create_status, 'DOWN')
-
-
-class TestOpenvswitchNetworksV2(test_plugin.TestNetworksV2,
- OpenvswitchPluginV2TestCase):
- pass
-
-
-class TestOpenvswitchPortBinding(OpenvswitchPluginV2TestCase,
- test_bindings.PortBindingsTestCase):
- VIF_TYPE = portbindings.VIF_TYPE_OVS
- HAS_PORT_FILTER = True
- ENABLE_SG = True
- FIREWALL_DRIVER = test_sg_rpc.FIREWALL_HYBRID_DRIVER
-
- def setUp(self, firewall_driver=None):
- test_sg_rpc.set_firewall_driver(self.FIREWALL_DRIVER)
- cfg.CONF.set_override(
- 'enable_security_group', self.ENABLE_SG,
- group='SECURITYGROUP')
- super(TestOpenvswitchPortBinding, self).setUp()
-
-
-class TestOpenvswitchPortBindingNoSG(TestOpenvswitchPortBinding):
- HAS_PORT_FILTER = False
- ENABLE_SG = False
- FIREWALL_DRIVER = test_sg_rpc.FIREWALL_NOOP_DRIVER
-
-
-class TestOpenvswitchPortBindingHost(
- OpenvswitchPluginV2TestCase,
- test_bindings.PortBindingsHostTestCaseMixin):
- pass
-
-
-class TestOpenvswitchAllowedAddressPairs(OpenvswitchPluginV2TestCase,
- test_pair.TestAllowedAddressPairs):
- pass
-
-
-class TestOpenvswitchUpdatePort(OpenvswitchPluginV2TestCase,
- ovs_neutron_plugin.OVSNeutronPluginV2):
-
- def test_update_port_add_remove_security_group(self):
- get_port_func = (
- 'neutron.db.db_base_plugin_v2.'
- 'NeutronDbPluginV2.get_port'
- )
- with mock.patch(get_port_func) as mock_get_port:
- mock_get_port.return_value = {
- ext_sg.SECURITYGROUPS: ["sg1", "sg2"],
- "admin_state_up": True,
- "fixed_ips": "fake_ip",
- "network_id": "fake_id"}
-
- update_port_func = (
- 'neutron.db.db_base_plugin_v2.'
- 'NeutronDbPluginV2.update_port'
- )
- with mock.patch(update_port_func) as mock_update_port:
- mock_update_port.return_value = {
- ext_sg.SECURITYGROUPS: ["sg2", "sg3"],
- "admin_state_up": True,
- "fixed_ips": "fake_ip",
- "network_id": "fake_id"}
-
- fake_func = (
- 'neutron.plugins.openvswitch.'
- 'ovs_db_v2.get_network_binding'
- )
- with mock.patch(fake_func) as mock_func:
- class MockBinding:
- network_type = "fake"
- segmentation_id = "fake"
- physical_network = "fake"
-
- mock_func.return_value = MockBinding()
-
- ctx = context.Context('', 'somebody')
- self.update_port(ctx, "id", {
- "port": {
- ext_sg.SECURITYGROUPS: [
- "sg2", "sg3"]}})
-
- sgmu = self.notifier.security_groups_member_updated
- sgmu.assert_called_with(ctx, set(['sg1', 'sg3']))
-
- def setUp(self):
- super(TestOpenvswitchUpdatePort, self).setUp()
- self.update_security_group_on_port = mock.MagicMock(return_value=True)
- self._process_portbindings_create_and_update = mock.MagicMock(
- return_value=True)
- self._update_extra_dhcp_opts_on_port = mock.MagicMock(
- return_value=True)
- self.update_address_pairs_on_port = mock.MagicMock(
- return_value=True)
-
- class MockNotifier:
- def __init__(self):
- self.port_update = mock.MagicMock(return_value=True)
- self.security_groups_member_updated = mock.MagicMock(
- return_value=True)
-
- self.notifier = MockNotifier()
diff --git a/neutron/tests/unit/openvswitch/test_ovs_db.py b/neutron/tests/unit/openvswitch/test_ovs_db.py
deleted file mode 100644
index a82f546b12..0000000000
--- a/neutron/tests/unit/openvswitch/test_ovs_db.py
+++ /dev/null
@@ -1,315 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-from oslo.config import cfg
-from oslo.db import exception as db_exc
-from oslo.db.sqlalchemy import session
-from six import moves
-import testtools
-from testtools import matchers
-
-from neutron.common import exceptions as n_exc
-from neutron.db import api as db
-from neutron.plugins.openvswitch import ovs_db_v2
-from neutron.plugins.openvswitch import ovs_models_v2 as ovs_models
-from neutron.tests.unit import test_db_plugin as test_plugin
-from neutron.tests.unit import testlib_api
-
-PHYS_NET = 'physnet1'
-PHYS_NET_2 = 'physnet2'
-VLAN_MIN = 10
-VLAN_MAX = 19
-VLAN_RANGES = {PHYS_NET: [(VLAN_MIN, VLAN_MAX)]}
-UPDATED_VLAN_RANGES = {PHYS_NET: [(VLAN_MIN + 5, VLAN_MAX + 5)],
- PHYS_NET_2: [(VLAN_MIN + 20, VLAN_MAX + 20)]}
-TUN_MIN = 100
-TUN_MAX = 109
-TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)]
-UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)]
-
-PLUGIN_NAME = ('neutron.plugins.openvswitch.'
- 'ovs_neutron_plugin.OVSNeutronPluginV2')
-
-
-class VlanAllocationsTest(testlib_api.SqlTestCase):
- def setUp(self):
- super(VlanAllocationsTest, self).setUp()
- ovs_db_v2.sync_vlan_allocations(VLAN_RANGES)
- self.session = db.get_session()
-
- def test_sync_vlan_allocations(self):
- self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MIN - 1))
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MIN).allocated)
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MIN + 1).allocated)
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MAX - 1).allocated)
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MAX).allocated)
- self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MAX + 1))
-
- ovs_db_v2.sync_vlan_allocations(UPDATED_VLAN_RANGES)
-
- self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MIN + 5 - 1))
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MIN + 5).
- allocated)
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MIN + 5 + 1).
- allocated)
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MAX + 5 - 1).
- allocated)
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MAX + 5).
- allocated)
- self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MAX + 5 + 1))
-
- self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET_2,
- VLAN_MIN + 20 - 1))
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET_2,
- VLAN_MIN + 20).
- allocated)
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET_2,
- VLAN_MIN + 20 + 1).
- allocated)
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET_2,
- VLAN_MAX + 20 - 1).
- allocated)
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET_2,
- VLAN_MAX + 20).
- allocated)
- self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET_2,
- VLAN_MAX + 20 + 1))
-
- ovs_db_v2.sync_vlan_allocations(VLAN_RANGES)
-
- self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MIN - 1))
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MIN).allocated)
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MIN + 1).allocated)
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MAX - 1).allocated)
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MAX).allocated)
- self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- VLAN_MAX + 1))
-
- self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET_2,
- VLAN_MIN + 20))
- self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET_2,
- VLAN_MAX + 20))
-
- def test_vlan_pool(self):
- vlan_ids = set()
- for x in moves.xrange(VLAN_MIN, VLAN_MAX + 1):
- physical_network, vlan_id = ovs_db_v2.reserve_vlan(self.session)
- self.assertEqual(physical_network, PHYS_NET)
- self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1))
- self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1))
- vlan_ids.add(vlan_id)
-
- with testtools.ExpectedException(n_exc.NoNetworkAvailable):
- physical_network, vlan_id = ovs_db_v2.reserve_vlan(self.session)
-
- ovs_db_v2.release_vlan(self.session, PHYS_NET, vlan_ids.pop(),
- VLAN_RANGES)
- physical_network, vlan_id = ovs_db_v2.reserve_vlan(self.session)
- self.assertEqual(physical_network, PHYS_NET)
- self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1))
- self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1))
- vlan_ids.add(vlan_id)
-
- for vlan_id in vlan_ids:
- ovs_db_v2.release_vlan(self.session, PHYS_NET, vlan_id,
- VLAN_RANGES)
-
- def test_specific_vlan_inside_pool(self):
- vlan_id = VLAN_MIN + 5
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- vlan_id).allocated)
- ovs_db_v2.reserve_specific_vlan(self.session, PHYS_NET, vlan_id)
- self.assertTrue(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- vlan_id).allocated)
-
- with testtools.ExpectedException(n_exc.VlanIdInUse):
- ovs_db_v2.reserve_specific_vlan(self.session, PHYS_NET, vlan_id)
-
- ovs_db_v2.release_vlan(self.session, PHYS_NET, vlan_id, VLAN_RANGES)
- self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- vlan_id).allocated)
-
- def test_specific_vlan_outside_pool(self):
- vlan_id = VLAN_MAX + 5
- self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET, vlan_id))
- ovs_db_v2.reserve_specific_vlan(self.session, PHYS_NET, vlan_id)
- self.assertTrue(ovs_db_v2.get_vlan_allocation(PHYS_NET,
- vlan_id).allocated)
-
- with testtools.ExpectedException(n_exc.VlanIdInUse):
- ovs_db_v2.reserve_specific_vlan(self.session, PHYS_NET, vlan_id)
-
- ovs_db_v2.release_vlan(self.session, PHYS_NET, vlan_id, VLAN_RANGES)
- self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET, vlan_id))
-
- def test_sync_with_allocated_false(self):
- vlan_ids = set()
- for x in moves.xrange(VLAN_MIN, VLAN_MAX + 1):
- physical_network, vlan_id = ovs_db_v2.reserve_vlan(self.session)
- self.assertEqual(physical_network, PHYS_NET)
- self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1))
- self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1))
- vlan_ids.add(vlan_id)
-
- ovs_db_v2.release_vlan(self.session, PHYS_NET, vlan_ids.pop(),
- VLAN_RANGES)
- ovs_db_v2.sync_vlan_allocations({})
-
-
-class TunnelAllocationsTest(testlib_api.SqlTestCase):
- def setUp(self):
- super(TunnelAllocationsTest, self).setUp()
- ovs_db_v2.sync_tunnel_allocations(TUNNEL_RANGES)
- self.session = db.get_session()
-
- def test_sync_tunnel_allocations(self):
- self.assertIsNone(ovs_db_v2.get_tunnel_allocation(TUN_MIN - 1))
- self.assertFalse(ovs_db_v2.get_tunnel_allocation(TUN_MIN).allocated)
- self.assertFalse(ovs_db_v2.get_tunnel_allocation(TUN_MIN + 1).
- allocated)
- self.assertFalse(ovs_db_v2.get_tunnel_allocation(TUN_MAX - 1).
- allocated)
- self.assertFalse(ovs_db_v2.get_tunnel_allocation(TUN_MAX).allocated)
- self.assertIsNone(ovs_db_v2.get_tunnel_allocation(TUN_MAX + 1))
-
- ovs_db_v2.sync_tunnel_allocations(UPDATED_TUNNEL_RANGES)
-
- self.assertIsNone(ovs_db_v2.get_tunnel_allocation(TUN_MIN + 5 - 1))
- self.assertFalse(ovs_db_v2.get_tunnel_allocation(TUN_MIN + 5).
- allocated)
- self.assertFalse(ovs_db_v2.get_tunnel_allocation(TUN_MIN + 5 + 1).
- allocated)
- self.assertFalse(ovs_db_v2.get_tunnel_allocation(TUN_MAX + 5 - 1).
- allocated)
- self.assertFalse(ovs_db_v2.get_tunnel_allocation(TUN_MAX + 5).
- allocated)
- self.assertIsNone(ovs_db_v2.get_tunnel_allocation(TUN_MAX + 5 + 1))
-
- def test_tunnel_pool(self):
- tunnel_ids = set()
- for x in moves.xrange(TUN_MIN, TUN_MAX + 1):
- tunnel_id = ovs_db_v2.reserve_tunnel(self.session)
- self.assertThat(tunnel_id, matchers.GreaterThan(TUN_MIN - 1))
- self.assertThat(tunnel_id, matchers.LessThan(TUN_MAX + 1))
- tunnel_ids.add(tunnel_id)
-
- with testtools.ExpectedException(n_exc.NoNetworkAvailable):
- tunnel_id = ovs_db_v2.reserve_tunnel(self.session)
-
- ovs_db_v2.release_tunnel(self.session, tunnel_ids.pop(), TUNNEL_RANGES)
- tunnel_id = ovs_db_v2.reserve_tunnel(self.session)
- self.assertThat(tunnel_id, matchers.GreaterThan(TUN_MIN - 1))
- self.assertThat(tunnel_id, matchers.LessThan(TUN_MAX + 1))
- tunnel_ids.add(tunnel_id)
-
- for tunnel_id in tunnel_ids:
- ovs_db_v2.release_tunnel(self.session, tunnel_id, TUNNEL_RANGES)
-
- def test_add_tunnel_endpoints(self):
- tun_1 = ovs_db_v2.add_tunnel_endpoint('192.168.0.1')
- tun_2 = ovs_db_v2.add_tunnel_endpoint('192.168.0.2')
- self.assertEqual(1, tun_1.id)
- self.assertEqual('192.168.0.1', tun_1.ip_address)
- self.assertEqual(2, tun_2.id)
- self.assertEqual('192.168.0.2', tun_2.ip_address)
-
- def test_specific_tunnel_inside_pool(self):
- tunnel_id = TUN_MIN + 5
- self.assertFalse(ovs_db_v2.get_tunnel_allocation(tunnel_id).allocated)
- ovs_db_v2.reserve_specific_tunnel(self.session, tunnel_id)
- self.assertTrue(ovs_db_v2.get_tunnel_allocation(tunnel_id).allocated)
-
- with testtools.ExpectedException(n_exc.TunnelIdInUse):
- ovs_db_v2.reserve_specific_tunnel(self.session, tunnel_id)
-
- ovs_db_v2.release_tunnel(self.session, tunnel_id, TUNNEL_RANGES)
- self.assertFalse(ovs_db_v2.get_tunnel_allocation(tunnel_id).allocated)
-
- def test_specific_tunnel_outside_pool(self):
- tunnel_id = TUN_MAX + 5
- self.assertIsNone(ovs_db_v2.get_tunnel_allocation(tunnel_id))
- ovs_db_v2.reserve_specific_tunnel(self.session, tunnel_id)
- self.assertTrue(ovs_db_v2.get_tunnel_allocation(tunnel_id).allocated)
-
- with testtools.ExpectedException(n_exc.TunnelIdInUse):
- ovs_db_v2.reserve_specific_tunnel(self.session, tunnel_id)
-
- ovs_db_v2.release_tunnel(self.session, tunnel_id, TUNNEL_RANGES)
- self.assertIsNone(ovs_db_v2.get_tunnel_allocation(tunnel_id))
-
- def test_add_tunnel_endpoint_create_new_endpoint(self):
- addr = '10.0.0.1'
- ovs_db_v2.add_tunnel_endpoint(addr)
- self.assertIsNotNone(self.session.query(ovs_models.TunnelEndpoint).
- filter_by(ip_address=addr).first())
-
- def test_add_tunnel_endpoint_retrieve_an_existing_endpoint(self):
- addr = '10.0.0.1'
- self.session.add(ovs_models.TunnelEndpoint(ip_address=addr, id=1))
- self.session.flush()
-
- tunnel = ovs_db_v2.add_tunnel_endpoint(addr)
- self.assertEqual(tunnel.id, 1)
- self.assertEqual(tunnel.ip_address, addr)
-
- def test_add_tunnel_endpoint_handle_duplicate_error(self):
- with mock.patch.object(session.Session, 'query') as query_mock:
- error = db_exc.DBDuplicateEntry(['id'])
- query_mock.side_effect = error
-
- with testtools.ExpectedException(n_exc.NeutronException):
- ovs_db_v2.add_tunnel_endpoint('10.0.0.1', 5)
- self.assertEqual(query_mock.call_count, 5)
-
-
-class NetworkBindingsTest(test_plugin.NeutronDbPluginV2TestCase):
- def setUp(self):
- cfg.CONF.set_override('network_vlan_ranges', ['physnet1:1000:2999'],
- group='OVS')
- super(NetworkBindingsTest, self).setUp(plugin=PLUGIN_NAME)
- self.session = db.get_session()
-
- def test_add_network_binding(self):
- params = {'provider:network_type': 'vlan',
- 'provider:physical_network': PHYS_NET,
- 'provider:segmentation_id': 1234}
- params['arg_list'] = tuple(params.keys())
- with self.network(**params) as network:
- TEST_NETWORK_ID = network['network']['id']
- binding = ovs_db_v2.get_network_binding(self.session,
- TEST_NETWORK_ID)
- self.assertIsNotNone(binding)
- self.assertEqual(binding.network_id, TEST_NETWORK_ID)
- self.assertEqual(binding.network_type, 'vlan')
- self.assertEqual(binding.physical_network, PHYS_NET)
- self.assertEqual(binding.segmentation_id, 1234)
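The removed allocation tests drove VLAN bookkeeping through the plugin-local 'OVS' option group. Under ML2 the same ranges belong to the VLAN type driver, so an equivalent test or deployment sets them in the ml2_type_vlan group instead. A minimal sketch, assuming the standard ML2 option group and module-level option registration (neither is shown in this diff):

    from oslo.config import cfg
    # Importing the type driver is assumed to register the ml2_type_vlan options.
    from neutron.plugins.ml2.drivers import type_vlan  # noqa

    # Counterpart of the removed set_override on the 'OVS' group: hand the same
    # physnet range to ML2's VLAN type driver.
    cfg.CONF.set_override('network_vlan_ranges', ['physnet1:1000:2999'],
                          group='ml2_type_vlan')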
diff --git a/neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py b/neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py
index 3013761bab..64ba72a905 100644
--- a/neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py
+++ b/neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py
@@ -32,8 +32,7 @@ from neutron.plugins.openvswitch.common import constants
from neutron.tests import base
-NOTIFIER = ('neutron.plugins.openvswitch.'
- 'ovs_neutron_plugin.AgentNotifierApi')
+NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi'
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
FAKE_MAC = '00:11:22:33:44:55'
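With the plugin-local notifier gone, the retained agent tests patch the ML2 RPC notifier path above. A minimal sketch of the patching pattern (only the NOTIFIER path comes from the hunk; the surrounding names are illustrative):

    import mock

    NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi'

    # Swap the notifier class for a mock so no RPC fanout leaves the test process.
    notifier_patch = mock.patch(NOTIFIER)
    notifier_cls = notifier_patch.start()       # the patched class (a MagicMock)
    notifier_cls.return_value = mock.Mock()     # instances handed out are mocks too
    # ... drive the agent code under test, assert against notifier_cls.return_value ...
    notifier_patch.stop()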
diff --git a/neutron/tests/unit/openvswitch/test_ovs_rpcapi.py b/neutron/tests/unit/openvswitch/test_ovs_rpcapi.py
deleted file mode 100644
index c443d6cd98..0000000000
--- a/neutron/tests/unit/openvswitch/test_ovs_rpcapi.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# Copyright 2012, Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Unit Tests for openvswitch rpc
-"""
-
-import fixtures
-
-from neutron.agent import rpc as agent_rpc
-from neutron.common import topics
-from neutron.openstack.common import context
-from neutron.plugins.openvswitch.common import constants
-from neutron.plugins.openvswitch import ovs_neutron_plugin as povs
-from neutron.tests import base
-
-
-class rpcApiTestCase(base.BaseTestCase):
-
- def _test_ovs_api(self, rpcapi, topic, method, rpc_method, **kwargs):
- ctxt = context.RequestContext('fake_user', 'fake_project')
- expected_retval = 'foo' if method == 'call' else None
- expected_kwargs = {}
- if topic:
- expected_kwargs['topic'] = topic
- if 'version' in kwargs:
- expected_kwargs['version'] = kwargs.pop('version')
- expected_msg = rpcapi.make_msg(method, **kwargs)
- if rpc_method == 'cast' and method == 'run_instance':
- kwargs['call'] = False
-
- self.fake_args = None
- self.fake_kwargs = None
-
- def _fake_rpc_method(*args, **kwargs):
- self.fake_args = args
- self.fake_kwargs = kwargs
- if expected_retval:
- return expected_retval
-
- self.useFixture(fixtures.MonkeyPatch(
- 'neutron.common.rpc.RpcProxy.' + rpc_method,
- _fake_rpc_method))
-
- retval = getattr(rpcapi, method)(ctxt, **kwargs)
-
- self.assertEqual(retval, expected_retval)
- expected_args = [ctxt, expected_msg]
-
- # skip the first argument which is 'self'
- for arg, expected_arg in zip(self.fake_args[1:], expected_args):
- self.assertEqual(arg, expected_arg)
- self.assertEqual(expected_kwargs, self.fake_kwargs)
-
- def test_delete_network(self):
- rpcapi = povs.AgentNotifierApi(topics.AGENT)
- self._test_ovs_api(rpcapi,
- topics.get_topic_name(topics.AGENT,
- topics.NETWORK,
- topics.DELETE),
- 'network_delete', rpc_method='fanout_cast',
- network_id='fake_request_spec')
-
- def test_port_update(self):
- rpcapi = povs.AgentNotifierApi(topics.AGENT)
- self._test_ovs_api(rpcapi,
- topics.get_topic_name(topics.AGENT,
- topics.PORT,
- topics.UPDATE),
- 'port_update', rpc_method='fanout_cast',
- port='fake_port',
- network_type='fake_network_type',
- segmentation_id='fake_segmentation_id',
- physical_network='fake_physical_network')
-
- def test_tunnel_update(self):
- rpcapi = povs.AgentNotifierApi(topics.AGENT)
- self._test_ovs_api(rpcapi,
- topics.get_topic_name(topics.AGENT,
- constants.TUNNEL,
- topics.UPDATE),
- 'tunnel_update', rpc_method='fanout_cast',
- tunnel_ip='fake_ip', tunnel_id='fake_id',
- tunnel_type=None)
-
- def test_device_details(self):
- rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
- self._test_ovs_api(rpcapi, None,
- 'get_device_details', rpc_method='call',
- device='fake_device',
- agent_id='fake_agent_id',
- host='fake_host')
-
- def test_devices_details_list(self):
- rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
- self._test_ovs_api(rpcapi, None,
- 'get_devices_details_list', rpc_method='call',
- devices=['fake_device1', 'fake_device2'],
- agent_id='fake_agent_id', host='fake_host',
- version='1.3')
-
- def test_update_device_down(self):
- rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
- self._test_ovs_api(rpcapi, None,
- 'update_device_down', rpc_method='call',
- device='fake_device',
- agent_id='fake_agent_id',
- host='fake_host')
-
- def test_tunnel_sync(self):
- rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
- self._test_ovs_api(rpcapi, None,
- 'tunnel_sync', rpc_method='call',
- tunnel_ip='fake_tunnel_ip',
- tunnel_type=None)
-
- def test_update_device_up(self):
- rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
- self._test_ovs_api(rpcapi, None,
- 'update_device_up', rpc_method='call',
- device='fake_device',
- agent_id='fake_agent_id',
- host='fake_host')
diff --git a/neutron/tests/unit/openvswitch/test_ovs_security_group.py b/neutron/tests/unit/openvswitch/test_ovs_security_group.py
deleted file mode 100644
index 83f91cb7b9..0000000000
--- a/neutron/tests/unit/openvswitch/test_ovs_security_group.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright 2013, Nachi Ueno, NTT MCL, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from neutron.api.v2 import attributes
-from neutron.extensions import securitygroup as ext_sg
-from neutron import manager
-from neutron.tests.unit import test_extension_security_group as test_sg
-from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc
-
-PLUGIN_NAME = ('neutron.plugins.openvswitch.'
- 'ovs_neutron_plugin.OVSNeutronPluginV2')
-NOTIFIER = ('neutron.plugins.openvswitch.'
- 'ovs_neutron_plugin.AgentNotifierApi')
-
-
-class OpenvswitchSecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase):
- _plugin_name = PLUGIN_NAME
-
- def setUp(self, plugin=None):
- test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER)
- notifier_p = mock.patch(NOTIFIER)
- notifier_cls = notifier_p.start()
- self.notifier = mock.Mock()
- notifier_cls.return_value = self.notifier
- self._attribute_map_bk_ = {}
- for item in attributes.RESOURCE_ATTRIBUTE_MAP:
- self._attribute_map_bk_[item] = (attributes.
- RESOURCE_ATTRIBUTE_MAP[item].
- copy())
- super(OpenvswitchSecurityGroupsTestCase, self).setUp(PLUGIN_NAME)
-
- def tearDown(self):
- super(OpenvswitchSecurityGroupsTestCase, self).tearDown()
- attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk_
-
-
-class TestOpenvswitchSGServerRpcCallBack(
- OpenvswitchSecurityGroupsTestCase,
- test_sg_rpc.SGServerRpcCallBackTestCase):
- pass
-
-
-class TestOpenvswitchSGServerRpcCallBackXML(
- OpenvswitchSecurityGroupsTestCase,
- test_sg_rpc.SGServerRpcCallBackTestCaseXML):
- pass
-
-
-class TestOpenvswitchSecurityGroups(OpenvswitchSecurityGroupsTestCase,
- test_sg.TestSecurityGroups,
- test_sg_rpc.SGNotificationTestMixin):
- def test_security_group_get_port_from_device(self):
- with self.network() as n:
- with self.subnet(n):
- with self.security_group() as sg:
- security_group_id = sg['security_group']['id']
- res = self._create_port(self.fmt, n['network']['id'])
- port = self.deserialize(self.fmt, res)
- fixed_ips = port['port']['fixed_ips']
- data = {'port': {'fixed_ips': fixed_ips,
- 'name': port['port']['name'],
- ext_sg.SECURITYGROUPS:
- [security_group_id]}}
-
- req = self.new_update_request('ports', data,
- port['port']['id'])
- res = self.deserialize(self.fmt,
- req.get_response(self.api))
- port_id = res['port']['id']
- plugin = manager.NeutronManager.get_plugin()
- port_dict = plugin.get_port_from_device(port_id)
- self.assertEqual(port_id, port_dict['id'])
- self.assertEqual([security_group_id],
- port_dict[ext_sg.SECURITYGROUPS])
- self.assertEqual([], port_dict['security_group_rules'])
- self.assertEqual([fixed_ips[0]['ip_address']],
- port_dict['fixed_ips'])
- self._delete('ports', port_id)
-
- def test_security_group_get_port_from_device_with_no_port(self):
- plugin = manager.NeutronManager.get_plugin()
- port_dict = plugin.get_port_from_device('bad_device_id')
- self.assertIsNone(port_dict)
-
-
-class TestOpenvswitchSecurityGroupsXML(TestOpenvswitchSecurityGroups):
- fmt = 'xml'
diff --git a/neutron/tests/unit/services/loadbalancer/test_agent_scheduler.py b/neutron/tests/unit/services/loadbalancer/test_agent_scheduler.py
index 1d78c03d2f..7d083922f6 100644
--- a/neutron/tests/unit/services/loadbalancer/test_agent_scheduler.py
+++ b/neutron/tests/unit/services/loadbalancer/test_agent_scheduler.py
@@ -61,8 +61,7 @@ class LBaaSAgentSchedulerTestCase(test_agent_ext_plugin.AgentDBTestMixIn,
test_db_loadbalancer.LoadBalancerTestMixin,
test_plugin.NeutronDbPluginV2TestCase):
fmt = 'json'
- plugin_str = ('neutron.plugins.openvswitch.'
- 'ovs_neutron_plugin.OVSNeutronPluginV2')
+ plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin'
def setUp(self):
# Save the global RESOURCE_ATTRIBUTE_MAP
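The scheduler tests now boot ML2 as the core plugin; plugin_str is consumed the same way the removed tests consumed PLUGIN_NAME, i.e. it is handed to the base test case so the named class is loaded as the core plugin. A minimal sketch of that pattern (the class name is illustrative):

    from neutron.tests.unit import test_plugin

    PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'   # value from the hunk above

    class ExampleSchedulerTestCase(test_plugin.NeutronDbPluginV2TestCase):
        def setUp(self):
            # Start the API/DB test fixture against ML2 instead of the removed plugin.
            super(ExampleSchedulerTestCase, self).setUp(plugin=PLUGIN)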
diff --git a/neutron/tests/unit/test_extension_extended_attribute.py b/neutron/tests/unit/test_extension_extended_attribute.py
index 49f7dc32de..5b008c346c 100644
--- a/neutron/tests/unit/test_extension_extended_attribute.py
+++ b/neutron/tests/unit/test_extension_extended_attribute.py
@@ -26,7 +26,7 @@ from neutron.api.v2 import attributes
from neutron.common import config
from neutron import manager
from neutron.plugins.common import constants
-from neutron.plugins.openvswitch import ovs_neutron_plugin
+from neutron.plugins.ml2 import plugin as ml2_plugin
from neutron import quota
from neutron.tests import base
from neutron.tests.unit.extensions import extendedattribute as extattr
@@ -41,7 +41,7 @@ extensions_path = ':'.join(neutron.tests.unit.extensions.__path__)
class ExtensionExtendedAttributeTestPlugin(
- ovs_neutron_plugin.OVSNeutronPluginV2):
+ ml2_plugin.Ml2Plugin):
supported_extension_aliases = [
'ext-obj-test', "extended-ext-attr"
diff --git a/neutron/tests/unit/test_l3_plugin.py b/neutron/tests/unit/test_l3_plugin.py
index 19838c3ec6..ecb1ead448 100644
--- a/neutron/tests/unit/test_l3_plugin.py
+++ b/neutron/tests/unit/test_l3_plugin.py
@@ -22,6 +22,7 @@ import netaddr
from oslo.config import cfg
from webob import exc
+from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.rpc.handlers import l3_rpc
from neutron.api.v2 import attributes
from neutron.common import constants as l3_constants
@@ -307,6 +308,13 @@ class TestL3NatAgentSchedulingServicePlugin(TestL3NatServicePlugin,
supported_extension_aliases = ["router", "l3_agent_scheduler"]
+ def __init__(self):
+ super(TestL3NatAgentSchedulingServicePlugin, self).__init__()
+ self.router_scheduler = importutils.import_object(
+ cfg.CONF.router_scheduler_driver)
+ self.agent_notifiers.update(
+ {l3_constants.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})
+
class L3NATdbonlyMixinTestCase(base.BaseTestCase):
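The added __init__ wires up scheduling that the test service plugin can no longer pick up from the removed monolithic plugin: it loads whatever class the router_scheduler_driver option names and registers the L3 agent notifier. A minimal sketch of the importutils call in isolation (the importutils import path is an assumption for this tree):

    from oslo.config import cfg
    from neutron.openstack.common import importutils

    # Instantiate the configured scheduler class from its dotted name, exactly as
    # the __init__ added above does.
    scheduler = importutils.import_object(cfg.CONF.router_scheduler_driver)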