summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSlawek Kaplonski <skaplons@redhat.com>2023-04-07 10:10:39 +0200
committerSlawek Kaplonski <skaplons@redhat.com>2023-04-21 16:22:42 +0200
commit670cc383e0345063c0bf9022f8829d5817457e00 (patch)
treef012101732ea0df493946b0ab93de10f4ab828d3
parent05ba4257dec5e80565aedb49685d8784424be461 (diff)
downloadneutron-670cc383e0345063c0bf9022f8829d5817457e00.tar.gz
[S-RBAC] Switch to new policies by default
As part of the Secure RBAC community goal, we should switch the options "enforce_new_defaults" and "enforce_scope" to be True by default. It will still be possible to fall back to the old policy rules by setting those config options to False in the Neutron config. Change-Id: I09c0026ccf87e6c0bb1fa59165c03dc508fba6fa
-rw-r--r--neutron/policy.py13
-rw-r--r--neutron/tests/functional/pecan_wsgi/test_functional.py7
-rw-r--r--neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/test_qos.py1
-rw-r--r--neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py21
-rw-r--r--neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py27
-rw-r--r--neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py6
-rw-r--r--neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py16
-rw-r--r--neutron/tests/functional/plugins/ml2/test_plugin.py3
-rw-r--r--neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py13
-rw-r--r--neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py78
-rw-r--r--neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py9
-rw-r--r--neutron/tests/functional/services/ovn_l3/test_plugin.py2
-rw-r--r--neutron/tests/functional/services/portforwarding/test_port_forwarding.py3
-rw-r--r--neutron/tests/unit/_test_extension_portbindings.py107
-rw-r--r--neutron/tests/unit/api/test_extensions.py3
-rw-r--r--neutron/tests/unit/api/v2/test_base.py102
-rw-r--r--neutron/tests/unit/db/metering/test_metering_db.py42
-rw-r--r--neutron/tests/unit/db/test_agentschedulers_db.py63
-rw-r--r--neutron/tests/unit/db/test_db_base_plugin_v2.py567
-rw-r--r--neutron/tests/unit/db/test_dvr_mac_db.py6
-rw-r--r--neutron/tests/unit/db/test_ipam_backend_mixin.py6
-rw-r--r--neutron/tests/unit/db/test_ipam_pluggable_backend.py3
-rw-r--r--neutron/tests/unit/db/test_l3_db.py3
-rw-r--r--neutron/tests/unit/db/test_ovn_revision_numbers_db.py2
-rw-r--r--neutron/tests/unit/extensions/test_address_group.py5
-rw-r--r--neutron/tests/unit/extensions/test_address_scope.py39
-rw-r--r--neutron/tests/unit/extensions/test_agent.py17
-rw-r--r--neutron/tests/unit/extensions/test_availability_zone.py21
-rw-r--r--neutron/tests/unit/extensions/test_data_plane_status.py12
-rw-r--r--neutron/tests/unit/extensions/test_default_subnetpools.py8
-rw-r--r--neutron/tests/unit/extensions/test_dns.py4
-rw-r--r--neutron/tests/unit/extensions/test_expose_port_forwarding_in_fip.py14
-rw-r--r--neutron/tests/unit/extensions/test_external_net.py23
-rw-r--r--neutron/tests/unit/extensions/test_extraroute.py19
-rw-r--r--neutron/tests/unit/extensions/test_flavors.py8
-rw-r--r--neutron/tests/unit/extensions/test_floating_ip_port_forwarding.py31
-rw-r--r--neutron/tests/unit/extensions/test_l3.py341
-rw-r--r--neutron/tests/unit/extensions/test_l3_conntrack_helper.py21
-rw-r--r--neutron/tests/unit/extensions/test_l3_ext_gw_mode.py17
-rw-r--r--neutron/tests/unit/extensions/test_l3_ndp_proxy.py51
-rw-r--r--neutron/tests/unit/extensions/test_local_ip.py16
-rw-r--r--neutron/tests/unit/extensions/test_network_ip_availability.py86
-rw-r--r--neutron/tests/unit/extensions/test_network_segment_range.py34
-rw-r--r--neutron/tests/unit/extensions/test_portsecurity.py39
-rw-r--r--neutron/tests/unit/extensions/test_providernet.py12
-rw-r--r--neutron/tests/unit/extensions/test_qos_gateway_ip.py8
-rw-r--r--neutron/tests/unit/extensions/test_quotasv2.py72
-rw-r--r--neutron/tests/unit/extensions/test_securitygroup.py88
-rw-r--r--neutron/tests/unit/extensions/test_segment.py138
-rw-r--r--neutron/tests/unit/extensions/test_servicetype.py3
-rw-r--r--neutron/tests/unit/extensions/test_subnet_onboard.py4
-rw-r--r--neutron/tests/unit/extensions/test_subnet_service_types.py5
-rw-r--r--neutron/tests/unit/extensions/test_subnetpool_prefix_ops.py4
-rw-r--r--neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py62
-rw-r--r--neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py163
-rw-r--r--neutron/tests/unit/plugins/ml2/drivers/ovn/test_db_migration.py6
-rw-r--r--neutron/tests/unit/plugins/ml2/extensions/test_dns_domain_keywords.py6
-rw-r--r--neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py2
-rw-r--r--neutron/tests/unit/plugins/ml2/extensions/test_tag_ports_during_bulk_creation.py22
-rw-r--r--neutron/tests/unit/plugins/ml2/test_extension_driver_api.py16
-rw-r--r--neutron/tests/unit/plugins/ml2/test_plugin.py211
-rw-r--r--neutron/tests/unit/plugins/ml2/test_port_binding.py36
-rw-r--r--neutron/tests/unit/plugins/ml2/test_security_group.py3
-rw-r--r--neutron/tests/unit/plugins/ml2/test_tracked_resources.py43
-rw-r--r--neutron/tests/unit/scheduler/test_l3_agent_scheduler.py9
-rw-r--r--neutron/tests/unit/services/metering/test_metering_plugin.py187
-rw-r--r--neutron/tests/unit/services/qos/test_qos_plugin.py12
-rw-r--r--neutron/tests/unit/services/revisions/test_revision_plugin.py8
-rw-r--r--releasenotes/notes/enable-enforce-scope-and-new-defaults-1f82a9eb71125f5d.yaml25
69 files changed, 1629 insertions, 1425 deletions
diff --git a/neutron/policy.py b/neutron/policy.py
index dca079b3c9..e4f0b093bd 100644
--- a/neutron/policy.py
+++ b/neutron/policy.py
@@ -51,12 +51,15 @@ _RESOURCE_FOREIGN_KEYS = {
'security_groups': 'security_group_id'
}
-
-# TODO(gmann): Remove setting the default value of config policy_file
-# once oslo_policy change the default value to 'policy.yaml'.
-# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
+# TODO(slaweq): Remove overriding the default value of config options
+# 'policy_file', 'enforce_scope', and 'enforce_new_defaults' once
+# oslo_policy change their default value to what is overridden here.
DEFAULT_POLICY_FILE = 'policy.yaml'
-opts.set_defaults(cfg.CONF, DEFAULT_POLICY_FILE)
+opts.set_defaults(
+ cfg.CONF,
+ DEFAULT_POLICY_FILE,
+ enforce_scope=True,
+ enforce_new_defaults=True)
def reset():
diff --git a/neutron/tests/functional/pecan_wsgi/test_functional.py b/neutron/tests/functional/pecan_wsgi/test_functional.py
index f5b5c7eb33..4698acf201 100644
--- a/neutron/tests/functional/pecan_wsgi/test_functional.py
+++ b/neutron/tests/functional/pecan_wsgi/test_functional.py
@@ -43,8 +43,11 @@ class InjectContext(base.ConfigurableMiddleware):
# Determine the tenant
tenant_id = req.headers.get('X_PROJECT_ID')
- # Suck out the roles
- roles = [r.strip() for r in req.headers.get('X_ROLES', '').split(',')]
+ roles = ['member', 'reader']
+ # Suck out the roles if any are set
+ custom_roles = req.headers.get('X_ROLES')
+ if custom_roles:
+ roles = [r.strip() for r in custom_roles.split(',')]
# Human-friendly names
tenant_name = req.headers.get('X_PROJECT_NAME')
diff --git a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/test_qos.py b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/test_qos.py
index 85f33b6276..8edcf60b11 100644
--- a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/test_qos.py
+++ b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/test_qos.py
@@ -226,6 +226,7 @@ class TestOVNClientQosExtensionEndToEnd(TestOVNClientQosExtensionBase):
arg_list = arg_list + (pnet.PHYSICAL_NETWORK,)
net_arg[pnet.PHYSICAL_NETWORK] = physnet
network = self._make_network(self.fmt, name, True,
+ as_admin=True,
arg_list=arg_list, **net_arg)
if cidr:
self._make_subnet(self.fmt, network, gateway, cidr,
diff --git a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py
index 6a23b3cc32..a4111d453a 100644
--- a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py
+++ b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py
@@ -55,9 +55,9 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
return row
def _create_network(self, name, external=False):
- data = {'network': {'name': name, 'tenant_id': self._tenant_id,
- extnet_apidef.EXTERNAL: external}}
- req = self.new_create_request('networks', data, self.fmt)
+ data = {'network': {'name': name, extnet_apidef.EXTERNAL: external}}
+ req = self.new_create_request('networks', data, self.fmt,
+ as_admin=True)
res = req.get_response(self.api)
return self.deserialize(self.fmt, res)['network']
@@ -70,7 +70,6 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
def _create_port(self, name, net_id, security_groups=None,
device_owner=None):
data = {'port': {'name': name,
- 'tenant_id': self._tenant_id,
'network_id': net_id}}
if security_groups is not None:
@@ -125,7 +124,6 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
data = {'subnet': {'name': name,
'network_id': net_id,
'ip_version': ip_version,
- 'tenant_id': self._tenant_id,
'cidr': cidr,
'enable_dhcp': True}}
data['subnet'].update(kwargs)
@@ -146,10 +144,13 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
return row
def _create_router(self, name, external_gateway_info=None):
- data = {'router': {'name': name, 'tenant_id': self._tenant_id}}
+ data = {'router': {'name': name}}
+ as_admin = False
if external_gateway_info is not None:
data['router']['external_gateway_info'] = external_gateway_info
- req = self.new_create_request('routers', data, self.fmt)
+ as_admin = bool(external_gateway_info.get('enable_snat'))
+ req = self.new_create_request('routers', data, self.fmt,
+ as_admin=as_admin)
res = req.get_response(self.api)
return self.deserialize(self.fmt, res)['router']
@@ -167,7 +168,6 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
def _create_security_group(self):
data = {'security_group': {'name': 'sgtest',
- 'tenant_id': self._tenant_id,
'description': 'SpongeBob Rocks!'}}
req = self.new_create_request('security-groups', data, self.fmt)
res = req.get_response(self.api)
@@ -183,8 +183,7 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
'protocol': n_const.PROTO_NAME_TCP,
'ethertype': n_const.IPv4,
'port_range_min': 22,
- 'port_range_max': 22,
- 'tenant_id': self._tenant_id}}
+ 'port_range_max': 22}}
req = self.new_create_request('security-group-rules', data, self.fmt)
res = req.get_response(self.api)
return self.deserialize(self.fmt, res)['security_group_rule']
@@ -772,8 +771,8 @@ class TestMaintenance(_TestMaintenanceHelper):
p1 = self._create_port('testp1', net1['id'])
logical_ip = p1['fixed_ips'][0]['ip_address']
fip_info = {'floatingip': {
- 'description': 'test_fip',
'tenant_id': self._tenant_id,
+ 'description': 'test_fip',
'floating_network_id': ext_net['id'],
'port_id': p1['id'],
'fixed_ip_address': logical_ip}}
diff --git a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py
index 892280f3bc..67aee6ba9a 100644
--- a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py
+++ b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py
@@ -365,7 +365,7 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
port_req.get_response(self.api)
# External network and subnet
- e1 = self._make_network(self.fmt, 'e1', True,
+ e1 = self._make_network(self.fmt, 'e1', True, as_admin=True,
arg_list=('router:external',
'provider:network_type',
'provider:physical_network'),
@@ -1608,20 +1608,23 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
net = self.deserialize(self.fmt, res)['network']
self._create_subnet(self.fmt, net['id'], '10.0.0.0/24')
- res = self._create_qos_policy(self.fmt, 'qos_maxbw')
+ res = self._create_qos_policy(self.fmt, 'qos_maxbw', is_admin=True)
qos_maxbw = self.deserialize(self.fmt, res)['policy']
self._create_qos_rule(self.fmt, qos_maxbw['id'],
qos_const.RULE_TYPE_BANDWIDTH_LIMIT,
- max_kbps=1000, max_burst_kbps=800)
+ max_kbps=1000, max_burst_kbps=800,
+ is_admin=True)
self._create_qos_rule(self.fmt, qos_maxbw['id'],
qos_const.RULE_TYPE_BANDWIDTH_LIMIT,
direction=constants.INGRESS_DIRECTION,
- max_kbps=700, max_burst_kbps=600)
+ max_kbps=700, max_burst_kbps=600,
+ is_admin=True)
- res = self._create_qos_policy(self.fmt, 'qos_maxbw')
+ res = self._create_qos_policy(self.fmt, 'qos_maxbw', is_admin=True)
qos_dscp = self.deserialize(self.fmt, res)['policy']
self._create_qos_rule(self.fmt, qos_dscp['id'],
- qos_const.RULE_TYPE_DSCP_MARKING, dscp_mark=14)
+ qos_const.RULE_TYPE_DSCP_MARKING, dscp_mark=14,
+ is_admin=True)
res = self._create_port(
self.fmt, net['id'], arg_list=('qos_policy_id', ),
@@ -1677,7 +1680,7 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
{'floatingip': body})
def test_sync_fip_qos_policies(self):
- res = self._create_network(self.fmt, 'n1_ext', True,
+ res = self._create_network(self.fmt, 'n1_ext', True, as_admin=True,
arg_list=('router:external', ),
**{'router:external': True})
net_ext = self.deserialize(self.fmt, res)['network']
@@ -1687,15 +1690,17 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
net_int = self.deserialize(self.fmt, res)['network']
self._create_subnet(self.fmt, net_int['id'], '10.10.0.0/24')
- res = self._create_qos_policy(self.fmt, 'qos_maxbw')
+ res = self._create_qos_policy(self.fmt, 'qos_maxbw', is_admin=True)
qos_maxbw = self.deserialize(self.fmt, res)['policy']
self._create_qos_rule(self.fmt, qos_maxbw['id'],
qos_const.RULE_TYPE_BANDWIDTH_LIMIT,
- max_kbps=1000, max_burst_kbps=800)
+ max_kbps=1000, max_burst_kbps=800,
+ is_admin=True)
self._create_qos_rule(self.fmt, qos_maxbw['id'],
qos_const.RULE_TYPE_BANDWIDTH_LIMIT,
direction=constants.INGRESS_DIRECTION,
- max_kbps=700, max_burst_kbps=600)
+ max_kbps=700, max_burst_kbps=600,
+ is_admin=True)
# Create a router with net_ext as GW network and net_int as internal
# one, and a floating IP on the external network.
@@ -1750,7 +1755,7 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
self._validate_qos_records()
def test_fip_nat_revert_to_stateful(self):
- res = self._create_network(self.fmt, 'n1_ext', True,
+ res = self._create_network(self.fmt, 'n1_ext', True, as_admin=True,
arg_list=('router:external', ),
**{'router:external': True})
net_ext = self.deserialize(self.fmt, res)['network']
diff --git a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py
index 5a6073cfb2..1eea038c01 100644
--- a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py
+++ b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py
@@ -103,12 +103,13 @@ class TestNBDbMonitor(base.TestOVNFunctionalBase):
allowedaddresspairs.ADDRESS_PAIRS: allowed_address_pairs
}
port_res = self._create_port(self.fmt, self.net['network']['id'],
+ is_admin=True,
arg_list=arg_list, **host_arg)
port = self.deserialize(self.fmt, port_res)['port']
return port
def _create_fip(self, port, fip_address):
- e1 = self._make_network(self.fmt, 'e1', True,
+ e1 = self._make_network(self.fmt, 'e1', True, as_admin=True,
arg_list=('router:external',
'provider:network_type',
'provider:physical_network'),
@@ -403,7 +404,8 @@ class TestSBDbMonitor(base.TestOVNFunctionalBase, test_l3.L3NatTestCaseMixin):
kwargs = {'arg_list': (external_net.EXTERNAL,),
external_net.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs)
+ ext_net = self._make_network(self.fmt, 'ext_net', True, as_admin=True,
+ **kwargs)
self._make_subnet(self.fmt, ext_net, '10.251.0.1', '10.251.0.0/24',
enable_dhcp=True)
router = self._make_router(self.fmt, self._tenant_id)
diff --git a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py
index ee92f68dbd..e1abd67c8e 100644
--- a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py
+++ b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py
@@ -101,20 +101,21 @@ class TestPortBinding(base.TestOVNFunctionalBase):
'network_id': self.n1['network']['id'],
'tenant_id': self._tenant_id})
- port_req = self.new_create_request('ports', port_data, self.fmt)
+ port_req = self.new_create_request('ports', port_data, self.fmt,
+ as_admin=True)
port_res = port_req.get_response(self.api)
p = self.deserialize(self.fmt, port_res)
port_id = p['port']['id']
else:
port_req = self.new_update_request('ports', port_data, port_id,
- self.fmt)
+ self.fmt, as_admin=True)
port_res = port_req.get_response(self.api)
self.deserialize(self.fmt, port_res)
return port_id
def _port_show(self, port_id):
- port_req = self.new_show_request('ports', port_id)
+ port_req = self.new_show_request('ports', port_id, as_admin=True)
port_res = port_req.get_response(self.api)
return self.deserialize(self.fmt, port_res)
@@ -715,13 +716,13 @@ class TestExternalPorts(base.TestOVNFunctionalBase):
def _test_external_port_create_switchdev(self, vnic_type):
port_data = {
'port': {'network_id': self.n1['network']['id'],
- 'tenant_id': self._tenant_id,
portbindings.VNIC_TYPE: vnic_type,
ovn_const.OVN_PORT_BINDING_PROFILE: {
ovn_const.PORT_CAP_PARAM: [
ovn_const.PORT_CAP_SWITCHDEV]}}}
- port_req = self.new_create_request('ports', port_data, self.fmt)
+ port_req = self.new_create_request('ports', port_data, self.fmt,
+ as_admin=True)
port_res = port_req.get_response(self.api)
port = self.deserialize(self.fmt, port_res)['port']
@@ -769,7 +770,8 @@ class TestExternalPorts(base.TestOVNFunctionalBase):
ovn_const.PORT_CAP_PARAM: [
ovn_const.PORT_CAP_SWITCHDEV]}}}
port_req = self.new_update_request(
- 'ports', port_upt_data, port['id'], self.fmt)
+ 'ports', port_upt_data, port['id'], self.fmt,
+ as_admin=True)
port_res = port_req.get_response(self.api)
port = self.deserialize(self.fmt, port_res)['port']
@@ -948,7 +950,7 @@ class TestProvnetPorts(base.TestOVNFunctionalBase):
def test_network_segments_localnet_ports(self):
n1 = self._make_network(
- self.fmt, 'n1', True,
+ self.fmt, 'n1', True, as_admin=True,
arg_list=('provider:network_type',
'provider:segmentation_id',
'provider:physical_network'),
diff --git a/neutron/tests/functional/plugins/ml2/test_plugin.py b/neutron/tests/functional/plugins/ml2/test_plugin.py
index 667ef651a4..16fde03b09 100644
--- a/neutron/tests/functional/plugins/ml2/test_plugin.py
+++ b/neutron/tests/functional/plugins/ml2/test_plugin.py
@@ -50,6 +50,7 @@ class TestMl2PortBinding(ml2_test_base.ML2TestFramework,
with self.subnet(network=network) as subnet:
with self.port(
subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE,
+ is_admin=True,
arg_list=(portbindings.HOST_ID, 'admin_state_up',),
**self.host_args) as port:
# Note: Port creation invokes _bind_port_if_needed(),
@@ -65,6 +66,7 @@ class TestMl2PortBinding(ml2_test_base.ML2TestFramework,
with self.subnet(network=network) as subnet:
with self.port(
subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE,
+ is_admin=True,
arg_list=(portbindings.HOST_ID, 'admin_state_up',),
**self.host_args) as port:
# Since the agent is dead, expect binding to fail
@@ -88,6 +90,7 @@ class TestMl2PortBinding(ml2_test_base.ML2TestFramework,
with self.subnet(network=network) as subnet:
with self.port(
subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE,
+ is_admin=True,
arg_list=(portbindings.HOST_ID, 'admin_state_up',),
**self.host_args) as port:
pass
diff --git a/neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py b/neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py
index 0b6019fefb..ac2aac1cdc 100644
--- a/neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py
+++ b/neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py
@@ -134,21 +134,25 @@ class L3DvrHATestCase(test_l3_dvr_router_plugin.L3DvrTestCase):
self.subnet(cidr='30.0.0.0/24') as subnet2, \
self.subnet(cidr='40.0.0.0/24') as subnet3, \
self.port(subnet=subnet1,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST1}), \
self.port(subnet=subnet2,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST2}), \
self.port(subnet=subnet3,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_NETWORK_PREFIX,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST3}):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
with mock.patch.object(self.l3_plugin.l3_rpc_notifier.client,
'prepare') as mock_prepare:
# add external gateway to router
@@ -231,7 +235,7 @@ class L3DvrHATestCase(test_l3_dvr_router_plugin.L3DvrTestCase):
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.subnet() as subnet, \
- self.network(**kwargs) as ext_net, \
+ self.network(as_admin=True, **kwargs) as ext_net, \
self.subnet(network=ext_net, cidr='20.0.0.0/24'):
gw_info = {'network_id': ext_net['network']['id']}
self.l3_plugin.update_router(
@@ -256,7 +260,7 @@ class L3DvrHATestCase(test_l3_dvr_router_plugin.L3DvrTestCase):
router = self._create_router(distributed=True, ha=True)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as ext_net, \
+ with self.network(as_admin=True, **kwargs) as ext_net, \
self.subnet(network=ext_net), \
self.subnet(cidr='20.0.0.0/24') as subnet, \
self.port(subnet=subnet,
@@ -300,7 +304,8 @@ class L3DvrHATestCase(test_l3_dvr_router_plugin.L3DvrTestCase):
def _create_external_network(self):
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs)
+ ext_net = self._make_network(self.fmt, 'ext_net', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.0.0.1', '10.0.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
diff --git a/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py b/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py
index 17e37cda8d..097be093a3 100644
--- a/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py
+++ b/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py
@@ -24,7 +24,6 @@ from neutron_lib.callbacks import resources
from neutron_lib.db import api as db_api
from neutron_lib import constants
-from neutron_lib import context
from neutron.api.rpc.handlers import l3_rpc
from neutron.tests.common import helpers
@@ -112,7 +111,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.fmt, net1, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True)
subnet2 = self._make_subnet(
self.fmt, net1, '10.2.0.1', '10.2.0.0/24', enable_dhcp=True)
- ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs)
+ ext_net = self._make_network(self.fmt, 'ext_net', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '20.0.0.1', '20.0.0.0/24', enable_dhcp=True)
# Create first router and add an interface
@@ -170,7 +170,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.subnet(cidr='20.0.0.0/24') as subnet2:
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as ext_net, \
+ with self.network(as_admin=True, **kwargs) as ext_net, \
self.subnet(network=ext_net,
cidr='30.0.0.0/24'):
router = self._create_router()
@@ -287,7 +287,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.fmt, net1, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True)
subnet2 = self._make_subnet(
self.fmt, net2, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True)
- ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs)
+ ext_net = self._make_network(self.fmt, 'ext_net', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '20.0.0.1', '20.0.0.0/24', enable_dhcp=True)
# Create first router and add an interface
@@ -358,7 +359,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
router = self._create_router(distributed=dvr)
self.l3_plugin.update_router(
@@ -447,7 +449,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
router1 = self._create_router(distributed=dvr)
router2 = self._create_router(distributed=dvr)
@@ -559,7 +562,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
router = self._create_router(distributed=dvr)
self.l3_plugin.update_router(
@@ -636,7 +640,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
def test_router_with_ipv4_and_multiple_ipv6_on_same_network(self):
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, '', True, **kwargs)
+ ext_net = self._make_network(self.fmt, '', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.0.0.1', '10.0.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@@ -710,7 +715,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}]
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, '', True, **kwargs)
+ ext_net = self._make_network(self.fmt, '', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@@ -820,7 +826,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
private_net1 = self._make_network(self.fmt, 'net1', True)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, '', True, **kwargs)
+ ext_net = self._make_network(self.fmt, '', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@@ -904,7 +911,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
private_net1 = self._make_network(self.fmt, 'net1', True)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, '', True, **kwargs)
+ ext_net = self._make_network(self.fmt, '', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@@ -982,7 +990,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
private_net1 = self._make_network(self.fmt, 'net1', True)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, '', True, **kwargs)
+ ext_net = self._make_network(self.fmt, '', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@@ -1067,7 +1076,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}]
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, '', True, **kwargs)
+ ext_net = self._make_network(self.fmt, '', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@@ -1200,7 +1210,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
private_net1 = self._make_network(self.fmt, 'net1', True)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, '', True, **kwargs)
+ ext_net = self._make_network(self.fmt, '', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@@ -1243,7 +1254,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}]
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, '', True, **kwargs)
+ ext_net = self._make_network(self.fmt, '', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@@ -1382,7 +1394,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
router = self._create_router()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as ext_net,\
+ with self.network(as_admin=True, **kwargs) as ext_net,\
self.subnet(network=ext_net),\
self.subnet(cidr='20.0.0.0/24') as subnet,\
self.port(subnet=subnet):
@@ -1412,7 +1424,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
router = self._create_router()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as ext_net,\
+ with self.network(as_admin=True, **kwargs) as ext_net,\
self.subnet(network=ext_net),\
self.subnet(cidr='20.0.0.0/24') as subnet,\
self.port(subnet=subnet,
@@ -1450,7 +1462,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
helpers.register_l3_agent(
host=HOST, agent_mode=constants.L3_AGENT_MODE_DVR)
router = self._create_router()
- with self.network(shared=True) as net,\
+ with self.network(as_admin=True, shared=True) as net,\
self.subnet(network=net) as subnet,\
self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
@@ -1465,9 +1477,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
with mock.patch.object(self.l3_plugin.l3_rpc_notifier,
'router_removed_from_agent') as remove_mock:
- ctx = context.Context(
- '', non_admin_tenant) if non_admin_port else self.context
- self._delete('ports', port['port']['id'], neutron_context=ctx)
+ self._delete('ports', port['port']['id'],
+ tenant_id=non_admin_tenant)
remove_mock.assert_called_once_with(
mock.ANY, router['id'], HOST)
@@ -1501,13 +1512,15 @@ class L3DvrTestCase(L3DvrTestCaseBase):
with self.subnet() as ext_subnet,\
self.subnet(cidr='20.0.0.0/24') as subnet1,\
self.port(subnet=subnet1,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST1}) as vm_port:
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
# add external gateway to router
self.l3_plugin.update_router(
self.context, router['id'],
@@ -1576,21 +1589,25 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.subnet(cidr='30.0.0.0/24') as subnet2,\
self.subnet(cidr='40.0.0.0/24') as subnet3,\
self.port(subnet=subnet1,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST1}),\
self.port(subnet=subnet2,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST2}),\
self.port(subnet=subnet3,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_NETWORK_PREFIX,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST3}):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
with mock.patch.object(self.l3_plugin.l3_rpc_notifier.client,
'prepare') as mock_prepare:
@@ -1661,7 +1678,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.subnet() as subnet,\
- self.network(**kwargs) as ext_net,\
+ self.network(as_admin=True, **kwargs) as ext_net,\
self.subnet(network=ext_net, cidr='20.0.0.0/24'):
gw_info = {'network_id': ext_net['network']['id']}
request_body = {
@@ -1693,7 +1710,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
router = self._create_router()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as ext_net,\
+ with self.network(as_admin=True, **kwargs) as ext_net,\
self.subnet(network=ext_net),\
self.subnet(cidr='20.0.0.0/24') as subnet,\
self.port(subnet=subnet,
@@ -1796,10 +1813,12 @@ class L3DvrTestCase(L3DvrTestCaseBase):
with self.subnet(cidr='20.0.0.0/24') as subnet1,\
self.subnet(cidr='30.0.0.0/24') as subnet2,\
self.port(subnet=subnet1,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: host}),\
self.port(subnet=subnet2,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: host}):
@@ -1834,10 +1853,12 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.subnet(cidr='20.0.0.0/24') as subnet1,\
self.subnet(cidr='30.0.0.0/24') as subnet2,\
self.port(subnet=subnet1,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: host}),\
self.port(subnet=subnet2,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: host}):
@@ -1883,7 +1904,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
# add external gateway to router
self.l3_plugin.update_router(
self.context, router3['id'],
@@ -1915,6 +1937,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.port(subnet=wan_subnet) as wan_port1,\
self.port(subnet=wan_subnet) as wan_port2,\
self.port(subnet=subnet1,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: host}):
@@ -1958,6 +1981,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
arg_list = (portbindings.HOST_ID,)
with self.subnet() as subnet,\
self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST1}):
@@ -2067,7 +2091,7 @@ class L3DvrTestCaseMigration(L3DvrTestCaseBase):
with self.subnet() as subnet1:
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as ext_net, \
+ with self.network(as_admin=True, **kwargs) as ext_net, \
self.subnet(network=ext_net,
cidr='30.0.0.0/24'):
router = self._create_router(distributed=False)
diff --git a/neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py b/neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py
index 57c7978b8f..fb5bd58c68 100644
--- a/neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py
+++ b/neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py
@@ -28,7 +28,7 @@ class LogApiTestCaseBase(functional_base.TestOVNFunctionalBase):
super().setUp()
self.log_driver = self.mech_driver.log_driver
self._check_is_supported()
- self.ctxt = context.Context('admin', 'fake_tenant')
+ self.ctxt = context.Context('admin', self._tenant_id)
def _check_is_supported(self):
if not self.log_driver.network_logging_supported(self.nb_api):
@@ -110,7 +110,6 @@ class LogApiTestCaseComplex(LogApiTestCaseBase):
def _create_port(self, name, net_id, security_groups):
data = {'port': {'name': name,
- 'tenant_id': self.ctxt.project_id,
'network_id': net_id,
'security_groups': security_groups}}
req = self.new_create_request('ports', data, self.fmt)
@@ -118,8 +117,7 @@ class LogApiTestCaseComplex(LogApiTestCaseBase):
return self.deserialize(self.fmt, res)['port']['id']
def _create_security_group(self, name):
- data = {'security_group': {'name': name,
- 'tenant_id': self.ctxt.project_id}}
+ data = {'security_group': {'name': name}}
req = self.new_create_request('security-groups', data, self.fmt)
res = req.get_response(self.api)
return self.deserialize(self.fmt, res)['security_group']['id']
@@ -130,8 +128,7 @@ class LogApiTestCaseComplex(LogApiTestCaseBase):
'protocol': n_const.PROTO_NAME_TCP,
'ethertype': n_const.IPv4,
'port_range_min': tcp_port,
- 'port_range_max': tcp_port,
- 'tenant_id': self.ctxt.project_id}}
+ 'port_range_max': tcp_port}}
req = self.new_create_request('security-group-rules', data, self.fmt)
res = req.get_response(self.api)
return self.deserialize(self.fmt, res)['security_group_rule']['id']
diff --git a/neutron/tests/functional/services/ovn_l3/test_plugin.py b/neutron/tests/functional/services/ovn_l3/test_plugin.py
index 91859c02ef..fbaa0b8ade 100644
--- a/neutron/tests/functional/services/ovn_l3/test_plugin.py
+++ b/neutron/tests/functional/services/ovn_l3/test_plugin.py
@@ -63,7 +63,7 @@ class TestRouter(base.TestOVNFunctionalBase):
if physnet:
arg_list = arg_list + (pnet.PHYSICAL_NETWORK,)
net_arg[pnet.PHYSICAL_NETWORK] = physnet
- network = self._make_network(self.fmt, name, True,
+ network = self._make_network(self.fmt, name, True, as_admin=True,
arg_list=arg_list, **net_arg)
if cidr:
self._make_subnet(self.fmt, network, gateway, cidr,
diff --git a/neutron/tests/functional/services/portforwarding/test_port_forwarding.py b/neutron/tests/functional/services/portforwarding/test_port_forwarding.py
index 9881c1c4a0..e0b4dbf733 100644
--- a/neutron/tests/functional/services/portforwarding/test_port_forwarding.py
+++ b/neutron/tests/functional/services/portforwarding/test_port_forwarding.py
@@ -98,7 +98,8 @@ class PortForwardingTestCase(PortForwardingTestCaseBase):
def _prepare_env(self):
self.router = self._create_router(distributed=True)
self.ext_net = self._create_network(
- self.fmt, 'ext-net', True, arg_list=("router:external",),
+ self.fmt, 'ext-net', True, as_admin=True,
+ arg_list=("router:external",),
**{"router:external": True}).json['network']
self.ext_subnet = self._create_subnet(
self.fmt, self.ext_net['id'], '172.24.2.0/24').json['subnet']
diff --git a/neutron/tests/unit/_test_extension_portbindings.py b/neutron/tests/unit/_test_extension_portbindings.py
index ea18b96e10..763e28b6d8 100644
--- a/neutron/tests/unit/_test_extension_portbindings.py
+++ b/neutron/tests/unit/_test_extension_portbindings.py
@@ -55,24 +55,16 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
self.assertNotIn(portbindings.VIF_TYPE, port)
self.assertNotIn(portbindings.VIF_DETAILS, port)
- def _get_non_admin_context(self):
- return context.Context(user_id=None,
- tenant_id=self._tenant_id,
- is_admin=False)
-
def test_port_vif_details(self):
- with self.port(name='name') as port:
+ with self.port(is_admin=True, name='name') as port:
port_id = port['port']['id']
# Check a response of create_port
self._check_response_portbindings(port['port'])
# Check a response of get_port
- ctx = context.get_admin_context()
- port = self._show('ports', port_id, neutron_context=ctx)['port']
+ port = self._show('ports', port_id, as_admin=True)['port']
self._check_response_portbindings(port)
# By default user is admin - now test non admin user
- ctx = self._get_non_admin_context()
- non_admin_port = self._show(
- 'ports', port_id, neutron_context=ctx)['port']
+ non_admin_port = self._show('ports', port_id)['port']
self._check_response_no_portbindings(non_admin_port)
def test_ports_vif_details(self):
@@ -83,9 +75,7 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
self.assertEqual(len(ports), 2)
for port in ports:
self._check_response_portbindings(port)
- # By default user is admin - now test non admin user
- ctx = self._get_non_admin_context()
- ports = self._list('ports', neutron_context=ctx)['ports']
+ ports = self._list('ports')['ports']
self.assertEqual(len(ports), 2)
for non_admin_port in ports:
self._check_response_no_portbindings(non_admin_port)
@@ -97,11 +87,12 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _test_create_port_binding_profile(self, profile):
profile_arg = {portbindings.PROFILE: profile}
- with self.port(arg_list=(portbindings.PROFILE,),
+ with self.port(is_admin=True,
+ arg_list=(portbindings.PROFILE,),
**profile_arg) as port:
port_id = port['port']['id']
self._check_port_binding_profile(port['port'], profile)
- port = self._show('ports', port_id)
+ port = self._show('ports', port_id, as_admin=True)
self._check_port_binding_profile(port['port'], profile)
def test_create_port_binding_profile_none(self):
@@ -112,14 +103,13 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _test_update_port_binding_profile(self, profile):
profile_arg = {portbindings.PROFILE: profile}
- with self.port() as port:
+ with self.port(is_admin=True) as port:
self._check_port_binding_profile(port['port'])
port_id = port['port']['id']
- ctx = context.get_admin_context()
port = self._update('ports', port_id, {'port': profile_arg},
- neutron_context=ctx)['port']
+ as_admin=True)['port']
self._check_port_binding_profile(port, profile)
- port = self._show('ports', port_id)['port']
+ port = self._show('ports', port_id, as_admin=True)['port']
self._check_port_binding_profile(port, profile)
def test_update_port_binding_profile_none(self):
@@ -131,18 +121,16 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def test_port_create_portinfo_non_admin(self):
profile_arg = {portbindings.PROFILE: {dummy_plugin.RESOURCE_NAME:
dummy_plugin.RESOURCE_NAME}}
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
# succeed without binding:profile
- with self.port(subnet=subnet1,
- set_context=True, tenant_id='test'):
+ with self.port(subnet=subnet1):
pass
# fail with binding:profile
try:
with self.port(subnet=subnet1,
expected_res_status=403,
arg_list=(portbindings.PROFILE,),
- set_context=True, tenant_id='test',
**profile_arg):
pass
except exc.HTTPClientError:
@@ -156,11 +144,9 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
with self.port(subnet=subnet1) as port:
# By default user is admin - now test non admin user
port_id = port['port']['id']
- ctx = self._get_non_admin_context()
port = self._update('ports', port_id,
{'port': profile_arg},
- expected_code=exc.HTTPForbidden.code,
- neutron_context=ctx)
+ expected_code=exc.HTTPForbidden.code)
class PortBindingsHostTestCaseMixin(object):
@@ -192,74 +178,70 @@ class PortBindingsHostTestCaseMixin(object):
def test_port_vif_host(self):
host_arg = {portbindings.HOST_ID: self.hostname}
- with self.port(name='name', arg_list=(portbindings.HOST_ID,),
+ with self.port(name='name', is_admin=True,
+ arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
port_id = port['port']['id']
# Check a response of create_port
self._check_response_portbindings_host(port['port'])
# Check a response of get_port
- ctx = context.get_admin_context()
- port = self._show('ports', port_id, neutron_context=ctx)['port']
+ port = self._show('ports', port_id, as_admin=True)['port']
self._check_response_portbindings_host(port)
- # By default user is admin - now test non admin user
- ctx = context.Context(user_id=None,
- tenant_id=self._tenant_id,
- is_admin=False)
- non_admin_port = self._show(
- 'ports', port_id, neutron_context=ctx)['port']
+ non_admin_port = self._show('ports', port_id)['port']
self._check_response_no_portbindings_host(non_admin_port)
def test_ports_vif_host(self):
host_arg = {portbindings.HOST_ID: self.hostname}
with self.port(name='name1',
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg), self.port(name='name2'):
- ctx = context.get_admin_context()
- ports = self._list('ports', neutron_context=ctx)['ports']
+ ports = self._list('ports', as_admin=True)['ports']
self.assertEqual(2, len(ports))
for port in ports:
if port['name'] == 'name1':
self._check_response_portbindings_host(port)
else:
self.assertFalse(port[portbindings.HOST_ID])
- # By default user is admin - now test non admin user
- ctx = context.Context(user_id=None,
- tenant_id=self._tenant_id,
- is_admin=False)
- ports = self._list('ports', neutron_context=ctx)['ports']
+ ports = self._list('ports')['ports']
self.assertEqual(2, len(ports))
for non_admin_port in ports:
self._check_response_no_portbindings_host(non_admin_port)
def test_ports_vif_host_update(self):
host_arg = {portbindings.HOST_ID: self.hostname}
- with self.port(name='name1', arg_list=(portbindings.HOST_ID,),
+ with self.port(name='name1', is_admin=True,
+ arg_list=(portbindings.HOST_ID,),
**host_arg) as port1, self.port(name='name2') as port2:
data = {'port': {portbindings.HOST_ID: 'testhosttemp'}}
- req = self.new_update_request('ports', data, port1['port']['id'])
+ req = self.new_update_request('ports', data, port1['port']['id'],
+ as_admin=True)
req.get_response(self.api)
- req = self.new_update_request('ports', data, port2['port']['id'])
- ctx = context.get_admin_context()
+ req = self.new_update_request('ports', data, port2['port']['id'],
+ as_admin=True)
req.get_response(self.api)
- ports = self._list('ports', neutron_context=ctx)['ports']
+ ports = self._list('ports', as_admin=True)['ports']
self.assertEqual(2, len(ports))
for port in ports:
self.assertEqual('testhosttemp', port[portbindings.HOST_ID])
def test_ports_vif_non_host_update(self):
host_arg = {portbindings.HOST_ID: self.hostname}
- with self.port(name='name', arg_list=(portbindings.HOST_ID,),
+ with self.port(name='name', is_admin=True,
+ arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
data = {'port': {'admin_state_up': False}}
- req = self.new_update_request('ports', data, port['port']['id'])
+ req = self.new_update_request('ports', data, port['port']['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port'][portbindings.HOST_ID],
res['port'][portbindings.HOST_ID])
def test_ports_vif_non_host_update_when_host_null(self):
- with self.port() as port:
+ with self.port(is_admin=True) as port:
data = {'port': {'admin_state_up': False}}
- req = self.new_update_request('ports', data, port['port']['id'])
+ req = self.new_update_request('ports', data, port['port']['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port'][portbindings.HOST_ID],
res['port'][portbindings.HOST_ID])
@@ -267,10 +249,12 @@ class PortBindingsHostTestCaseMixin(object):
def test_ports_vif_host_list(self):
host_arg = {portbindings.HOST_ID: self.hostname}
with self.port(name='name1',
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1,\
self.port(name='name2'),\
self.port(name='name3',
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port3:
self._test_list_resources(
@@ -308,23 +292,16 @@ class PortBindingsVnicTestCaseMixin(object):
# Check a response of create_port
self._check_response_portbindings_vnic_type(port['port'])
# Check a response of get_port
- ctx = context.get_admin_context()
- port = self._show('ports', port_id, neutron_context=ctx)['port']
+ port = self._show('ports', port_id, as_admin=True)['port']
self._check_response_portbindings_vnic_type(port)
- # By default user is admin - now test non admin user
- ctx = context.Context(user_id=None,
- tenant_id=self._tenant_id,
- is_admin=False)
- non_admin_port = self._show(
- 'ports', port_id, neutron_context=ctx)['port']
+ non_admin_port = self._show('ports', port_id)['port']
self._check_response_portbindings_vnic_type(non_admin_port)
def test_ports_vnic_type(self):
vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
with self.port(name='name1', arg_list=(portbindings.VNIC_TYPE,),
**vnic_arg), self.port(name='name2'):
- ctx = context.get_admin_context()
- ports = self._list('ports', neutron_context=ctx)['ports']
+ ports = self._list('ports', as_admin=True)['ports']
self.assertEqual(2, len(ports))
for port in ports:
if port['name'] == 'name1':
@@ -332,11 +309,7 @@ class PortBindingsVnicTestCaseMixin(object):
else:
self.assertEqual(portbindings.VNIC_NORMAL,
port[portbindings.VNIC_TYPE])
- # By default user is admin - now test non admin user
- ctx = context.Context(user_id=None,
- tenant_id=self._tenant_id,
- is_admin=False)
- ports = self._list('ports', neutron_context=ctx)['ports']
+ ports = self._list('ports')['ports']
self.assertEqual(2, len(ports))
for non_admin_port in ports:
self._check_response_portbindings_vnic_type(non_admin_port)
diff --git a/neutron/tests/unit/api/test_extensions.py b/neutron/tests/unit/api/test_extensions.py
index 7e15bc4077..14155f1b02 100644
--- a/neutron/tests/unit/api/test_extensions.py
+++ b/neutron/tests/unit/api/test_extensions.py
@@ -17,6 +17,7 @@ import copy
from unittest import mock
import fixtures
+from neutron_lib import context
from neutron_lib import exceptions
from neutron_lib.plugins import constants as lib_const
from neutron_lib.plugins import directory
@@ -1045,6 +1046,8 @@ class ExtensionExtendedAttributeTestCase(base.BaseTestCase):
req = testlib_api.create_request(
path, body, content_type,
method, query_string=params)
+ req.environ['neutron.context'] = context.Context(
+ '', self._tenant_id, roles=['member', 'reader'])
res = req.get_response(self._api)
if res.status_code >= 400:
raise webexc.HTTPClientError(detail=res.body, code=res.status_code)
diff --git a/neutron/tests/unit/api/v2/test_base.py b/neutron/tests/unit/api/v2/test_base.py
index f0cb1f1a26..2e2a32e8e0 100644
--- a/neutron/tests/unit/api/v2/test_base.py
+++ b/neutron/tests/unit/api/v2/test_base.py
@@ -74,6 +74,14 @@ def _get_path(resource, id=None, action=None,
return path
+def _get_neutron_env(tenant_id=None, as_admin=False):
+ tenant_id = tenant_id or _uuid()
+ roles = ['member', 'reader']
+ if as_admin:
+ roles.append('admin')
+ return {'neutron.context': context.Context('', tenant_id, roles=roles)}
+
+
class APIv2TestBase(base.BaseTestCase):
def setUp(self):
super(APIv2TestBase, self).setUp()
@@ -98,6 +106,8 @@ class APIv2TestBase(base.BaseTestCase):
api = router.APIRouter()
self.api = webtest.TestApp(api)
+ self._tenant_id = "api-test-tenant"
+
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', quota_conf.QUOTA_DB_DRIVER,
group='QUOTAS')
@@ -105,6 +115,27 @@ class APIv2TestBase(base.BaseTestCase):
# APIRouter initialization resets policy module, re-initializing it
policy.init()
+ def _post_request(self, path, initial_input, expect_errors=None,
+ req_tenant_id=None, as_admin=False):
+ req_tenant_id = req_tenant_id or self._tenant_id
+ return self.api.post_json(
+ path, initial_input, expect_errors=expect_errors,
+ extra_environ=_get_neutron_env(req_tenant_id, as_admin))
+
+ def _put_request(self, path, initial_input, expect_errors=None,
+ req_tenant_id=None, as_admin=False):
+ req_tenant_id = req_tenant_id or self._tenant_id
+ return self.api.put_json(
+ path, initial_input, expect_errors=expect_errors,
+ extra_environ=_get_neutron_env(req_tenant_id, as_admin))
+
+ def _delete_request(self, path, expect_errors=None,
+ req_tenant_id=None, as_admin=False):
+ req_tenant_id = req_tenant_id or self._tenant_id
+ return self.api.delete_json(
+ path, expect_errors=expect_errors,
+ extra_environ=_get_neutron_env(req_tenant_id, as_admin))
+
class _ArgMatcher(object):
"""An adapter to assist mock assertions, used to custom compare."""
@@ -512,17 +543,16 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def _test_list(self, req_tenant_id, real_tenant_id):
env = {}
if req_tenant_id:
- env = {'neutron.context': context.Context('', req_tenant_id)}
+ env = _get_neutron_env(req_tenant_id)
input_dict = {'id': uuidutils.generate_uuid(),
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
- 'tenant_id': real_tenant_id,
+ 'project_id': real_tenant_id,
'shared': False,
'subnets': []}
- return_value = [input_dict]
instance = self.plugin.return_value
- instance.get_networks.return_value = return_value
+ instance.get_networks.return_value = [input_dict]
res = self.api.get(_get_path('networks',
fmt=self.fmt), extra_environ=env)
@@ -789,7 +819,7 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def test_create_with_keystone_env(self):
tenant_id = _uuid()
net_id = _uuid()
- env = {'neutron.context': context.Context('', tenant_id)}
+ env = _get_neutron_env(tenant_id)
# tenant_id should be fetched from env
initial_input = {'network': {'name': 'net1'}}
full_input = {'network': {'admin_state_up': True,
@@ -947,8 +977,9 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def test_create_return_extra_attr(self):
net_id = _uuid()
+ project_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
- 'tenant_id': _uuid()}}
+ 'tenant_id': project_id}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id, 'v2attrs:something': "123"}
return_value.update(data['network'].copy())
@@ -959,7 +990,8 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
- content_type='application/' + self.fmt)
+ content_type='application/' + self.fmt,
+ extra_environ=_get_neutron_env(project_id))
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('network', res)
@@ -969,23 +1001,25 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
self.assertNotIn('v2attrs:something', net)
def test_fields(self):
+ project_id = _uuid()
return_value = {'name': 'net1', 'admin_state_up': True,
- 'subnets': []}
+ 'project_id': project_id, 'subnets': []}
instance = self.plugin.return_value
instance.get_network.return_value = return_value
self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
- fmt=self.fmt))
+ fmt=self.fmt),
+ extra_environ=_get_neutron_env(project_id))
def _test_delete(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
if req_tenant_id:
- env = {'neutron.context': context.Context('', req_tenant_id)}
+ env = _get_neutron_env(req_tenant_id)
instance = self.plugin.return_value
- instance.get_network.return_value = {'tenant_id': real_tenant_id,
+ instance.get_network.return_value = {'project_id': real_tenant_id,
'shared': False}
instance.delete_network.return_value = None
@@ -1010,15 +1044,12 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def _test_get(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
+ shared = req_tenant_id and req_tenant_id.endswith('another')
env = {}
- shared = False
if req_tenant_id:
- env = {'neutron.context': context.Context('', req_tenant_id)}
- if req_tenant_id.endswith('another'):
- shared = True
- env['neutron.context'].roles = ['tenant_admin']
+ env = _get_neutron_env(req_tenant_id)
- data = {'tenant_id': real_tenant_id, 'shared': shared}
+ data = {'project_id': real_tenant_id, 'shared': shared}
instance = self.plugin.return_value
instance.get_network.return_value = data
@@ -1060,14 +1091,14 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
expect_errors=False):
env = {}
if req_tenant_id:
- env = {'neutron.context': context.Context('', req_tenant_id)}
+ env = _get_neutron_env(req_tenant_id)
# leave out 'name' field intentionally
data = {'network': {'admin_state_up': True}}
return_value = {'subnets': []}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
- instance.get_network.return_value = {'tenant_id': real_tenant_id,
+ instance.get_network.return_value = {'project_id': real_tenant_id,
'shared': False}
instance.update_network.return_value = return_value
@@ -1308,26 +1339,31 @@ class NotificationTest(APIv2TestBase):
group='QUOTAS')
def _resource_op_notifier(self, opname, resource, expected_errors=False):
- initial_input = {resource: {'name': 'myname'}}
+ tenant_id = _uuid()
+ network_obj = {'name': 'myname',
+ 'project_id': tenant_id}
+ initial_input = {resource: network_obj}
instance = self.plugin.return_value
- instance.get_networks.return_value = initial_input
+ instance.get_network.return_value = network_obj
instance.get_networks_count.return_value = 0
expected_code = exc.HTTPCreated.code
if opname == 'create':
- initial_input[resource]['tenant_id'] = _uuid()
- res = self.api.post_json(
+ res = self._post_request(
_get_path('networks'),
- initial_input, expect_errors=expected_errors)
+ initial_input, expect_errors=expected_errors,
+ req_tenant_id=tenant_id)
if opname == 'update':
- res = self.api.put_json(
- _get_path('networks', id=_uuid()),
- initial_input, expect_errors=expected_errors)
+ op_input = {resource: {'name': 'myname'}}
+ res = self._put_request(
+ _get_path('networks', id=tenant_id),
+ op_input, expect_errors=expected_errors,
+ req_tenant_id=tenant_id)
expected_code = exc.HTTPOk.code
if opname == 'delete':
- initial_input[resource]['tenant_id'] = _uuid()
- res = self.api.delete(
- _get_path('networks', id=_uuid()),
- expect_errors=expected_errors)
+ res = self._delete_request(
+ _get_path('networks', id=tenant_id),
+ expect_errors=expected_errors,
+ req_tenant_id=tenant_id)
expected_code = exc.HTTPNoContent.code
expected_events = ('.'.join([resource, opname, "start"]),
@@ -1472,7 +1508,9 @@ class ExtensionTestCase(base.BaseTestCase):
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
- res = self.api.post_json(_get_path('networks'), initial_input)
+ res = self.api.post_json(
+ _get_path('networks'), initial_input,
+ extra_environ=_get_neutron_env(tenant_id))
instance.create_network.assert_called_with(mock.ANY,
network=data)
diff --git a/neutron/tests/unit/db/metering/test_metering_db.py b/neutron/tests/unit/db/metering/test_metering_db.py
index 35d7f733c1..1c2af72a4e 100644
--- a/neutron/tests/unit/db/metering/test_metering_db.py
+++ b/neutron/tests/unit/db/metering/test_metering_db.py
@@ -16,7 +16,6 @@ import contextlib
from neutron_lib.api.definitions import metering as metering_apidef
from neutron_lib import constants as n_consts
-from neutron_lib import context
from neutron_lib.db import constants as db_const
from neutron_lib.plugins import constants
from oslo_utils import uuidutils
@@ -42,18 +41,12 @@ _fake_uuid = uuidutils.generate_uuid
class MeteringPluginDbTestCaseMixin(object):
def _create_metering_label(self, fmt, name, description, **kwargs):
data = {'metering_label': {'name': name,
- 'tenant_id': kwargs.get('tenant_id',
- 'test-tenant'),
'shared': kwargs.get('shared', False),
'description': description}}
- req = self.new_create_request('metering-labels', data,
- fmt)
-
- if kwargs.get('set_context') and 'tenant_id' in kwargs:
- # create a specific auth context for this request
- req.environ['neutron.context'] = (
- context.Context('', kwargs['tenant_id'],
- is_admin=kwargs.get('is_admin', True)))
+ req = self.new_create_request(
+ 'metering-labels', data, fmt,
+ tenant_id=kwargs.get('tenant_id', self._tenant_id),
+ as_admin=kwargs.get('is_admin', True))
return req.get_response(self.ext_api)
@@ -71,7 +64,6 @@ class MeteringPluginDbTestCaseMixin(object):
data = {
'metering_label_rule': {
'metering_label_id': metering_label_id,
- 'tenant_id': kwargs.get('tenant_id', 'test-tenant'),
'direction': direction,
'excluded': excluded,
}
@@ -87,13 +79,10 @@ class MeteringPluginDbTestCaseMixin(object):
data['metering_label_rule']['destination_ip_prefix'] =\
destination_ip_prefix
- req = self.new_create_request('metering-label-rules',
- data, fmt)
-
- if kwargs.get('set_context') and 'tenant_id' in kwargs:
- # create a specific auth context for this request
- req.environ['neutron.context'] = (
- context.Context('', kwargs['tenant_id']))
+ req = self.new_create_request(
+ 'metering-label-rules', data, fmt,
+ tenant_id=kwargs.get('tenant_id', self._tenant_id),
+ as_admin=kwargs.get('is_admin', True))
return req.get_response(self.ext_api)
@@ -203,7 +192,8 @@ class TestMetering(MeteringPluginDbTestCase):
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
- self._delete('metering-labels', metering_label_id, 204)
+ self._delete('metering-labels', metering_label_id, 204,
+ as_admin=True)
def test_list_metering_label(self):
name = 'my label'
@@ -258,7 +248,7 @@ class TestMetering(MeteringPluginDbTestCase):
remote_ip_prefix=remote_ip_prefix) as label_rule:
rule_id = label_rule['metering_label_rule']['id']
self._update('metering-label-rules', rule_id, data,
- webob.exc.HTTPNotImplemented.code)
+ webob.exc.HTTPNotImplemented.code, as_admin=True)
def test_delete_metering_label_rule(self):
name = 'my label'
@@ -275,7 +265,8 @@ class TestMetering(MeteringPluginDbTestCase):
metering_label_id, direction, excluded,
remote_ip_prefix=remote_ip_prefix) as label_rule:
rule_id = label_rule['metering_label_rule']['id']
- self._delete('metering-label-rules', rule_id, 204)
+ self._delete('metering-label-rules', rule_id, 204,
+ as_admin=True)
def test_list_metering_label_rule(self):
name = 'my label'
@@ -297,7 +288,7 @@ class TestMetering(MeteringPluginDbTestCase):
metering_label_rule = (v1, v2)
self._test_list_resources('metering-label-rule',
- metering_label_rule)
+ metering_label_rule, as_admin=True)
def test_create_metering_label_rules(self):
name = 'my label'
@@ -319,7 +310,7 @@ class TestMetering(MeteringPluginDbTestCase):
metering_label_rule = (v1, v2)
self._test_list_resources('metering-label-rule',
- metering_label_rule)
+ metering_label_rule, as_admin=True)
def test_create_overlap_metering_label_rules(self):
name = 'my label'
@@ -365,4 +356,5 @@ class TestMetering(MeteringPluginDbTestCase):
metering_label_rule = (v1, v2)
self._test_list_resources('metering-label-rule',
- metering_label_rule)
+ metering_label_rule,
+ as_admin=True)
diff --git a/neutron/tests/unit/db/test_agentschedulers_db.py b/neutron/tests/unit/db/test_agentschedulers_db.py
index d503aee1ec..df4edfaa9b 100644
--- a/neutron/tests/unit/db/test_agentschedulers_db.py
+++ b/neutron/tests/unit/db/test_agentschedulers_db.py
@@ -45,6 +45,7 @@ from neutron.db.models import agent as agent_model
from neutron.extensions import l3agentscheduler
from neutron.objects import agent as ag_obj
from neutron.objects import l3agent as rb_obj
+from neutron import policy
from neutron.tests.common import helpers
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
@@ -78,18 +79,21 @@ class AgentSchedulerTestMixIn(object):
def _path_req(self, path, method='GET', data=None,
query_string=None,
- admin_context=True):
+ admin_context=True,
+ req_tenant_id=None):
content_type = 'application/%s' % self.fmt
body = None
if data is not None: # empty dict is valid
body = wsgi.Serializer().serialize(data, content_type)
+ roles = ['member', 'reader']
+ req_tenant_id = req_tenant_id or self._tenant_id
if admin_context:
- return testlib_api.create_request(
- path, body, content_type, method, query_string=query_string)
- else:
- return testlib_api.create_request(
- path, body, content_type, method, query_string=query_string,
- context=context.Context('', 'tenant_id'))
+ roles.append('admin')
+ req = testlib_api.create_request(
+ path, body, content_type, method, query_string=query_string)
+ req.environ['neutron.context'] = context.Context(
+ '', req_tenant_id, roles=roles, is_admin=admin_context)
+ return req
def _path_create_request(self, path, data, admin_context=True):
return self._path_req(path, method='POST', data=data,
@@ -218,7 +222,7 @@ class AgentSchedulerTestMixIn(object):
new_agent = {}
new_agent['agent'] = {}
new_agent['agent']['admin_state_up'] = admin_state_up
- self._update('agents', agent_id, new_agent)
+ self._update('agents', agent_id, new_agent, as_admin=True)
def _get_agent_id(self, agent_type, host):
agents = self._list_agents()
@@ -269,6 +273,7 @@ class OvsAgentSchedulerTestCaseBase(test_l3.L3NatTestCaseMixin,
self.dhcp_notify_p = mock.patch(
'neutron.extensions.dhcpagentscheduler.notify')
self.patched_dhcp_notify = self.dhcp_notify_p.start()
+ policy.init()
class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
@@ -911,10 +916,12 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
self.assertNotEqual(agent['host'], new_agent_host)
def test_router_auto_schedule_with_invalid_router(self):
- with self.router() as router:
+ project_id = uuidutils.generate_uuid()
+ with self.router(project_id=project_id) as router:
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
- self._delete('routers', router['router']['id'])
+ self._delete('routers', router['router']['id'],
+ tenant_id=project_id)
# deleted router
ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
@@ -1106,19 +1113,22 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
self.assertEqual(0, len(router_ids))
def test_router_without_l3_agents(self):
+ project_id = uuidutils.generate_uuid()
with self.subnet() as s:
self._set_net_external(s['subnet']['network_id'])
- data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
+ data = {'router': {'tenant_id': project_id}}
data['router']['name'] = 'router1'
data['router']['external_gateway_info'] = {
'network_id': s['subnet']['network_id']}
- router_req = self.new_create_request('routers', data, self.fmt)
+ router_req = self.new_create_request(
+ 'routers', data, self.fmt, tenant_id=project_id)
res = router_req.get_response(self.ext_api)
router = self.deserialize(self.fmt, res)
l3agents = (
self.l3plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['router']['id']]))
- self._delete('routers', router['router']['id'])
+ self._delete(
+ 'routers', router['router']['id'], tenant_id=project_id)
self.assertEqual(0, len(l3agents))
def test_dvr_router_scheduling_to_only_dvr_snat_agent(self):
@@ -1217,26 +1227,30 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
self.assertEqual(agent['id'], new_agent['id'])
def test_router_sync_data(self):
- with self.subnet() as s1,\
- self.subnet(cidr='10.0.2.0/24') as s2,\
- self.subnet(cidr='10.0.3.0/24') as s3:
+ project_id = uuidutils.generate_uuid()
+ with self.subnet(project_id=project_id) as s1,\
+ self.subnet(project_id=project_id, cidr='10.0.2.0/24') as s2,\
+ self.subnet(project_id=project_id, cidr='10.0.3.0/24') as s3:
self._register_agent_states()
self._set_net_external(s1['subnet']['network_id'])
- data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
+ data = {'router': {'tenant_id': project_id}}
data['router']['name'] = 'router1'
data['router']['external_gateway_info'] = {
'network_id': s1['subnet']['network_id']}
- router_req = self.new_create_request('routers', data, self.fmt)
+ router_req = self.new_create_request(
+ 'routers', data, self.fmt, tenant_id=project_id)
res = router_req.get_response(self.ext_api)
router = self.deserialize(self.fmt, res)
self._router_interface_action('add',
router['router']['id'],
s2['subnet']['id'],
- None)
+ None,
+ tenant_id=project_id)
self._router_interface_action('add',
router['router']['id'],
s3['subnet']['id'],
- None)
+ None,
+ tenant_id=project_id)
l3agents = self._list_l3_agents_hosting_router(
router['router']['id'])
self.assertEqual(1, len(l3agents['agents']))
@@ -1267,7 +1281,8 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
self._router_interface_action('remove',
router['router']['id'],
s2['subnet']['id'],
- None)
+ None,
+ tenant_id=project_id)
l3agents = self._list_l3_agents_hosting_router(
router['router']['id'])
self.assertEqual(1,
@@ -1275,8 +1290,10 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
self._router_interface_action('remove',
router['router']['id'],
s3['subnet']['id'],
- None)
- self._delete('routers', router['router']['id'])
+ None,
+ tenant_id=project_id)
+ self._delete('routers', router['router']['id'],
+ tenant_id=project_id)
def _test_router_add_to_l3_agent(self, admin_state_up=True):
with self.router() as router1:
diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py
index a25ccc4f0d..b66492c3ea 100644
--- a/neutron/tests/unit/db/test_db_base_plugin_v2.py
+++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py
@@ -246,60 +246,117 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
query_string=params, context=context,
headers=headers)
+ def _admin_req(self, method, resource, data=None, fmt=None, id=None,
+ params=None, action=None, subresource=None, sub_id=None,
+ ctx=None, headers=None, tenant_id=None):
+ tenant_id = tenant_id or self._tenant_id
+ req = self._req(method, resource, data, fmt, id, params, action,
+ subresource, sub_id, ctx, headers)
+ req.environ['neutron.context'] = context.Context(
+ '', tenant_id, is_admin=True,
+ roles=['admin', 'member', 'reader'])
+ return req
+
+ def _member_req(self, method, resource, data=None, fmt=None, id=None,
+ params=None, action=None, subresource=None, sub_id=None,
+ ctx=None, headers=None, tenant_id=None):
+ tenant_id = tenant_id or self._tenant_id
+ req = self._req(method, resource, data, fmt, id, params, action,
+ subresource, sub_id, ctx, headers)
+ req.environ['neutron.context'] = context.Context(
+ '', tenant_id, roles=['member', 'reader'])
+ return req
+
+ def _reader_req(self, method, resource, data=None, fmt=None, id=None,
+ params=None, action=None, subresource=None, sub_id=None,
+ ctx=None, headers=None, tenant_id=None):
+ tenant_id = tenant_id or self._tenant_id
+ req = self._req(method, resource, data, fmt, id, params, action,
+ subresource, sub_id, ctx, headers)
+ req.environ['neutron.context'] = context.Context(
+ '', tenant_id, roles=['reader'])
+ return req
+
def new_create_request(self, resource, data, fmt=None, id=None,
- subresource=None, context=None):
- return self._req('POST', resource, data, fmt, id=id,
- subresource=subresource, context=context)
+ subresource=None, context=None, tenant_id=None,
+ as_admin=False):
+ tenant_id = tenant_id or self._tenant_id
+ if as_admin:
+ return self._admin_req(
+ 'POST', resource, data, fmt, id=id,
+ subresource=subresource, ctx=context, tenant_id=tenant_id)
+ return self._member_req('POST', resource, data, fmt, id=id,
+ subresource=subresource, ctx=context,
+ tenant_id=tenant_id)
def new_list_request(self, resource, fmt=None, params=None,
- subresource=None, parent_id=None):
- return self._req(
+ subresource=None, parent_id=None, tenant_id=None,
+ as_admin=False):
+ tenant_id = tenant_id or self._tenant_id
+ if as_admin:
+ return self._admin_req(
+ 'GET', resource, None, fmt, params=params, id=parent_id,
+ subresource=subresource, tenant_id=tenant_id
+ )
+ return self._reader_req(
'GET', resource, None, fmt, params=params, id=parent_id,
- subresource=subresource
+ subresource=subresource, tenant_id=tenant_id
)
def new_show_request(self, resource, id, fmt=None,
- subresource=None, fields=None, sub_id=None):
+ subresource=None, fields=None, sub_id=None,
+ tenant_id=None, as_admin=False):
+ tenant_id = tenant_id or self._tenant_id
if fields:
params = "&".join(["fields=%s" % x for x in fields])
else:
params = None
- return self._req('GET', resource, None, fmt, id=id,
- params=params, subresource=subresource, sub_id=sub_id)
+ if as_admin:
+ return self._admin_req('GET', resource, None, fmt, id=id,
+ params=params, subresource=subresource,
+ sub_id=sub_id, tenant_id=tenant_id)
+ return self._reader_req('GET', resource, None, fmt, id=id,
+ params=params, subresource=subresource,
+ sub_id=sub_id, tenant_id=tenant_id)
def new_delete_request(self, resource, id, fmt=None, subresource=None,
- sub_id=None, data=None, headers=None):
- return self._req(
- 'DELETE',
- resource,
- data,
- fmt,
- id=id,
- subresource=subresource,
- sub_id=sub_id,
- headers=headers
- )
+ sub_id=None, data=None, headers=None,
+ tenant_id=None, as_admin=False):
+ tenant_id = tenant_id or self._tenant_id
+ if as_admin:
+ return self._admin_req('DELETE', resource, data, fmt, id=id,
+ subresource=subresource, sub_id=sub_id,
+ headers=headers, tenant_id=tenant_id)
+ return self._member_req('DELETE', resource, data, fmt, id=id,
+ subresource=subresource, sub_id=sub_id,
+ headers=headers, tenant_id=tenant_id)
def new_update_request(self, resource, data, id, fmt=None,
subresource=None, context=None, sub_id=None,
- headers=None):
- return self._req(
+ headers=None, as_admin=False, tenant_id=None):
+ tenant_id = tenant_id or self._tenant_id
+ if as_admin:
+ return self._admin_req(
+ 'PUT', resource, data, fmt, id=id, subresource=subresource,
+ sub_id=sub_id, ctx=context, headers=headers,
+ tenant_id=tenant_id
+ )
+ return self._member_req(
'PUT', resource, data, fmt, id=id, subresource=subresource,
- sub_id=sub_id, context=context, headers=headers
+ sub_id=sub_id, ctx=context, headers=headers, tenant_id=tenant_id
)
def new_action_request(self, resource, data, id, action, fmt=None,
- subresource=None, sub_id=None):
- return self._req(
- 'PUT',
- resource,
- data,
- fmt,
- id=id,
- action=action,
- subresource=subresource,
- sub_id=sub_id
- )
+ subresource=None, sub_id=None, tenant_id=None,
+ as_admin=False):
+ tenant_id = tenant_id or self._tenant_id
+ if as_admin:
+ return self._admin_req('PUT', resource, data, fmt, id=id,
+ action=action, subresource=subresource,
+ sub_id=sub_id, tenant_id=tenant_id)
+ return self._member_req('PUT', resource, data, fmt, id=id,
+ action=action, subresource=subresource,
+ sub_id=sub_id, tenant_id=tenant_id)
def deserialize(self, content_type, response):
ctype = 'application/%s' % content_type
@@ -328,23 +385,19 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
return random.choice(ip_list)
return ip_list[0]
- def _create_bulk_from_list(self, fmt, resource, objects, **kwargs):
+ def _create_bulk_from_list(self, fmt, resource, objects, tenant_id=None,
+ as_admin=False, **kwargs):
"""Creates a bulk request from a list of objects."""
collection = "%ss" % resource
req_data = {collection: objects}
- req = self.new_create_request(collection, req_data, fmt)
- if ('set_context' in kwargs and
- kwargs['set_context'] is True and
- 'tenant_id' in kwargs):
- # create a specific auth context for this request
- req.environ['neutron.context'] = context.Context(
- '', kwargs['tenant_id'])
- elif 'context' in kwargs:
- req.environ['neutron.context'] = kwargs['context']
+ req = self.new_create_request(collection, req_data, fmt,
+ tenant_id=tenant_id, as_admin=as_admin)
return req.get_response(self.api)
- def _create_bulk(self, fmt, number, resource, data, name='test', **kwargs):
+ def _create_bulk(self, fmt, number, resource, data, name='test',
+ tenant_id=None, as_admin=False, **kwargs):
"""Creates a bulk request for any kind of resource."""
+ tenant_id = tenant_id or self._tenant_id
objects = []
collection = "%ss" % resource
for i in range(number):
@@ -354,19 +407,13 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
obj[resource].update(kwargs['override'][i])
objects.append(obj)
req_data = {collection: objects}
- req = self.new_create_request(collection, req_data, fmt)
- if ('set_context' in kwargs and
- kwargs['set_context'] is True and
- 'tenant_id' in kwargs):
- # create a specific auth context for this request
- req.environ['neutron.context'] = context.Context(
- '', kwargs['tenant_id'])
- elif 'context' in kwargs:
- req.environ['neutron.context'] = kwargs['context']
+ req = self.new_create_request(collection, req_data, fmt,
+ tenant_id=tenant_id,
+ as_admin=as_admin)
return req.get_response(self.api)
def _create_network(self, fmt, name, admin_state_up,
- arg_list=None, set_context=False, tenant_id=None,
+ arg_list=None, tenant_id=None, as_admin=False,
**kwargs):
tenant_id = tenant_id or self._tenant_id
data = {'network': {'name': name,
@@ -378,11 +425,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
# Arg must be present
if arg in kwargs:
data['network'][arg] = kwargs[arg]
- network_req = self.new_create_request('networks', data, fmt)
- if set_context and tenant_id:
- # create a specific auth context for this request
- network_req.environ['neutron.context'] = context.Context(
- '', tenant_id)
+ network_req = self.new_create_request('networks', data, fmt,
+ tenant_id=tenant_id,
+ as_admin=as_admin)
return network_req.get_response(self.api)
@@ -392,11 +437,12 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
'tenant_id': self._tenant_id}}
return self._create_bulk(fmt, number, 'network', base_data, **kwargs)
- def _create_subnet(self, fmt, net_id, cidr,
- expected_res_status=None, **kwargs):
+ def _create_subnet(self, fmt, net_id, cidr, expected_res_status=None,
+ tenant_id=None, as_admin=False, **kwargs):
+ tenant_id = tenant_id or self._tenant_id
data = {'subnet': {'network_id': net_id,
'ip_version': constants.IP_VERSION_4,
- 'tenant_id': self._tenant_id}}
+ 'tenant_id': tenant_id}}
if cidr:
data['subnet']['cidr'] = cidr
for arg in ('ip_version', 'tenant_id', 'subnetpool_id', 'prefixlen',
@@ -412,11 +458,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
kwargs['gateway_ip'] is not constants.ATTR_NOT_SPECIFIED):
data['subnet']['gateway_ip'] = kwargs['gateway_ip']
- subnet_req = self.new_create_request('subnets', data, fmt)
- if (kwargs.get('set_context') and 'tenant_id' in kwargs):
- # create a specific auth context for this request
- subnet_req.environ['neutron.context'] = context.Context(
- '', kwargs['tenant_id'])
+ subnet_req = self.new_create_request('subnets', data, fmt,
+ tenant_id=tenant_id,
+ as_admin=as_admin)
subnet_res = subnet_req.get_response(self.api)
if expected_res_status:
@@ -443,24 +487,25 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
return self._create_bulk(fmt, number, 'subnet', base_data, **kwargs)
def _create_subnetpool(self, fmt, prefixes,
- expected_res_status=None, admin=False, **kwargs):
+ expected_res_status=None, admin=False,
+ tenant_id=None, **kwargs):
+ tenant_id = tenant_id or self._tenant_id
subnetpool = {'subnetpool': {'prefixes': prefixes}}
for k, v in kwargs.items():
subnetpool['subnetpool'][k] = str(v)
api = self._api_for_resource('subnetpools')
subnetpools_req = self.new_create_request('subnetpools',
- subnetpool, fmt)
- if not admin:
- neutron_context = context.Context('', kwargs['tenant_id'])
- subnetpools_req.environ['neutron.context'] = neutron_context
+ subnetpool, fmt,
+ tenant_id=tenant_id,
+ as_admin=admin)
subnetpool_res = subnetpools_req.get_response(api)
if expected_res_status:
self.assertEqual(expected_res_status, subnetpool_res.status_int)
return subnetpool_res
def _create_port(self, fmt, net_id, expected_res_status=None,
- arg_list=None, set_context=False, is_admin=False,
+ arg_list=None, is_admin=False,
tenant_id=None, **kwargs):
tenant_id = tenant_id or self._tenant_id
data = {'port': {'network_id': net_id,
@@ -481,11 +526,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
'device_id' not in kwargs):
device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
data['port']['device_id'] = device_id
- port_req = self.new_create_request('ports', data, fmt)
- if set_context and tenant_id:
- # create a specific auth context for this request
- port_req.environ['neutron.context'] = context.Context(
- '', tenant_id, is_admin=is_admin)
+ port_req = self.new_create_request('ports', data, fmt,
+ tenant_id=tenant_id,
+ as_admin=is_admin)
port_res = port_req.get_response(self.api)
if expected_res_status:
@@ -499,28 +542,26 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
query_params.append("network_id=%s" % net_id)
if kwargs.get('device_owner'):
query_params.append("device_owner=%s" % kwargs.get('device_owner'))
- port_req = self.new_list_request('ports', fmt, '&'.join(query_params))
- if ('set_context' in kwargs and
- kwargs['set_context'] is True and
- 'tenant_id' in kwargs):
- # create a specific auth context for this request
- port_req.environ['neutron.context'] = context.Context(
- '', kwargs['tenant_id'])
-
+ port_req = self.new_list_request('ports', fmt, '&'.join(query_params),
+ tenant_id=kwargs.get('tenant_id'))
port_res = port_req.get_response(self.api)
if expected_res_status:
self.assertEqual(expected_res_status, port_res.status_int)
return port_res
def _create_port_bulk(self, fmt, number, net_id, name,
- admin_state_up, **kwargs):
+ admin_state_up, tenant_id=None, as_admin=False,
+ **kwargs):
base_data = {'port': {'network_id': net_id,
- 'admin_state_up': admin_state_up,
- 'tenant_id': self._tenant_id}}
- return self._create_bulk(fmt, number, 'port', base_data, **kwargs)
-
- def _make_network(self, fmt, name, admin_state_up, **kwargs):
- res = self._create_network(fmt, name, admin_state_up, **kwargs)
+ 'admin_state_up': admin_state_up}}
+ return self._create_bulk(fmt, number, 'port', base_data,
+ tenant_id=tenant_id, as_admin=as_admin,
+ **kwargs)
+
+ def _make_network(self, fmt, name, admin_state_up, as_admin=False,
+ **kwargs):
+ res = self._create_network(fmt, name, admin_state_up,
+ as_admin=as_admin, **kwargs)
# TODO(salvatore-orlando): do exception handling in this test module
# in a uniform way (we do it differently for ports, subnets, and nets
# Things can go wrong - raise HTTP exc with res code only
@@ -533,7 +574,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
allocation_pools=None, ip_version=constants.IP_VERSION_4,
enable_dhcp=True, dns_nameservers=None, host_routes=None,
shared=None, ipv6_ra_mode=None, ipv6_address_mode=None,
- tenant_id=None, set_context=False, segment_id=None):
+ tenant_id=None, segment_id=None, as_admin=False):
res = self._create_subnet(fmt,
net_id=network['network']['id'],
cidr=cidr,
@@ -550,7 +591,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
shared=shared,
ipv6_ra_mode=ipv6_ra_mode,
ipv6_address_mode=ipv6_address_mode,
- set_context=set_context)
+ as_admin=as_admin)
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
if res.status_int >= webob.exc.HTTPClientError.code:
@@ -572,11 +613,13 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
ipv6_ra_mode=ra_addr_mode,
ipv6_address_mode=ra_addr_mode))
- def _make_subnetpool(self, fmt, prefixes, admin=False, **kwargs):
+ def _make_subnetpool(self, fmt, prefixes, admin=False, tenant_id=None,
+ **kwargs):
res = self._create_subnetpool(fmt,
prefixes,
None,
admin,
+ tenant_id=tenant_id,
**kwargs)
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
@@ -584,8 +627,10 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
- def _make_port(self, fmt, net_id, expected_res_status=None, **kwargs):
- res = self._create_port(fmt, net_id, expected_res_status, **kwargs)
+ def _make_port(self, fmt, net_id, expected_res_status=None,
+ as_admin=False, **kwargs):
+ res = self._create_port(fmt, net_id, expected_res_status,
+ is_admin=as_admin, **kwargs)
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
if res.status_int >= webob.exc.HTTPClientError.code:
@@ -596,7 +641,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
max_burst_kbps=None, dscp_mark=None, min_kbps=None,
direction=constants.EGRESS_DIRECTION,
expected_res_status=None, project_id=None,
- set_context=False, is_admin=False):
+ is_admin=False):
# Accepted rule types: "bandwidth_limit", "dscp_marking" and
# "minimum_bandwidth"
self.assertIn(rule_type, [qos_const.RULE_TYPE_BANDWIDTH_LIMIT,
@@ -615,11 +660,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
data[type_req][qos_const.MIN_KBPS] = min_kbps
data[type_req][qos_const.DIRECTION] = direction
route = 'qos/policies/%s/%s' % (qos_policy_id, type_req + 's')
- qos_rule_req = self.new_create_request(route, data, fmt)
- if set_context and project_id:
- # create a specific auth context for this request
- qos_rule_req.environ['neutron.context'] = context.Context(
- '', project_id, is_admin=is_admin)
+ qos_rule_req = self.new_create_request(route, data, fmt,
+ tenant_id=project_id,
+ as_admin=is_admin)
qos_rule_res = qos_rule_req.get_response(self.api)
if expected_res_status:
@@ -628,16 +671,14 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
def _create_qos_policy(self, fmt, qos_policy_name=None,
expected_res_status=None, project_id=None,
- set_context=False, is_admin=False):
+ is_admin=False):
project_id = project_id or self._tenant_id
name = qos_policy_name or uuidutils.generate_uuid()
data = {'policy': {'name': name,
'project_id': project_id}}
- qos_req = self.new_create_request('policies', data, fmt)
- if set_context and project_id:
- # create a specific auth context for this request
- qos_req.environ['neutron.context'] = context.Context(
- '', project_id, is_admin=is_admin)
+ qos_req = self.new_create_request('policies', data, fmt,
+ tenant_id=project_id,
+ as_admin=is_admin)
qos_policy_res = qos_req.get_response(self.api)
if expected_res_status:
@@ -653,54 +694,49 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
def _delete(self, collection, id,
expected_code=webob.exc.HTTPNoContent.code,
- neutron_context=None, headers=None, subresource=None,
- sub_id=None):
+ headers=None, subresource=None, sub_id=None,
+ tenant_id=None, as_admin=False):
req = self.new_delete_request(collection, id, headers=headers,
- subresource=subresource, sub_id=sub_id)
- if neutron_context:
- # create a specific auth context for this request
- req.environ['neutron.context'] = neutron_context
+ subresource=subresource, sub_id=sub_id,
+ tenant_id=tenant_id, as_admin=as_admin)
+
res = req.get_response(self._api_for_resource(collection))
self.assertEqual(expected_code, res.status_int)
- def _show_response(self, resource, id, neutron_context=None):
- req = self.new_show_request(resource, id)
- if neutron_context:
- # create a specific auth context for this request
- req.environ['neutron.context'] = neutron_context
- elif hasattr(self, 'tenant_id'):
- req.environ['neutron.context'] = context.Context('',
- self.tenant_id)
+ def _show_response(self, resource, id, tenant_id=None, as_admin=False):
+ req = self.new_show_request(resource, id,
+ tenant_id=tenant_id,
+ as_admin=as_admin)
return req.get_response(self._api_for_resource(resource))
def _show(self, resource, id,
expected_code=webob.exc.HTTPOk.code,
- neutron_context=None):
- res = self._show_response(resource, id,
- neutron_context=neutron_context)
+ tenant_id=None, as_admin=False):
+ res = self._show_response(resource, id, tenant_id=tenant_id,
+ as_admin=as_admin)
self.assertEqual(expected_code, res.status_int)
return self.deserialize(self.fmt, res)
def _update(self, resource, id, new_data,
- expected_code=webob.exc.HTTPOk.code,
- neutron_context=None, headers=None):
- req = self.new_update_request(resource, new_data, id, headers=headers)
- if neutron_context:
- # create a specific auth context for this request
- req.environ['neutron.context'] = neutron_context
+ expected_code=webob.exc.HTTPOk.code, headers=None,
+ request_tenant_id=None, as_admin=False):
+ req = self.new_update_request(
+ resource, new_data, id, headers=headers,
+ tenant_id=request_tenant_id, as_admin=as_admin)
res = req.get_response(self._api_for_resource(resource))
self.assertEqual(expected_code, res.status_int)
return self.deserialize(self.fmt, res)
- def _list(self, resource, fmt=None, neutron_context=None,
+ def _list(self, resource, fmt=None,
query_params=None, expected_code=webob.exc.HTTPOk.code,
- parent_id=None, subresource=None):
+ parent_id=None, subresource=None,
+ tenant_id=None, as_admin=False):
fmt = fmt or self.fmt
req = self.new_list_request(resource, fmt, query_params,
subresource=subresource,
- parent_id=parent_id)
- if neutron_context:
- req.environ['neutron.context'] = neutron_context
+ parent_id=parent_id,
+ tenant_id=tenant_id,
+ as_admin=as_admin)
res = req.get_response(self._api_for_resource(resource))
self.assertEqual(expected_code, res.status_int)
return self.deserialize(fmt, res)
@@ -730,13 +766,14 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
self.assertEqual(items[0]['name'], 'test_0')
self.assertEqual(items[1]['name'], 'test_1')
- def _test_list_resources(self, resource, items, neutron_context=None,
- query_params=None,
- expected_code=webob.exc.HTTPOk.code):
+ def _test_list_resources(self, resource, items, query_params=None,
+ expected_code=webob.exc.HTTPOk.code,
+ tenant_id=None, as_admin=False):
res = self._list('%ss' % resource,
- neutron_context=neutron_context,
query_params=query_params,
- expected_code=expected_code)
+ expected_code=expected_code,
+ tenant_id=tenant_id,
+ as_admin=as_admin)
if expected_code == webob.exc.HTTPOk.code:
resource = resource.replace('-', '_')
self.assertCountEqual([i['id'] for i in res['%ss' % resource]],
@@ -771,7 +808,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
tenant_id=None,
project_id=None,
service_types=None,
- set_context=False):
+ as_admin=False):
if project_id:
tenant_id = project_id
cidr = netaddr.IPNetwork(cidr) if cidr else None
@@ -780,7 +817,6 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
gateway_ip = netaddr.IPAddress(gateway_ip)
with optional_ctx(network, self.network,
- set_context=set_context,
tenant_id=tenant_id) as network_to_use:
subnet = self._make_subnet(fmt or self.fmt,
network_to_use,
@@ -797,7 +833,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
ipv6_ra_mode=ipv6_ra_mode,
ipv6_address_mode=ipv6_address_mode,
tenant_id=tenant_id,
- set_context=set_context)
+ as_admin=as_admin)
yield subnet
@contextlib.contextmanager
@@ -811,22 +847,22 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
yield subnetpool
@contextlib.contextmanager
- def port(self, subnet=None, fmt=None, set_context=False, project_id=None,
+ def port(self, subnet=None, fmt=None, project_id=None, is_admin=False,
**kwargs):
tenant_id = project_id if project_id else kwargs.pop(
'tenant_id', None)
with optional_ctx(
subnet, self.subnet,
- set_context=set_context, tenant_id=tenant_id) as subnet_to_use:
+ tenant_id=tenant_id) as subnet_to_use:
net_id = subnet_to_use['subnet']['network_id']
port = self._make_port(
- fmt or self.fmt, net_id,
- set_context=set_context, tenant_id=tenant_id,
- **kwargs)
+ fmt or self.fmt, net_id, tenant_id=tenant_id,
+ as_admin=is_admin, **kwargs)
yield port
def _test_list_with_sort(self, resource,
- items, sorts, resources=None, query_params=''):
+ items, sorts, resources=None, query_params='',
+ tenant_id=None, as_admin=False):
query_str = query_params
for key, direction in sorts:
query_str = query_str + "&sort_key=%s&sort_dir=%s" % (key,
@@ -834,7 +870,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
if not resources:
resources = '%ss' % resource
req = self.new_list_request(resources,
- params=query_str)
+ params=query_str,
+ tenant_id=tenant_id,
+ as_admin=as_admin)
api = self._api_for_resource(resources)
res = self.deserialize(self.fmt, req.get_response(api))
resource = resource.replace('-', '_')
@@ -846,13 +884,17 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
limit, expected_page_num,
resources=None,
query_params='',
- verify_key='id'):
+ verify_key='id',
+ tenant_id=None,
+ as_admin=False):
if not resources:
resources = '%ss' % resource
query_str = query_params + '&' if query_params else ''
query_str = query_str + ("limit=%s&sort_key=%s&"
"sort_dir=%s") % (limit, sort[0], sort[1])
- req = self.new_list_request(resources, params=query_str)
+ req = self.new_list_request(resources, params=query_str,
+ tenant_id=tenant_id, as_admin=as_admin)
+ neutron_ctx = req.environ['neutron.context']
items_res = []
page_num = 0
api = self._api_for_resource(resources)
@@ -871,6 +913,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
content_type = 'application/%s' % self.fmt
req = testlib_api.create_request(link['href'],
'', content_type)
+ req.environ['neutron.context'] = neutron_ctx
self.assertEqual(len(res[resources]),
limit)
self.assertEqual(expected_page_num, page_num)
@@ -880,7 +923,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
def _test_list_with_pagination_reverse(self, resource, items, sort,
limit, expected_page_num,
resources=None,
- query_params=''):
+ query_params='',
+ tenant_id=None,
+ as_admin=False):
if not resources:
resources = '%ss' % resource
resource = resource.replace('-', '_')
@@ -891,7 +936,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
"sort_key=%s&sort_dir=%s&"
"marker=%s") % (limit, sort[0], sort[1],
marker)
- req = self.new_list_request(resources, params=query_str)
+ req = self.new_list_request(resources, params=query_str,
+ tenant_id=tenant_id, as_admin=as_admin)
+ neutron_ctx = req.environ['neutron.context']
item_res = [items[-1][resource]]
page_num = 0
resources = resources.replace('-', '_')
@@ -909,6 +956,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
content_type = 'application/%s' % self.fmt
req = testlib_api.create_request(link['href'],
'', content_type)
+ req.environ['neutron.context'] = neutron_ctx
self.assertEqual(len(res[resources]),
limit)
self.assertEqual(expected_page_num, page_num)
@@ -1001,10 +1049,9 @@ class TestV2HTTPResponse(NeutronDbPluginV2TestCase):
self._create_network(self.fmt,
'some_net',
True,
- tenant_id=tenant_id,
- set_context=True)
- req = self.new_list_request('networks', params="fields=name")
- req.environ['neutron.context'] = context.Context('', tenant_id)
+ tenant_id=tenant_id)
+ req = self.new_list_request(
+ 'networks', params="fields=name", tenant_id=tenant_id)
res = req.get_response(self.api)
self._check_list_with_fields(res, 'name')
@@ -1020,10 +1067,9 @@ class TestV2HTTPResponse(NeutronDbPluginV2TestCase):
self._create_network(self.fmt,
'some_net',
True,
- tenant_id=tenant_id,
- set_context=True)
- req = self.new_list_request('networks', params="fields=tenant_id")
- req.environ['neutron.context'] = context.Context('', tenant_id)
+ tenant_id=tenant_id)
+ req = self.new_list_request(
+ 'networks', params="fields=tenant_id", tenant_id=tenant_id)
res = req.get_response(self.api)
self._check_list_with_fields(res, 'tenant_id')
@@ -1086,7 +1132,7 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
def test_create_port_json(self):
keys = [('admin_state_up', True), ('status', self.port_create_status)]
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
with self.subnet(network=network) as subnet:
with self.port(name='myname', subnet=subnet) as port:
for k, v in keys:
@@ -1108,7 +1154,7 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
device_id='fake_device',
device_owner='fake_owner',
fixed_ips=[],
- set_context=False)
+ is_admin=True)
def test_create_port_bad_tenant(self):
with self.network() as network:
@@ -1118,17 +1164,15 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
tenant_id='bad_tenant_id',
device_id='fake_device',
device_owner='fake_owner',
- fixed_ips=[],
- set_context=True)
+ fixed_ips=[])
def test_create_port_public_network(self):
keys = [('admin_state_up', True), ('status', self.port_create_status)]
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
port_res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id='another_tenant',
- set_context=True)
+ tenant_id='another_tenant')
port = self.deserialize(self.fmt, port_res)
for k, v in keys:
self.assertEqual(port['port'][k], v)
@@ -1147,11 +1191,10 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
webob.exc.HTTPClientError.code,
tenant_id='tenant_id',
fixed_ips=[],
- set_context=False,
**kwargs)
def test_create_port_public_network_with_ip(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
ip_net = netaddr.IPNetwork('10.0.0.0/24')
with self.subnet(network=network, cidr=str(ip_net)):
keys = [('admin_state_up', True),
@@ -1159,8 +1202,7 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
port_res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id='another_tenant',
- set_context=True)
+ tenant_id='another_tenant')
port = self.deserialize(self.fmt, port_res)
for k, v in keys:
self.assertEqual(port['port'][k], v)
@@ -1170,7 +1212,7 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
self._delete('ports', port['port']['id'])
def test_create_port_anticipating_allocation(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
fixed_ips = [{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id'],
@@ -1181,14 +1223,13 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
def test_create_port_public_network_with_invalid_ip_no_subnet_id(self,
expected_error='InvalidIpForNetwork'):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
with self.subnet(network=network, cidr='10.0.0.0/24'):
ips = [{'ip_address': '1.1.1.1'}]
res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPBadRequest.code,
- fixed_ips=ips,
- set_context=True)
+ fixed_ips=ips)
data = self.deserialize(self.fmt, res)
msg = str(lib_exc.InvalidIpForNetwork(ip_address='1.1.1.1'))
self.assertEqual(expected_error, data['NeutronError']['type'])
@@ -1196,15 +1237,14 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
def test_create_port_public_network_with_invalid_ip_and_subnet_id(self,
expected_error='InvalidIpForSubnet'):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
ips = [{'subnet_id': subnet['subnet']['id'],
'ip_address': '1.1.1.1'}]
res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPBadRequest.code,
- fixed_ips=ips,
- set_context=True)
+ fixed_ips=ips)
data = self.deserialize(self.fmt, res)
msg = str(lib_exc.InvalidIpForSubnet(ip_address='1.1.1.1'))
self.assertEqual(expected_error, data['NeutronError']['type'])
@@ -1342,29 +1382,29 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
self._test_list_ports_filtered_by_fixed_ip(limit=500)
def test_list_ports_public_network(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
with self.subnet(network) as subnet:
with self.port(subnet, tenant_id='tenant_1') as port1,\
self.port(subnet, tenant_id='tenant_2') as port2:
# Admin request - must return both ports
- self._test_list_resources('port', [port1, port2])
+ self._test_list_resources(
+ 'port', [port1, port2], as_admin=True)
# Tenant_1 request - must return single port
- n_context = context.Context('', 'tenant_1')
self._test_list_resources('port', [port1],
- neutron_context=n_context)
+ tenant_id='tenant_1')
# Tenant_2 request - must return single port
- n_context = context.Context('', 'tenant_2')
self._test_list_resources('port', [port2],
- neutron_context=n_context)
+ tenant_id='tenant_2')
def test_list_ports_for_network_owner(self):
with self.network(tenant_id='tenant_1') as network:
- with self.subnet(network) as subnet:
- with self.port(subnet, tenant_id='tenant_1') as port1,\
- self.port(subnet, tenant_id='tenant_2') as port2:
+ with self.subnet(network, tenant_id='tenant_1') as subnet:
+ with self.port(subnet, project_id='tenant_1') as port1,\
+ self.port(subnet, project_id='tenant_2',
+ is_admin=True) as port2:
# network owner request, should return all ports
port_res = self._list_ports(
- 'json', set_context=True, tenant_id='tenant_1')
+ 'json', tenant_id='tenant_1')
port_list = self.deserialize('json', port_res)['ports']
port_ids = [p['id'] for p in port_list]
self.assertEqual(2, len(port_list))
@@ -1373,7 +1413,7 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
# another tenant request, only return ports belong to it
port_res = self._list_ports(
- 'json', set_context=True, tenant_id='tenant_2')
+ 'json', tenant_id='tenant_2')
port_list = self.deserialize('json', port_res)['ports']
port_ids = [p['id'] for p in port_list]
self.assertEqual(1, len(port_list))
@@ -1467,12 +1507,11 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
expected_code=webob.exc.HTTPNotFound.code)
def test_delete_port_public_network(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
port_res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id='another_tenant',
- set_context=True)
+ tenant_id='another_tenant')
port = self.deserialize(self.fmt, port_res)
self._delete('ports', port['port']['id'])
@@ -1482,15 +1521,15 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
def test_delete_port_by_network_owner(self):
with self.network(tenant_id='tenant_1') as network:
with self.subnet(network) as subnet:
- with self.port(subnet, tenant_id='tenant_2') as port:
+ with self.port(subnet, tenant_id='tenant_2',
+ is_admin=True) as port:
self._delete(
- 'ports', port['port']['id'],
- neutron_context=context.Context('', 'tenant_1'))
+ 'ports', port['port']['id'], tenant_id='tenant_1')
self._show('ports', port['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
def test_update_port_with_stale_subnet(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
port = self._make_port(self.fmt, network['network']['id'])
subnet = self._make_subnet(self.fmt, network,
'10.0.0.1', '10.0.0.0/24')
@@ -1528,7 +1567,8 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
data = {'port': {'mac_address': new_mac}}
if updated_fixed_ips:
data['port']['fixed_ips'] = updated_fixed_ips
- req = self.new_update_request('ports', data, port['id'])
+ req = self.new_update_request(
+ 'ports', data, port['id'], as_admin=True)
return req.get_response(self.api), new_mac
def _verify_ips_after_mac_change(self, orig_port, new_port):
@@ -1553,6 +1593,7 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
host_arg = host_arg or {}
arg_list = arg_list or []
with self.port(device_owner=device_owner, subnet=subnet,
+ is_admin=True,
arg_list=arg_list, **host_arg) as port:
self.assertIn('mac_address', port['port'])
res, new_mac = self.update_port_mac(
@@ -1634,7 +1675,8 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
new_mac = port2['port']['mac_address']
data = {'port': {'mac_address': new_mac}}
req = self.new_update_request('ports', data,
- port['port']['id'])
+ port['port']['id'],
+ as_admin=True)
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPConflict.code,
res.status_int)
@@ -1647,16 +1689,14 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
def test_update_port_not_admin(self):
res = self._create_network(self.fmt, 'net1', True,
- tenant_id='not_admin',
- set_context=True)
+ tenant_id='not_admin')
net1 = self.deserialize(self.fmt, res)
res = self._create_port(self.fmt, net1['network']['id'],
- tenant_id='not_admin', set_context=True)
+ tenant_id='not_admin')
port = self.deserialize(self.fmt, res)
data = {'port': {'admin_state_up': False}}
- neutron_context = context.Context('', 'not_admin')
port = self._update('ports', port['port']['id'], data,
- neutron_context=neutron_context)
+ request_tenant_id='not_admin')
self.assertFalse(port['port']['admin_state_up'])
def test_update_device_id_unchanged(self):
@@ -2746,7 +2786,7 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
name = 'public_net'
keys = [('subnets', []), ('name', name), ('admin_state_up', True),
('status', self.net_create_status), ('shared', True)]
- with self.network(name=name, shared=True) as net:
+ with self.network(name=name, shared=True, as_admin=True) as net:
for k, v in keys:
self.assertEqual(net['network'][k], v)
@@ -2756,8 +2796,7 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
webob.exc.HTTPClientError) as ctx_manager:
with self.network(name=name,
shared=True,
- tenant_id="another_tenant",
- set_context=True):
+ tenant_id="another_tenant"):
pass
self.assertEqual(webob.exc.HTTPForbidden.code,
ctx_manager.exception.code)
@@ -2773,12 +2812,12 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
res['network']['name'])
def test_update_shared_network_noadmin_returns_403(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
data = {'network': {'name': 'a_brand_new_name'}}
req = self.new_update_request('networks',
data,
- network['network']['id'])
- req.environ['neutron.context'] = context.Context('', 'somebody')
+ network['network']['id'],
+ tenant_id='other-tenant')
res = req.get_response(self.api)
self.assertEqual(403, res.status_int)
@@ -2787,7 +2826,8 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
data = {'network': {'shared': True}}
req = self.new_update_request('networks',
data,
- network['network']['id'])
+ network['network']['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertTrue(res['network']['shared'])
@@ -2808,7 +2848,8 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
data = {'network': {'shared': True}}
req = self.new_update_request('networks',
data,
- network['network']['id'])
+ network['network']['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertTrue(res['network']['shared'])
# must query db to see whether subnet's shared attribute
@@ -2819,39 +2860,38 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
self.assertTrue(subnet_db['shared'])
def test_update_network_set_not_shared_single_tenant(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id=network['network']['tenant_id'],
- set_context=True)
+ tenant_id=network['network']['tenant_id'])
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
- network['network']['id'])
+ network['network']['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertFalse(res['network']['shared'])
port1 = self.deserialize(self.fmt, res1)
self._delete('ports', port1['port']['id'])
- def test_update_network_set_not_shared_other_tenant_returns_409(self):
- with self.network(shared=True) as network:
+ def test_update_network_set_not_shared_other_tenant_returns_403(self):
+ with self.network(shared=True, as_admin=True) as network:
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id='somebody_else',
- set_context=True)
+ tenant_id='somebody_else')
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
network['network']['id'])
- self.assertEqual(webob.exc.HTTPConflict.code,
+ self.assertEqual(webob.exc.HTTPForbidden.code,
req.get_response(self.api).status_int)
port1 = self.deserialize(self.fmt, res1)
self._delete('ports', port1['port']['id'])
def test_update_network_set_not_shared_other_tenant_access_via_rbac(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
ctx = context.get_admin_context()
with db_api.CONTEXT_WRITER.using(ctx):
network_obj.NetworkRBAC(
@@ -2867,33 +2907,32 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id='somebody_else',
- set_context=True)
+ tenant_id='somebody_else')
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
- network['network']['id'])
+ network['network']['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertFalse(res['network']['shared'])
port1 = self.deserialize(self.fmt, res1)
self._delete('ports', port1['port']['id'])
def test_update_network_set_not_shared_multi_tenants_returns_409(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id='somebody_else',
- set_context=True)
+ tenant_id='somebody_else')
res2 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id=network['network']['tenant_id'],
- set_context=True)
+ tenant_id=network['network']['tenant_id'])
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
- network['network']['id'])
+ network['network']['id'],
+ as_admin=True)
self.assertEqual(webob.exc.HTTPConflict.code,
req.get_response(self.api).status_int)
port1 = self.deserialize(self.fmt, res1)
@@ -2902,22 +2941,21 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
self._delete('ports', port2['port']['id'])
def test_update_network_set_not_shared_multi_tenants2_returns_409(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id='somebody_else',
- set_context=True)
+ tenant_id='somebody_else')
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.0.0/24',
webob.exc.HTTPCreated.code,
- tenant_id=network['network']['tenant_id'],
- set_context=True)
+ tenant_id=network['network']['tenant_id'])
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
- network['network']['id'])
+ network['network']['id'],
+ as_admin=True)
self.assertEqual(webob.exc.HTTPConflict.code,
req.get_response(self.api).status_int)
@@ -2967,7 +3005,8 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
{'network': {'name': 'n2',
'tenant_id': 't1'}}]
- res = self._create_bulk_from_list(self.fmt, 'network', networks)
+ res = self._create_bulk_from_list(self.fmt, 'network', networks,
+ as_admin=True)
self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
def test_create_networks_bulk_tenants_and_quotas_fail(self):
@@ -2987,7 +3026,8 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
{'network': {'name': 'n2',
'tenant_id': 't1'}}]
- res = self._create_bulk_from_list(self.fmt, 'network', networks)
+ res = self._create_bulk_from_list(self.fmt, 'network', networks,
+ as_admin=True)
self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
def test_create_networks_bulk_emulated(self):
@@ -3136,9 +3176,9 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
- with self.network(name='net1', shared=True) as net1,\
+ with self.network(name='net1', shared=True, as_admin=True) as net1,\
self.network(name='net2', shared=False) as net2,\
- self.network(name='net3', shared=True) as net3:
+ self.network(name='net3', shared=True, as_admin=True) as net3:
self._test_list_with_pagination('network',
(net1, net2, net3),
('name', 'asc'), 2, 2,
@@ -3215,14 +3255,13 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
tenant_id='tenant1') as net1,\
self.network(shared=True,
name='net2',
+ as_admin=True,
tenant_id='another_tenant') as net2,\
self.network(shared=False,
name='net3',
tenant_id='another_tenant'):
- ctx = context.Context(user_id='non_admin',
- tenant_id='tenant1',
- is_admin=False)
- self._test_list_resources('network', (net1, net2), ctx)
+ self._test_list_resources('network', (net1, net2),
+ tenant_id='tenant1')
def test_show_network(self):
with self.network(name='net1') as net:
@@ -3760,8 +3799,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
ip_version=constants.IP_VERSION_4,
tenant_id='bad_tenant_id',
gateway_ip='10.0.2.1',
- device_owner='fake_owner',
- set_context=True)
+ device_owner='fake_owner')
def test_create_subnet_as_admin(self):
with self.network() as network:
@@ -3773,7 +3811,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
tenant_id='bad_tenant_id',
gateway_ip='10.0.2.1',
device_owner='fake_owner',
- set_context=False)
+ as_admin=True)
def test_create_subnet_nonzero_cidr(self):
# Pass None as gateway_ip to prevent ip auto allocation for gw
@@ -4464,7 +4502,8 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
'tenant_id': 'tenant_id',
'device_id': 'fake_device',
'device_owner': constants.DEVICE_OWNER_ROUTER_GW}
- res = self._create_port(self.fmt, net_id=net_id, **kwargs)
+ res = self._create_port(self.fmt, net_id=net_id,
+ is_admin=True, **kwargs)
self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
def test_create_subnet_ipv6_first_ip_owned_by_non_router(self):
@@ -4480,7 +4519,8 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
'tenant_id': 'tenant_id',
'device_id': 'fake_device',
'device_owner': 'fake_owner'}
- res = self._create_port(self.fmt, net_id=net_id, **kwargs)
+ res = self._create_port(self.fmt, net_id=net_id,
+ is_admin=True, **kwargs)
self.assertEqual(webob.exc.HTTPClientError.code,
res.status_int)
@@ -4804,7 +4844,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
ra_addr_mode=constants.DHCPV6_STATELESS)
def test_update_subnet_shared_returns_400(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'shared': True}}
req = self.new_update_request('subnets', data,
@@ -5294,7 +5334,8 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
with self.subnet(network=network,
gateway_ip='10.0.0.1',
cidr='10.0.0.0/24',
- tenant_id=project_id),\
+ tenant_id=project_id,
+ as_admin=True),\
self.subnet(network=network,
gateway_ip='10.0.1.1',
cidr='10.0.1.0/24'),\
@@ -5351,7 +5392,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
self._test_list_resources('subnet', subnets)
def test_list_subnets_shared(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
with self.subnet(cidr='10.0.1.0/24') as priv_subnet:
# normal user should see only 1 subnet
@@ -6117,8 +6158,7 @@ class TestSubnetPoolsV2(NeutronDbPluginV2TestCase):
min_prefixlen='24',
shared=True)
admin_res = self._list('subnetpools')
- mortal_res = self._list('subnetpools',
- neutron_context=context.Context('', 'not-the-owner'))
+ mortal_res = self._list('subnetpools', tenant_id='not-the-owner')
self.assertEqual(1, len(admin_res['subnetpools']))
self.assertEqual(1, len(mortal_res['subnetpools']))
@@ -6130,8 +6170,7 @@ class TestSubnetPoolsV2(NeutronDbPluginV2TestCase):
min_prefixlen='24',
shared=False)
admin_res = self._list('subnetpools')
- mortal_res = self._list('subnetpools',
- neutron_context=context.Context('', 'not-the-owner'))
+ mortal_res = self._list('subnetpools', tenant_id='not-the-owner')
self.assertEqual(1, len(admin_res['subnetpools']))
self.assertEqual(0, len(mortal_res['subnetpools']))
@@ -7197,10 +7236,10 @@ class DbOperationBoundMixin(object):
def get_api_kwargs(self):
context_ = self._get_context()
- return {'set_context': True, 'tenant_id': context_.project_id}
+ return {'tenant_id': context_.project_id}
def _list_and_record_queries(self, resource, query_params=None):
- kwargs = {'neutron_context': self._get_context()}
+ kwargs = {}
if query_params:
kwargs['query_params'] = query_params
# list once before tracking to flush out any quota recalculations.
diff --git a/neutron/tests/unit/db/test_dvr_mac_db.py b/neutron/tests/unit/db/test_dvr_mac_db.py
index 80d650a7d8..6f87672712 100644
--- a/neutron/tests/unit/db/test_dvr_mac_db.py
+++ b/neutron/tests/unit/db/test_dvr_mac_db.py
@@ -188,22 +188,28 @@ class DvrDbMixinTestCase(test_plugin.Ml2PluginV2TestCase):
arg_list = (portbindings.HOST_ID,)
with self.subnet() as subnet,\
self.port(subnet=subnet,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX,
arg_list=arg_list, **host_arg) as compute_port,\
self.port(subnet=subnet,
device_owner=constants.DEVICE_OWNER_DHCP,
+ is_admin=True,
arg_list=arg_list, **host_arg) as dhcp_port,\
self.port(subnet=subnet,
device_owner=constants.DEVICE_OWNER_LOADBALANCER,
+ is_admin=True,
arg_list=arg_list, **host_arg) as lb_port,\
self.port(device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX,
+ is_admin=True,
arg_list=arg_list, **host_arg),\
self.port(subnet=subnet,
device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX,
+ is_admin=True,
arg_list=arg_list,
**{portbindings.HOST_ID: 'other'}),\
self.port(subnet=subnet,
device_owner=constants.DEVICE_OWNER_NETWORK_PREFIX,
+ is_admin=True,
arg_list=arg_list, **host_arg):
expected_ids = [port['port']['id'] for port in
[compute_port, dhcp_port, lb_port]]
diff --git a/neutron/tests/unit/db/test_ipam_backend_mixin.py b/neutron/tests/unit/db/test_ipam_backend_mixin.py
index e81a908ec2..fa2872a317 100644
--- a/neutron/tests/unit/db/test_ipam_backend_mixin.py
+++ b/neutron/tests/unit/db/test_ipam_backend_mixin.py
@@ -373,7 +373,8 @@ class TestPortUpdateIpam(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
arg_list=(portbindings.HOST_ID,),
- **{portbindings.HOST_ID: 'fakehost'})
+ **{portbindings.HOST_ID: 'fakehost'},
+ is_admin=True)
port = self.deserialize(self.fmt, response)
# Create the subnet and try to update the port to get an IP
@@ -381,7 +382,8 @@ class TestPortUpdateIpam(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
data = {'port': {
'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
res = self.deserialize(self.fmt, response)
diff --git a/neutron/tests/unit/db/test_ipam_pluggable_backend.py b/neutron/tests/unit/db/test_ipam_pluggable_backend.py
index b77ea16458..958c0abbc6 100644
--- a/neutron/tests/unit/db/test_ipam_pluggable_backend.py
+++ b/neutron/tests/unit/db/test_ipam_pluggable_backend.py
@@ -71,7 +71,6 @@ class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase):
plugin = 'neutron.tests.unit.db.test_ipam_backend_mixin.TestPlugin'
super(TestDbBasePluginIpam, self).setUp(plugin=plugin)
cfg.CONF.set_override("ipam_driver", 'internal')
- self.tenant_id = uuidutils.generate_uuid()
self.subnet_id = uuidutils.generate_uuid()
self.admin_context = ncontext.get_admin_context()
@@ -89,7 +88,7 @@ class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase):
'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'
},
'subnet_request': ipam_req.SpecificSubnetRequest(
- self.tenant_id,
+ self._tenant_id,
self.subnet_id,
'10.0.0.0/24',
'10.0.0.1',
diff --git a/neutron/tests/unit/db/test_l3_db.py b/neutron/tests/unit/db/test_l3_db.py
index 9b65b60f09..43d4ec4a49 100644
--- a/neutron/tests/unit/db/test_l3_db.py
+++ b/neutron/tests/unit/db/test_l3_db.py
@@ -928,7 +928,8 @@ class L3TestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
with db_api.CONTEXT_WRITER.using(self.ctx):
res = self._create_network(
self.fmt, name, True,
- arg_list=(extnet_apidef.EXTERNAL,), **kwargs)
+ arg_list=(extnet_apidef.EXTERNAL,),
+ as_admin=True, **kwargs)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(self.fmt, res)
diff --git a/neutron/tests/unit/db/test_ovn_revision_numbers_db.py b/neutron/tests/unit/db/test_ovn_revision_numbers_db.py
index 62dfc9caa1..f375d3602d 100644
--- a/neutron/tests/unit/db/test_ovn_revision_numbers_db.py
+++ b/neutron/tests/unit/db/test_ovn_revision_numbers_db.py
@@ -237,7 +237,7 @@ class TestRevisionNumberMaintenance(test_securitygroup.SecurityGroupsTestCase,
'10.0.0.0/24')['subnet']
self._set_net_external(self.net['id'])
info = {'network_id': self.net['id']}
- router = self._make_router(self.fmt, None,
+ router = self._make_router(self.fmt, self._tenant_id,
external_gateway_info=info)['router']
fip = self._make_floatingip(self.fmt, self.net['id'])['floatingip']
port = self._make_port(self.fmt, self.net['id'])['port']
diff --git a/neutron/tests/unit/extensions/test_address_group.py b/neutron/tests/unit/extensions/test_address_group.py
index ff37ba684f..e3339b0320 100644
--- a/neutron/tests/unit/extensions/test_address_group.py
+++ b/neutron/tests/unit/extensions/test_address_group.py
@@ -84,9 +84,8 @@ class AddressGroupTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _test_address_group_actions(self, addr_group_id, data, action,
expected=None, tenant_id=None):
act_req = self.new_action_request(
- 'address-groups', data, addr_group_id, action)
- act_req.environ['neutron.context'] = context.Context(
- '', tenant_id or self._tenant_id)
+ 'address-groups', data, addr_group_id, action,
+ tenant_id=tenant_id or self._tenant_id)
act_res = act_req.get_response(self.ext_api)
if expected:
diff --git a/neutron/tests/unit/extensions/test_address_scope.py b/neutron/tests/unit/extensions/test_address_scope.py
index 6d5eb30031..7e33980686 100644
--- a/neutron/tests/unit/extensions/test_address_scope.py
+++ b/neutron/tests/unit/extensions/test_address_scope.py
@@ -49,39 +49,40 @@ class AddressScopeTestExtensionManager(object):
class AddressScopeTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _create_address_scope(self, fmt, ip_version=constants.IP_VERSION_4,
- expected_res_status=None, admin=False, **kwargs):
+ expected_res_status=None, admin=False,
+ tenant_id=None, **kwargs):
address_scope = {'address_scope': {}}
address_scope['address_scope']['ip_version'] = ip_version
+ tenant_id = tenant_id or self._tenant_id
for k, v in kwargs.items():
address_scope['address_scope'][k] = str(v)
address_scope_req = self.new_create_request('address-scopes',
- address_scope, fmt)
-
- if not admin:
- neutron_context = context.Context('', kwargs.get('tenant_id',
- self._tenant_id))
- address_scope_req.environ['neutron.context'] = neutron_context
+ address_scope, fmt,
+ tenant_id=tenant_id,
+ as_admin=admin)
address_scope_res = address_scope_req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(expected_res_status, address_scope_res.status_int)
return address_scope_res
- def _make_address_scope(self, fmt, ip_version, admin=False, **kwargs):
+ def _make_address_scope(self, fmt, ip_version, admin=False, tenant_id=None,
+ **kwargs):
res = self._create_address_scope(fmt, ip_version,
- admin=admin, **kwargs)
+ admin=admin, tenant_id=tenant_id,
+ **kwargs)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
@contextlib.contextmanager
def address_scope(self, ip_version=constants.IP_VERSION_4,
- admin=False, **kwargs):
- if 'project_id' in kwargs:
- kwargs['tenant_id'] = kwargs['project_id']
+ admin=False, tenant_id=None, **kwargs):
+ tenant_id = tenant_id if tenant_id else kwargs.pop(
+ 'tenant_id', None)
addr_scope = self._make_address_scope(self.fmt, ip_version,
- admin, **kwargs)
+ admin, tenant_id, **kwargs)
yield addr_scope
def _test_create_address_scope(self, ip_version=constants.IP_VERSION_4,
@@ -99,9 +100,9 @@ class AddressScopeTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _test_update_address_scope(self, addr_scope_id, data, admin=False,
expected=None, tenant_id=None):
update_req = self.new_update_request(
- 'address-scopes', data, addr_scope_id)
- update_req.environ['neutron.context'] = context.Context(
- '', tenant_id or self._tenant_id, is_admin=admin)
+ 'address-scopes', data, addr_scope_id,
+ tenant_id=tenant_id or self._tenant_id,
+ as_admin=admin)
update_res = update_req.get_response(self.ext_api)
if expected:
@@ -244,8 +245,7 @@ class TestAddressScope(AddressScopeTestCase):
admin=True)
admin_res = self._list('address-scopes')
mortal_res = self._list(
- 'address-scopes',
- neutron_context=context.Context('', 'not-the-owner'))
+ 'address-scopes', tenant_id='not-the-owner')
self.assertEqual(1, len(admin_res['address_scopes']))
self.assertEqual(1, len(mortal_res['address_scopes']))
@@ -254,8 +254,7 @@ class TestAddressScope(AddressScopeTestCase):
name='foo-address-scope')
admin_res = self._list('address-scopes')
mortal_res = self._list(
- 'address-scopes',
- neutron_context=context.Context('', 'not-the-owner'))
+ 'address-scopes', tenant_id='not-the-owner')
self.assertEqual(1, len(admin_res['address_scopes']))
self.assertEqual(0, len(mortal_res['address_scopes']))
diff --git a/neutron/tests/unit/extensions/test_agent.py b/neutron/tests/unit/extensions/test_agent.py
index 2a084bf4dd..0cb1cb7037 100644
--- a/neutron/tests/unit/extensions/test_agent.py
+++ b/neutron/tests/unit/extensions/test_agent.py
@@ -59,11 +59,10 @@ class TestAgentPlugin(db_base_plugin_v2.NeutronDbPluginV2,
class AgentDBTestMixIn(object):
def _list_agents(self, expected_res_status=None,
- neutron_context=None,
query_string=None):
agent_res = self._list('agents',
- neutron_context=neutron_context,
- query_params=query_string)
+ query_params=query_string,
+ as_admin=True)
if expected_res_status:
self.assertEqual(expected_res_status, agent_res.status_int)
return agent_res
@@ -107,14 +106,12 @@ class AgentDBTestCase(AgentDBTestMixIn,
def test_create_agent(self):
data = {'agent': {}}
_req = self.new_create_request('agents', data, self.fmt)
- _req.environ['neutron.context'] = context.Context(
- '', 'tenant_id')
res = _req.get_response(self.ext_api)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_list_agent(self):
agents = self._register_agent_states()
- res = self._list('agents')
+ res = self._list('agents', as_admin=True)
self.assertEqual(len(agents), len(res['agents']))
def test_show_agent(self):
@@ -122,7 +119,7 @@ class AgentDBTestCase(AgentDBTestMixIn,
agents = self._list_agents(
query_string='binary=' + constants.AGENT_PROCESS_L3)
self.assertEqual(2, len(agents['agents']))
- agent = self._show('agents', agents['agents'][0]['id'])
+ agent = self._show('agents', agents['agents'][0]['id'], as_admin=True)
self.assertEqual(constants.AGENT_PROCESS_L3, agent['agent']['binary'])
def test_update_agent(self):
@@ -132,13 +129,13 @@ class AgentDBTestCase(AgentDBTestMixIn,
'&host=' + L3_HOSTB))
self.assertEqual(1, len(agents['agents']))
com_id = agents['agents'][0]['id']
- agent = self._show('agents', com_id)
+ agent = self._show('agents', com_id, as_admin=True)
new_agent = {}
new_agent['agent'] = {}
new_agent['agent']['admin_state_up'] = False
new_agent['agent']['description'] = 'description'
- self._update('agents', com_id, new_agent)
- agent = self._show('agents', com_id)
+ self._update('agents', com_id, new_agent, as_admin=True)
+ agent = self._show('agents', com_id, as_admin=True)
self.assertFalse(agent['agent']['admin_state_up'])
self.assertEqual('description', agent['agent']['description'])
diff --git a/neutron/tests/unit/extensions/test_availability_zone.py b/neutron/tests/unit/extensions/test_availability_zone.py
index 78534c541f..e00054a026 100644
--- a/neutron/tests/unit/extensions/test_availability_zone.py
+++ b/neutron/tests/unit/extensions/test_availability_zone.py
@@ -71,12 +71,11 @@ class TestAZAgentCase(AZTestCommon):
{'name': 'nova2', 'resource': 'network', 'state': 'available'},
{'name': 'nova2', 'resource': 'router', 'state': 'available'},
{'name': 'nova3', 'resource': 'router', 'state': 'unavailable'}]
- res = self._list('availability_zones')
+ res = self._list('availability_zones', as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected, azs)
# not admin case
- ctx = context.Context('', 'noadmin')
- res = self._list('availability_zones', neutron_context=ctx)
+ res = self._list('availability_zones', as_admin=False)
azs = res['availability_zones']
self.assertCountEqual(expected, azs)
@@ -89,33 +88,37 @@ class TestAZAgentCase(AZTestCommon):
{'name': 'nova2', 'resource': 'network', 'state': 'available'},
{'name': 'nova2', 'resource': 'router', 'state': 'available'},
{'name': 'nova3', 'resource': 'router', 'state': 'unavailable'}]
- res = self._list('availability_zones')
+ res = self._list('availability_zones', as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected, azs)
# list with filter of 'name'
res = self._list('availability_zones',
- query_params="name=nova1")
+ query_params="name=nova1",
+ as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected[:1], azs)
# list with filter of 'resource'
res = self._list('availability_zones',
- query_params="resource=router")
+ query_params="resource=router",
+ as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected[-2:], azs)
# list with filter of 'state' as 'available'
res = self._list('availability_zones',
- query_params="state=available")
+ query_params="state=available",
+ as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected[:3], azs)
# list with filter of 'state' as 'unavailable'
res = self._list('availability_zones',
- query_params="state=unavailable")
+ query_params="state=unavailable",
+ as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected[-1:], azs)
def test_list_agent_with_az(self):
helpers.register_dhcp_agent(host='host1', az='nova1')
- res = self._list('agents')
+ res = self._list('agents', as_admin=True)
self.assertEqual('nova1',
res['agents'][0]['availability_zone'])
diff --git a/neutron/tests/unit/extensions/test_data_plane_status.py b/neutron/tests/unit/extensions/test_data_plane_status.py
index a6c99f1b77..f48752c7be 100644
--- a/neutron/tests/unit/extensions/test_data_plane_status.py
+++ b/neutron/tests/unit/extensions/test_data_plane_status.py
@@ -80,7 +80,8 @@ class DataPlaneStatusExtensionTestCase(
data = {'port': {'data_plane_status': constants.ACTIVE}}
req = self.new_update_request(port_def.COLLECTION_NAME,
data,
- port['port']['id'])
+ port['port']['id'],
+ as_admin=True)
res = req.get_response(self.api)
p = self.deserialize(self.fmt, res)['port']
self.assertEqual(200, res.status_code)
@@ -106,9 +107,11 @@ class DataPlaneStatusExtensionTestCase(
with self.port(name='port1') as port:
res = self._update(port_def.COLLECTION_NAME, port['port']['id'],
{'port': {dps_lib.DATA_PLANE_STATUS:
- constants.ACTIVE}})
+ constants.ACTIVE}},
+ as_admin=True)
res = self._update(port_def.COLLECTION_NAME, port['port']['id'],
- {'port': {'name': 'port2'}})
+ {'port': {'name': 'port2'}},
+ as_admin=True)
self.assertEqual(res['port']['name'], 'port2')
self.assertEqual(res['port'][dps_lib.DATA_PLANE_STATUS],
constants.ACTIVE)
@@ -125,7 +128,8 @@ class DataPlaneStatusExtensionTestCase(
with self.port(name='port1') as port:
self._update(port_def.COLLECTION_NAME, port['port']['id'],
{'port': {dps_lib.DATA_PLANE_STATUS:
- constants.ACTIVE}})
+ constants.ACTIVE}},
+ as_admin=True)
notify = set(n['event_type'] for n in fake_notifier.NOTIFICATIONS)
duplicated_notify = expect_notify & notify
self.assertEqual(expect_notify, duplicated_notify)
diff --git a/neutron/tests/unit/extensions/test_default_subnetpools.py b/neutron/tests/unit/extensions/test_default_subnetpools.py
index b089cdc4ad..c0fb2e1bec 100644
--- a/neutron/tests/unit/extensions/test_default_subnetpools.py
+++ b/neutron/tests/unit/extensions/test_default_subnetpools.py
@@ -71,9 +71,13 @@ class DefaultSubnetpoolsExtensionTestCase(
return self.deserialize(self.fmt, res)['subnet']
- def _update_subnetpool(self, subnetpool_id, **data):
+ def _update_subnetpool(self, subnetpool_id, tenant_id=None,
+ as_admin=False, **data):
+ if 'shared' in data or 'is_default' in data:
+ as_admin = True
update_req = self.new_update_request(
- 'subnetpools', {'subnetpool': data}, subnetpool_id)
+ 'subnetpools', {'subnetpool': data}, subnetpool_id,
+ tenant_id=tenant_id, as_admin=as_admin)
res = update_req.get_response(self.api)
return self.deserialize(self.fmt, res)['subnetpool']
diff --git a/neutron/tests/unit/extensions/test_dns.py b/neutron/tests/unit/extensions/test_dns.py
index 34d8f76c98..aca700c94e 100644
--- a/neutron/tests/unit/extensions/test_dns.py
+++ b/neutron/tests/unit/extensions/test_dns.py
@@ -109,10 +109,8 @@ class DnsExtensionTestCase(test_plugin.Ml2PluginV2TestCase):
self.assertEqual(expected_res_status, port_res.status_int)
return port_res
- def _test_list_resources(self, resource, items, neutron_context=None,
- query_params=None):
+ def _test_list_resources(self, resource, items, query_params=None):
res = self._list('%ss' % resource,
- neutron_context=neutron_context,
query_params=query_params)
resource = resource.replace('-', '_')
self.assertCountEqual([i['id'] for i in res['%ss' % resource]],
diff --git a/neutron/tests/unit/extensions/test_expose_port_forwarding_in_fip.py b/neutron/tests/unit/extensions/test_expose_port_forwarding_in_fip.py
index 7b2561b53b..f11d8c03bb 100644
--- a/neutron/tests/unit/extensions/test_expose_port_forwarding_in_fip.py
+++ b/neutron/tests/unit/extensions/test_expose_port_forwarding_in_fip.py
@@ -112,7 +112,8 @@ class TestExtendFipPortForwardingExtension(
ctx = context.get_admin_context()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as extnet, self.network() as innet:
+ with self.network(as_admin=True, **kwargs) as extnet, \
+ self.network() as innet:
with self.subnet(network=extnet, cidr='200.0.0.0/22'), \
self.subnet(network=innet, cidr='10.0.0.0/24') as insub, \
self.router() as router:
@@ -148,7 +149,8 @@ class TestExtendFipPortForwardingExtension(
ctx = context.get_admin_context()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as extnet, self.network() as innet:
+ with self.network(as_admin=True, **kwargs) as extnet,\
+ self.network() as innet:
with self.subnet(network=extnet, cidr='200.0.0.0/22'),\
self.subnet(network=innet, cidr='10.0.0.0/24') as insub,\
self.router() as router:
@@ -241,7 +243,8 @@ class TestExtendFipPortForwardingExtension(
ctx = context.get_admin_context()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as extnet, self.network() as innet:
+ with self.network(as_admin=True, **kwargs) as extnet,\
+ self.network() as innet:
with self.subnet(network=extnet, cidr='200.0.0.0/22'),\
self.subnet(network=innet, cidr='10.0.0.0/24') as insub,\
self.subnet(network=innet, cidr='10.0.8.0/24') as insub2,\
@@ -317,10 +320,11 @@ class TestExtendFipPortForwardingExtension(
ctx = context.get_admin_context()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as extnet, self.network() as innet:
+ with self.network(as_admin=True, **kwargs) as extnet,\
+ self.network() as innet:
with self.subnet(network=extnet, cidr='200.0.0.0/22'),\
self.subnet(network=innet, cidr='10.0.0.0/24') as insub,\
- self.router(distributed=True) as router:
+ self.router(distributed=True, as_admin=True) as router:
fip = self._make_floatingip(self.fmt, extnet['network']['id'])
# check the floatingip response contains port_forwarding field
self.assertIn(apidef.COLLECTION_NAME, fip['floatingip'])
diff --git a/neutron/tests/unit/extensions/test_external_net.py b/neutron/tests/unit/extensions/test_external_net.py
index edc76a2dbe..6e22ab658a 100644
--- a/neutron/tests/unit/extensions/test_external_net.py
+++ b/neutron/tests/unit/extensions/test_external_net.py
@@ -65,7 +65,8 @@ class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _set_net_external(self, net_id):
self._update('networks', net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
def test_list_nets_external(self):
with self.network() as n1:
@@ -111,13 +112,14 @@ class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
data = {'network': {'router:external': True}}
req = self.new_update_request('networks',
data,
- network['network']['id'])
- req.environ['neutron.context'] = context.Context('', 'noadmin')
+ network['network']['id'],
+ tenant_id='noadmin')
res = req.get_response(self.api)
self.assertEqual(exc.HTTPForbidden.code, res.status_int)
def test_update_network_external_net_with_ports_set_not_shared(self):
- with self.network(router__external=True, shared=True) as ext_net,\
+ with self.network(router__external=True, shared=True,
+ as_admin=True) as ext_net,\
self.subnet(network=ext_net) as ext_subnet, \
self.port(subnet=ext_subnet,
tenant_id='',
@@ -125,7 +127,8 @@ class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
- ext_net['network']['id'])
+ ext_net['network']['id'],
+ as_admin=True)
res = req.get_response(self.api)
self.assertEqual(exc.HTTPOk.code, res.status_int)
ctx = context.Context(None, None, is_admin=True)
@@ -158,18 +161,18 @@ class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
self.assertEqual(conditions.__str__(), "%s OR %s" % (txt, txt2))
def test_create_port_external_network_non_admin_fails(self):
- with self.network(router__external=True) as ext_net:
+ with self.network(as_admin=True, router__external=True) as ext_net:
with self.subnet(network=ext_net) as ext_subnet:
with testtools.ExpectedException(
exc.HTTPClientError) as ctx_manager:
with self.port(subnet=ext_subnet,
- set_context='True',
+ is_admin=False,
tenant_id='noadmin'):
pass
self.assertEqual(403, ctx_manager.exception.code)
def test_create_port_external_network_admin_succeeds(self):
- with self.network(router__external=True) as ext_net:
+ with self.network(router__external=True, as_admin=True) as ext_net:
with self.subnet(network=ext_net) as ext_subnet:
with self.port(subnet=ext_subnet) as port:
self.assertEqual(port['port']['network_id'],
@@ -178,13 +181,13 @@ class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def test_create_external_network_non_admin_fails(self):
with testtools.ExpectedException(exc.HTTPClientError) as ctx_manager:
with self.network(router__external=True,
- set_context='True',
+ as_admin=False,
tenant_id='noadmin'):
pass
self.assertEqual(403, ctx_manager.exception.code)
def test_create_external_network_admin_succeeds(self):
- with self.network(router__external=True) as ext_net:
+ with self.network(router__external=True, as_admin=True) as ext_net:
self.assertTrue(ext_net['network'][extnet_apidef.EXTERNAL])
def test_delete_network_check_disassociated_floatingips(self):
diff --git a/neutron/tests/unit/extensions/test_extraroute.py b/neutron/tests/unit/extensions/test_extraroute.py
index 4e6b948dbd..48866e2f42 100644
--- a/neutron/tests/unit/extensions/test_extraroute.py
+++ b/neutron/tests/unit/extensions/test_extraroute.py
@@ -17,7 +17,6 @@ from neutron_lib.api.definitions import external_net as enet_apidef
from neutron_lib.api.definitions import extraroute as xroute_apidef
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib import constants
-from neutron_lib import context
from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_utils import uuidutils
@@ -62,14 +61,15 @@ class TestExtraRouteL3NatServicePlugin(test_l3.TestL3NatServicePlugin,
class ExtraRouteDBTestCaseBase(object):
def _routes_update_prepare(
self, router_id, subnet_id,
- port_id, routes, skip_add=False, tenant_id=None):
+ port_id, routes, skip_add=False, tenant_id=None, as_admin=False):
if not skip_add:
self._router_interface_action(
- 'add', router_id, subnet_id, port_id, tenant_id=None)
- ctxt = context.Context('', tenant_id) if tenant_id else None
+ 'add', router_id, subnet_id, port_id, tenant_id=tenant_id,
+ as_admin=as_admin)
+ tenant_id = tenant_id or self._tenant_id
self._update('routers', router_id, {'router': {'routes': routes}},
- neutron_context=ctxt)
- return self._show('routers', router_id)
+ request_tenant_id=tenant_id, as_admin=as_admin)
+ return self._show('routers', router_id, tenant_id=tenant_id)
def _routes_update_cleanup(self, port_id, subnet_id, router_id, routes):
self._update('routers', router_id, {'router': {'routes': routes}})
@@ -91,7 +91,8 @@ class ExtraRouteDBTestCaseBase(object):
def test_route_update_with_external_route(self):
my_tenant = 'tenant1'
with self.subnet(cidr='10.0.1.0/24', tenant_id='notme') as ext_subnet,\
- self.port(subnet=ext_subnet) as nexthop_port:
+ self.port(subnet=ext_subnet,
+ tenant_id='notme') as nexthop_port:
nexthop_ip = nexthop_port['port']['fixed_ips'][0]['ip_address']
routes = [{'destination': '135.207.0.0/16',
'nexthop': nexthop_ip}]
@@ -107,14 +108,14 @@ class ExtraRouteDBTestCaseBase(object):
def test_route_update_with_route_via_another_tenant_subnet(self):
my_tenant = 'tenant1'
with self.subnet(cidr='10.0.1.0/24', tenant_id='notme') as subnet,\
- self.port(subnet=subnet) as nexthop_port:
+ self.port(subnet=subnet, tenant_id='notme') as nexthop_port:
nexthop_ip = nexthop_port['port']['fixed_ips'][0]['ip_address']
routes = [{'destination': '135.207.0.0/16',
'nexthop': nexthop_ip}]
with self.router(tenant_id=my_tenant) as r:
body = self._routes_update_prepare(
r['router']['id'], subnet['subnet']['id'], None, routes,
- tenant_id=my_tenant)
+ tenant_id=my_tenant, as_admin=True)
self.assertEqual(routes, body['router']['routes'])
def test_route_clear_routes_with_None(self):
diff --git a/neutron/tests/unit/extensions/test_flavors.py b/neutron/tests/unit/extensions/test_flavors.py
index cf8dadc707..5589a88356 100644
--- a/neutron/tests/unit/extensions/test_flavors.py
+++ b/neutron/tests/unit/extensions/test_flavors.py
@@ -198,7 +198,9 @@ class FlavorExtensionTestCase(extension.ExtensionTestCase):
'service_profiles': ['profile-1']}}
instance = self.plugin.return_value
instance.get_flavor.return_value = expected['flavor']
- res = self.api.get(_get_path('flavors', id=flavor_id, fmt=self.fmt))
+ res = self.api.get(
+ _get_path('flavors', id=flavor_id, fmt=self.fmt),
+ extra_environ=test_base._get_neutron_env(as_admin=True))
instance.get_flavor.assert_called_with(mock.ANY,
flavor_id,
fields=mock.ANY)
@@ -218,7 +220,9 @@ class FlavorExtensionTestCase(extension.ExtensionTestCase):
'service_profiles': ['profile-2', 'profile-1']}]}
instance = self.plugin.return_value
instance.get_flavors.return_value = data['flavors']
- res = self.api.get(_get_path('flavors', fmt=self.fmt))
+ res = self.api.get(
+ _get_path('flavors', fmt=self.fmt),
+ extra_environ=test_base._get_neutron_env(as_admin=True))
instance.get_flavors.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
diff --git a/neutron/tests/unit/extensions/test_floating_ip_port_forwarding.py b/neutron/tests/unit/extensions/test_floating_ip_port_forwarding.py
index b4658a180b..b90b0c73d7 100644
--- a/neutron/tests/unit/extensions/test_floating_ip_port_forwarding.py
+++ b/neutron/tests/unit/extensions/test_floating_ip_port_forwarding.py
@@ -14,7 +14,6 @@
from unittest import mock
-from neutron_lib import context
from oslo_utils import uuidutils
from webob import exc
@@ -50,8 +49,9 @@ class FloatingIPPorForwardingTestCase(test_l3.L3BaseForIntTests,
tenant_id=None,
description=None,
external_port_range=None,
- internal_port_range=None):
- tenant_id = tenant_id or _uuid()
+ internal_port_range=None,
+ as_admin=False):
+ tenant_id = tenant_id or self._tenant_id
data = {'port_forwarding': {
"protocol": protocol,
"internal_ip_address": internal_ip_address,
@@ -69,28 +69,29 @@ class FloatingIPPorForwardingTestCase(test_l3.L3BaseForIntTests,
if description:
data['port_forwarding']['description'] = description
- fip_pf_req = self._req(
- 'POST', 'floatingips', data,
- fmt or self.fmt, id=floating_ip_id,
- subresource='port_forwardings')
-
- fip_pf_req.environ['neutron.context'] = context.Context(
- '', tenant_id, is_admin=True)
+ fip_pf_req = self.new_create_request(
+ 'floatingips', data, fmt or self.fmt, floating_ip_id,
+ subresource='port_forwardings',
+ tenant_id=tenant_id, as_admin=as_admin)
return fip_pf_req.get_response(self.ext_api)
def _update_fip_port_forwarding(self, fmt, floating_ip_id,
- port_forwarding_id, **kwargs):
+ port_forwarding_id,
+ req_tenant_id=None, as_admin=False,
+ **kwargs):
+ req_tenant_id = req_tenant_id or self._tenant_id
port_forwarding = {}
for k, v in kwargs.items():
port_forwarding[k] = v
data = {'port_forwarding': port_forwarding}
- fip_pf_req = self._req(
- 'PUT', 'floatingips', data,
- fmt or self.fmt, id=floating_ip_id,
+ fip_pf_req = self.new_update_request(
+ 'floatingips', data, floating_ip_id, fmt or self.fmt,
sub_id=port_forwarding_id,
- subresource='port_forwardings')
+ subresource='port_forwardings',
+ tenant_id=req_tenant_id,
+ as_admin=as_admin)
return fip_pf_req.get_response(self.ext_api)
diff --git a/neutron/tests/unit/extensions/test_l3.py b/neutron/tests/unit/extensions/test_l3.py
index ab6be0ad52..f07472fcbc 100644
--- a/neutron/tests/unit/extensions/test_l3.py
+++ b/neutron/tests/unit/extensions/test_l3.py
@@ -377,10 +377,10 @@ class TestL3NatAgentSchedulingServicePlugin(TestL3NatServicePlugin,
class L3NatTestCaseMixin(object):
- def _create_router(self, fmt, tenant_id, name=None,
- admin_state_up=None, set_context=False,
- arg_list=None, **kwargs):
- tenant_id = tenant_id or _uuid()
+ def _create_router(self, fmt, tenant_id=None, name=None,
+ admin_state_up=None, arg_list=None,
+ as_admin=False, **kwargs):
+ tenant_id = tenant_id or self._tenant_id
data = {'router': {'tenant_id': tenant_id}}
if name:
data['router']['name'] = name
@@ -400,29 +400,27 @@ class L3NatTestCaseMixin(object):
if 'enable_ndp_proxy' in kwargs:
data['router']['enable_ndp_proxy'] = \
bool(kwargs['enable_ndp_proxy'])
- router_req = self.new_create_request('routers', data, fmt)
- if set_context and tenant_id:
- # create a specific auth context for this request
- router_req.environ['neutron.context'] = context.Context(
- '', tenant_id)
+ router_req = self.new_create_request('routers', data, fmt,
+ tenant_id=tenant_id,
+ as_admin=as_admin)
return router_req.get_response(self.ext_api)
- def _make_router(self, fmt, tenant_id, name=None, admin_state_up=None,
- external_gateway_info=None, set_context=False,
- arg_list=None, **kwargs):
+ def _make_router(self, fmt, tenant_id=None, name=None, admin_state_up=None,
+ external_gateway_info=None,
+ arg_list=None, as_admin=False, **kwargs):
if external_gateway_info:
arg_list = ('external_gateway_info', ) + (arg_list or ())
res = self._create_router(fmt, tenant_id, name,
- admin_state_up, set_context,
+ admin_state_up,
arg_list=arg_list,
external_gateway_info=external_gateway_info,
- **kwargs)
+ as_admin=as_admin, **kwargs)
return self.deserialize(fmt, res)
def _add_external_gateway_to_router(self, router_id, network_id,
expected_code=exc.HTTPOk.code,
- neutron_context=None, ext_ips=None,
+ ext_ips=None, as_admin=False,
**kwargs):
ext_ips = ext_ips or []
body = {'router':
@@ -435,7 +433,7 @@ class L3NatTestCaseMixin(object):
'qos_policy_id'] = kwargs.get('policy_id')
return self._update('routers', router_id, body,
expected_code=expected_code,
- neutron_context=neutron_context)
+ as_admin=as_admin)
def _remove_external_gateway_from_router(self, router_id, network_id,
expected_code=exc.HTTPOk.code,
@@ -449,7 +447,8 @@ class L3NatTestCaseMixin(object):
expected_code=exc.HTTPOk.code,
expected_body=None,
tenant_id=None,
- msg=None):
+ msg=None,
+ as_admin=False):
interface_data = {}
if subnet_id is not None:
interface_data.update({'subnet_id': subnet_id})
@@ -457,11 +456,8 @@ class L3NatTestCaseMixin(object):
interface_data.update({'port_id': port_id})
req = self.new_action_request('routers', interface_data, router_id,
- "%s_router_interface" % action)
- # if tenant_id was specified, create a tenant context for this request
- if tenant_id:
- req.environ['neutron.context'] = context.Context(
- '', tenant_id)
+ "%s_router_interface" % action,
+ tenant_id=tenant_id, as_admin=as_admin)
res = req.get_response(self.ext_api)
self.assertEqual(expected_code, res.status_int, msg)
response = self.deserialize(self.fmt, res)
@@ -472,23 +468,23 @@ class L3NatTestCaseMixin(object):
@contextlib.contextmanager
def router(self, name='router1', admin_state_up=True,
fmt=None, project_id=None,
- external_gateway_info=None, set_context=False,
+ external_gateway_info=None, as_admin=False,
**kwargs):
tenant_id = project_id if project_id else kwargs.pop(
'tenant_id', None)
router = self._make_router(fmt or self.fmt, tenant_id, name,
admin_state_up, external_gateway_info,
- set_context, **kwargs)
+ as_admin=as_admin, **kwargs)
yield router
def _set_net_external(self, net_id):
self._update('networks', net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
def _create_floatingip(self, fmt, network_id, port_id=None,
- fixed_ip=None, set_context=False,
- floating_ip=None, subnet_id=None,
- tenant_id=None, **kwargs):
+ fixed_ip=None, floating_ip=None, subnet_id=None,
+ tenant_id=None, as_admin=False, **kwargs):
tenant_id = tenant_id or self._tenant_id
data = {'floatingip': {'floating_network_id': network_id,
'tenant_id': tenant_id}}
@@ -505,20 +501,18 @@ class L3NatTestCaseMixin(object):
data['floatingip'].update(kwargs)
- floatingip_req = self.new_create_request('floatingips', data, fmt)
- if set_context and tenant_id:
- # create a specific auth context for this request
- floatingip_req.environ['neutron.context'] = context.Context(
- '', tenant_id)
+ floatingip_req = self.new_create_request(
+ 'floatingips', data, fmt, tenant_id=tenant_id, as_admin=as_admin)
return floatingip_req.get_response(self.ext_api)
def _make_floatingip(self, fmt, network_id, port_id=None,
- fixed_ip=None, set_context=False, tenant_id=None,
+ fixed_ip=None, tenant_id=None,
floating_ip=None, http_status=exc.HTTPCreated.code,
- **kwargs):
+ as_admin=False, **kwargs):
res = self._create_floatingip(fmt, network_id, port_id,
- fixed_ip, set_context, floating_ip,
- tenant_id=tenant_id, **kwargs)
+ fixed_ip, floating_ip,
+ tenant_id=tenant_id, as_admin=as_admin,
+ **kwargs)
self.assertEqual(http_status, res.status_int)
return self.deserialize(fmt, res)
@@ -534,16 +528,15 @@ class L3NatTestCaseMixin(object):
@contextlib.contextmanager
def floatingip_with_assoc(self, port_id=None, fmt=None, fixed_ip=None,
- public_cidr='11.0.0.0/24', set_context=False,
- project_id=None, flavor_id=None, **kwargs):
+ public_cidr='11.0.0.0/24', project_id=None,
+ flavor_id=None, as_admin=False, **kwargs):
tenant_id = project_id if project_id else kwargs.pop(
'tenant_id', None)
with self.subnet(cidr=public_cidr,
- set_context=set_context,
- tenant_id=tenant_id) as public_sub:
+ tenant_id=tenant_id,
+ as_admin=as_admin) as public_sub:
self._set_net_external(public_sub['subnet']['network_id'])
- args_list = {'set_context': set_context,
- 'tenant_id': tenant_id}
+ args_list = {'tenant_id': tenant_id}
if flavor_id:
args_list['flavor_id'] = flavor_id
private_port = None
@@ -551,8 +544,8 @@ class L3NatTestCaseMixin(object):
private_port = self._show('ports', port_id)
with test_db_base_plugin_v2.optional_ctx(
private_port, self.port,
- set_context=set_context,
- tenant_id=tenant_id) as private_port:
+ tenant_id=tenant_id,
+ is_admin=as_admin) as private_port:
with self.router(**args_list) as r:
sid = private_port['port']['fixed_ips'][0]['subnet_id']
private_sub = {'subnet': {'id': sid}}
@@ -571,7 +564,7 @@ class L3NatTestCaseMixin(object):
port_id=private_port['port']['id'],
fixed_ip=fixed_ip,
tenant_id=tenant_id,
- set_context=set_context,
+ as_admin=as_admin,
**kwargs)
yield floatingip
@@ -581,8 +574,8 @@ class L3NatTestCaseMixin(object):
@contextlib.contextmanager
def floatingip_no_assoc_with_public_sub(self, private_sub, fmt=None,
- set_context=False, public_sub=None,
- flavor_id=None, **kwargs):
+ public_sub=None, flavor_id=None,
+ as_admin=False, **kwargs):
if 'project_id' in kwargs:
kwargs['tenant_id'] = kwargs['project_id']
self._set_net_external(public_sub['subnet']['network_id'])
@@ -606,7 +599,7 @@ class L3NatTestCaseMixin(object):
floatingip = self._make_floatingip(
fmt or self.fmt,
public_sub['subnet']['network_id'],
- set_context=set_context,
+ as_admin=as_admin,
**kwargs)
yield floatingip, r
@@ -615,14 +608,14 @@ class L3NatTestCaseMixin(object):
floatingip['floatingip']['id'])
@contextlib.contextmanager
- def floatingip_no_assoc(self, private_sub, fmt=None,
- set_context=False, flavor_id=None, **kwargs):
+ def floatingip_no_assoc(self, private_sub, fmt=None, flavor_id=None,
+ as_admin=False, **kwargs):
if 'project_id' in kwargs:
kwargs['tenant_id'] = kwargs['project_id']
with self.subnet(cidr='12.0.0.0/24') as public_sub:
with self.floatingip_no_assoc_with_public_sub(
- private_sub, fmt, set_context, public_sub,
- flavor_id, **kwargs) as (f, r):
+ private_sub, fmt, public_sub, flavor_id,
+ as_admin=as_admin, **kwargs) as (f, r):
# Yield only the floating ip object
yield f
@@ -707,10 +700,10 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_create_with_gwinfo(self):
with self.subnet() as s:
self._set_net_external(s['subnet']['network_id'])
- data = {'router': {'tenant_id': _uuid()}}
- data['router']['name'] = 'router1'
- data['router']['external_gateway_info'] = {
- 'network_id': s['subnet']['network_id']}
+ data = {'router': {
+ 'name': 'router1',
+ 'external_gateway_info': {
+ 'network_id': s['subnet']['network_id']}}}
router_req = self.new_create_request('routers', data, self.fmt)
res = router_req.get_response(self.ext_api)
router = self.deserialize(self.fmt, res)
@@ -726,8 +719,9 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
'external_fixed_ips': [{'ip_address': '10.0.0.99'}]
}
res = self._create_router(
- self.fmt, _uuid(), arg_list=('external_gateway_info',),
- external_gateway_info=ext_info
+ self.fmt, arg_list=('external_gateway_info',),
+ external_gateway_info=ext_info,
+ as_admin=True
)
router = self.deserialize(self.fmt, res)
self.assertEqual(
@@ -749,8 +743,10 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
{'subnet_id': s['subnet']['id']}]
}
res = self._create_router(
- self.fmt, _uuid(), arg_list=('external_gateway_info',),
- external_gateway_info=ext_info
+ self.fmt,
+ arg_list=('external_gateway_info',),
+ external_gateway_info=ext_info,
+ as_admin=True
)
router = self.deserialize(self.fmt, res)
ext_ips = router['router']['external_gateway_info'][
@@ -768,8 +764,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
'external_fixed_ips': [{'ip_address': '10.0.0.99'}]
}
res = self._create_router(
- self.fmt, _uuid(), arg_list=('external_gateway_info',),
- set_context=True, external_gateway_info=ext_info
+ self.fmt, arg_list=('external_gateway_info',),
+ external_gateway_info=ext_info
)
self.assertEqual(exc.HTTPForbidden.code, res.status_int)
@@ -873,7 +869,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
r['router']['id'],
s['subnet']['network_id'],
ext_ips=[{'ip_address': s['subnet']['gateway_ip']}],
- expected_code=exc.HTTPBadRequest.code)
+ expected_code=exc.HTTPBadRequest.code,
+ as_admin=True)
def test_router_update_gateway_with_invalid_external_ip(self):
with self.router() as r:
@@ -883,7 +880,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
r['router']['id'],
s['subnet']['network_id'],
ext_ips=[{'ip_address': '99.99.99.99'}],
- expected_code=exc.HTTPBadRequest.code)
+ expected_code=exc.HTTPBadRequest.code,
+ as_admin=True)
def test_router_update_gateway_with_invalid_external_subnet(self):
with self.subnet() as s1,\
@@ -895,7 +893,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
s1['subnet']['network_id'],
# this subnet is not on the same network so this should fail
ext_ips=[{'subnet_id': s2['subnet']['id']}],
- expected_code=exc.HTTPBadRequest.code)
+ expected_code=exc.HTTPBadRequest.code,
+ as_admin=True)
def test_router_update_gateway_with_different_external_subnet(self):
with self.network() as n:
@@ -906,11 +905,13 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res1 = self._add_external_gateway_to_router(
r['router']['id'],
n['network']['id'],
- ext_ips=[{'subnet_id': s1['subnet']['id']}])
+ ext_ips=[{'subnet_id': s1['subnet']['id']}],
+ as_admin=True)
res2 = self._add_external_gateway_to_router(
r['router']['id'],
n['network']['id'],
- ext_ips=[{'subnet_id': s2['subnet']['id']}])
+ ext_ips=[{'subnet_id': s2['subnet']['id']}],
+ as_admin=True)
fip1 = res1['router']['external_gateway_info']['external_fixed_ips'][0]
fip2 = res2['router']['external_gateway_info']['external_fixed_ips'][0]
self.assertEqual(s1['subnet']['id'], fip1['subnet_id'])
@@ -944,7 +945,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res1 = self._add_external_gateway_to_router(
r['router']['id'],
n['network']['id'],
- ext_ips=[{'subnet_id': s1['subnet']['id']}])
+ ext_ips=[{'subnet_id': s1['subnet']['id']}],
+ as_admin=True)
fip1 = (res1['router']['external_gateway_info']
['external_fixed_ips'][0])
self.assertEqual(s1['subnet']['id'], fip1['subnet_id'])
@@ -953,7 +955,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
n['network']['id'],
ext_ips=[{'ip_address': fip1['ip_address'],
'subnet_id': s1['subnet']['id']},
- {'subnet_id': s2['subnet']['id']}])
+ {'subnet_id': s2['subnet']['id']}],
+ as_admin=True)
self.assertEqual(fip1, res2['router']['external_gateway_info']
['external_fixed_ips'][0])
fip2 = (res2['router']['external_gateway_info']
@@ -971,7 +974,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self._add_external_gateway_to_router(
r['router']['id'],
n['network']['id'],
- ext_ips=[{'subnet_id': s1['subnet']['id']}])
+ ext_ips=[{'subnet_id': s1['subnet']['id']}],
+ as_admin=True)
plugin = directory.get_plugin(plugin_constants.L3)
mock.patch.object(
plugin, 'update_router',
@@ -990,7 +994,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res1 = self._add_external_gateway_to_router(
r['router']['id'],
n['network']['id'],
- ext_ips=[{'subnet_id': s1['subnet']['id']}])
+ ext_ips=[{'subnet_id': s1['subnet']['id']}],
+ as_admin=True)
fip1 = (res1['router']['external_gateway_info']
['external_fixed_ips'][0])
sres = self._create_subnet(self.fmt, net_id=n['network']['id'],
@@ -1028,7 +1033,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
n['network']['id'],
ext_ips=[{'subnet_id': s1['subnet']['id']},
{'subnet_id': s2['subnet']['id']}],
- expected_code=exc.HTTPOk.code)
+ expected_code=exc.HTTPOk.code,
+ as_admin=True)
res1 = self._show('routers', r['router']['id'])
original_fips = (res1['router']['external_gateway_info']
['external_fixed_ips'])
@@ -1309,9 +1315,9 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_add_interface_subnet_with_bad_tenant_returns_404(self):
tenant_id = _uuid()
- with self.router(tenant_id=tenant_id, set_context=True) as r:
- with self.network(tenant_id=tenant_id, set_context=True) as n:
- with self.subnet(network=n, set_context=True) as s:
+ with self.router(tenant_id=tenant_id) as r:
+ with self.network(tenant_id=tenant_id) as n:
+ with self.subnet(network=n, tenant_id=tenant_id) as s:
err_code = exc.HTTPNotFound.code
self._router_interface_action('add',
r['router']['id'],
@@ -1322,7 +1328,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
body = self._router_interface_action('add',
r['router']['id'],
s['subnet']['id'],
- None)
+ None,
+ tenant_id=tenant_id)
self.assertIn('port_id', body)
self._router_interface_action('remove',
r['router']['id'],
@@ -1334,8 +1341,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_add_interface_by_subnet_other_tenant_subnet_returns_400(
self):
router_tenant_id = _uuid()
- with self.router(tenant_id=router_tenant_id, set_context=True) as r:
- with self.network(shared=True) as n:
+ with self.router(tenant_id=router_tenant_id) as r:
+ with self.network(as_admin=True, shared=True) as n:
with self.subnet(network=n) as s:
err_code = exc.HTTPBadRequest.code
self._router_interface_action('add',
@@ -1350,10 +1357,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
):
router_tenant_id = _uuid()
with mock.patch.object(network_obj.NetworkRBAC, "get_projects") as g:
- with self.router(
- tenant_id=router_tenant_id, set_context=True
- ) as r:
- with self.network(shared=True) as n:
+ with self.router(tenant_id=router_tenant_id) as r:
+ with self.network(as_admin=True, shared=True) as n:
with self.subnet(network=n) as s:
g.return_value = [router_tenant_id]
self._router_interface_action(
@@ -1369,8 +1374,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self, out_of_pool=False, router_action_as_admin=False,
expected_code=exc.HTTPOk.code):
router_tenant_id = _uuid()
- with self.router(tenant_id=router_tenant_id, set_context=True) as r:
- with self.network(shared=True) as n:
+ with self.router(tenant_id=router_tenant_id) as r:
+ with self.network(as_admin=True, shared=True) as n:
with self.subnet(network=n) as s1, (
self.subnet(network=n, cidr='fd00::/64',
ip_version=lib_constants.IP_VERSION_6)
@@ -1386,13 +1391,13 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
'ip_address':
s2['subnet']['gateway_ip']}
with self.port(subnet=s1, fixed_ips=fixed_ips,
- tenant_id=router_tenant_id) as p:
- kwargs = {'expected_code': expected_code}
- if not router_action_as_admin:
- kwargs['tenant_id'] = router_tenant_id
+ tenant_id=router_tenant_id,
+ is_admin=True) as p:
self._router_interface_action(
'add', r['router']['id'], None, p['port']['id'],
- **kwargs)
+ expected_code=expected_code,
+ tenant_id=router_tenant_id,
+ as_admin=router_action_as_admin)
def test_router_add_interface_by_port_other_tenant_address_in_pool(
self):
@@ -1414,13 +1419,17 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
with self.router(tenant_id=tenant_id) as r,\
self.network(tenant_id=tenant_id) as n1,\
self.network(tenant_id=other_tenant_id) as n2:
- with self.subnet(network=n1, cidr='10.0.0.0/24') as s1,\
- self.subnet(network=n2, cidr='10.1.0.0/24') as s2:
+ with self.subnet(network=n1, cidr='10.0.0.0/24',
+ tenant_id=tenant_id) as s1,\
+ self.subnet(network=n2, cidr='10.1.0.0/24',
+ tenant_id=other_tenant_id) as s2:
body = self._router_interface_action(
'add',
r['router']['id'],
s2['subnet']['id'],
- None)
+ None,
+ tenant_id=other_tenant_id,
+ as_admin=True)
self.assertIn('port_id', body)
self._router_interface_action(
'add',
@@ -1472,7 +1481,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
{'ip_address': '1.1.1.1'},
{'ip_address': '2.2.2.2'}]}}
self._update('ports', p['port']['id'], data,
- neutron_context=context.get_admin_context(),
+ as_admin=True,
expected_code=exc.HTTPBadRequest.code)
self._router_interface_action('remove',
@@ -1666,12 +1675,10 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_add_interface_port_bad_tenant_returns_404(self):
tenant_id = _uuid()
- with self.router(tenant_id=tenant_id, set_context=True) as r:
- with self.network(tenant_id=tenant_id, set_context=True) as n:
- with self.subnet(tenant_id=tenant_id, network=n,
- set_context=True) as s:
- with self.port(tenant_id=tenant_id, subnet=s,
- set_context=True) as p:
+ with self.router(tenant_id=tenant_id) as r:
+ with self.network(tenant_id=tenant_id) as n:
+ with self.subnet(tenant_id=tenant_id, network=n) as s:
+ with self.port(tenant_id=tenant_id, subnet=s) as p:
err_code = exc.HTTPNotFound.code
self._router_interface_action('add',
r['router']['id'],
@@ -1837,7 +1844,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res = self._add_external_gateway_to_router(
r['router']['id'], ext_net_id,
ext_ips=[{'subnet_id': s1['subnet']['id']}],
- expected_code=exc.HTTPBadRequest.code)
+ expected_code=exc.HTTPBadRequest.code,
+ as_admin=True)
expected_msg = (
"Bad router request: Cidr 10.0.2.0/24 of subnet "
"%(external_subnet_id)s overlaps with cidr 10.0.2.0/24 of "
@@ -1967,15 +1975,12 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self.assertIsNone(gw_info)
def test_router_add_and_remove_gateway_tenant_ctx(self):
- with self.router(tenant_id='noadmin',
- set_context=True) as r:
+ with self.router() as r:
with self.subnet() as s:
self._set_net_external(s['subnet']['network_id'])
- ctx = context.Context('', 'noadmin')
self._add_external_gateway_to_router(
r['router']['id'],
- s['subnet']['network_id'],
- neutron_context=ctx)
+ s['subnet']['network_id'])
body = self._show('routers', r['router']['id'])
net_id = body['router']['external_gateway_info']['network_id']
self.assertEqual(net_id, s['subnet']['network_id'])
@@ -1988,8 +1993,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_create_router_port_with_device_id_of_other_tenants_router(self):
with self.router() as admin_router:
- with self.network(tenant_id='tenant_a',
- set_context=True) as n:
+ with self.network(tenant_id='tenant_a') as n:
with self.subnet(network=n):
for device_owner in lib_constants.ROUTER_INTERFACE_OWNERS:
self._create_port(
@@ -1997,7 +2001,6 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
tenant_id='tenant_a',
device_id=admin_router['router']['id'],
device_owner=device_owner,
- set_context=True,
expected_res_status=exc.HTTPConflict.code)
def test_create_non_router_port_device_id_of_other_tenants_router_update(
@@ -2006,38 +2009,32 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
# port that matches the device_id of another tenants router and then
# we change the device_owner to be network:router_interface.
with self.router() as admin_router:
- with self.network(tenant_id='tenant_a',
- set_context=True) as n:
+ with self.network(tenant_id='tenant_a') as n:
with self.subnet(network=n):
for device_owner in lib_constants.ROUTER_INTERFACE_OWNERS:
port_res = self._create_port(
self.fmt, n['network']['id'],
tenant_id='tenant_a',
- device_id=admin_router['router']['id'],
- set_context=True)
+ device_id=admin_router['router']['id'])
port = self.deserialize(self.fmt, port_res)
- neutron_context = context.Context('', 'tenant_a')
data = {'port': {'device_owner': device_owner}}
self._update('ports', port['port']['id'], data,
- neutron_context=neutron_context,
- expected_code=exc.HTTPConflict.code)
+ expected_code=exc.HTTPConflict.code,
+ request_tenant_id='tenant_a')
def test_update_port_device_id_to_different_tenants_router(self):
with self.router() as admin_router:
- with self.router(tenant_id='tenant_a',
- set_context=True) as tenant_router:
- with self.network(tenant_id='tenant_a',
- set_context=True) as n:
+ with self.router(tenant_id='tenant_a') as tenant_router:
+ with self.network(tenant_id='tenant_a') as n:
with self.subnet(network=n) as s:
port = self._router_interface_action(
'add', tenant_router['router']['id'],
s['subnet']['id'], None, tenant_id='tenant_a')
- neutron_context = context.Context('', 'tenant_a')
data = {'port':
{'device_id': admin_router['router']['id']}}
self._update('ports', port['port_id'], data,
- neutron_context=neutron_context,
- expected_code=exc.HTTPConflict.code)
+ expected_code=exc.HTTPConflict.code,
+ request_tenant_id='tenant_a')
def test_router_add_gateway_invalid_network_returns_400(self):
with self.router() as r:
@@ -2122,7 +2119,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res = self._add_external_gateway_to_router(
r['router']['id'], n['network']['id'],
ext_ips=[{'subnet_id': s['subnet']['id'],
- 'ip_address': '10.0.0.4'}])
+ 'ip_address': '10.0.0.4'}],
+ as_admin=True)
gw_info = res['router']['external_gateway_info']
ext_ips = gw_info['external_fixed_ips'][0]
expected_gw_ips = [ext_ips['ip_address']]
@@ -2314,7 +2312,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_delete_with_port_existed_returns_409(self):
with self.subnet() as subnet:
- res = self._create_router(self.fmt, _uuid())
+ res = self._create_router(self.fmt)
router = self.deserialize(self.fmt, res)
self._router_interface_action('add',
router['router']['id'],
@@ -2329,7 +2327,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
p['port']['fixed_ips'][0]['subnet_id']}}
with self.subnet(cidr='12.0.0.0/24') as public_sub:
self._set_net_external(public_sub['subnet']['network_id'])
- res = self._create_router(self.fmt, _uuid())
+ res = self._create_router(self.fmt)
r = self.deserialize(self.fmt, res)
self._add_external_gateway_to_router(
r['router']['id'],
@@ -2346,12 +2344,10 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_show(self):
name = 'router1'
- tenant_id = _uuid()
- expected_value = [('name', name), ('tenant_id', tenant_id),
+ expected_value = [('name', name), ('tenant_id', self._tenant_id),
('admin_state_up', True), ('status', 'ACTIVE'),
('external_gateway_info', None)]
- with self.router(name='router1', admin_state_up=True,
- tenant_id=tenant_id) as router:
+ with self.router(name='router1', admin_state_up=True) as router:
res = self._show('routers', router['router']['id'])
for k, v in expected_value:
self.assertEqual(res['router'][k], v)
@@ -2365,7 +2361,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
s1['subnet']['network_id'])
self._update('networks', s1['subnet']['network_id'],
{'network': {extnet_apidef.EXTERNAL: False}},
- expected_code=exc.HTTPConflict.code)
+ expected_code=exc.HTTPConflict.code,
+ as_admin=True)
def test_network_update_external(self):
with self.router() as r:
@@ -2377,7 +2374,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
r['router']['id'],
s1['subnet']['network_id'])
self._update('networks', testnet['network']['id'],
- {'network': {extnet_apidef.EXTERNAL: False}})
+ {'network': {extnet_apidef.EXTERNAL: False}},
+ as_admin=True)
def test_floatingip_crd_ops(self):
with self.floatingip_with_assoc() as fip:
@@ -2457,8 +2455,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self._make_floatingip(
self.fmt,
public_sub['subnet']['network_id'],
- port_id=private_port['port']['id'],
- set_context=False)
+ port_id=private_port['port']['id'])
self.assertTrue(agent_notification.called)
def test_floating_port_status_not_applicable(self):
@@ -2903,23 +2900,23 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
with self.subnet(cidr='11.0.0.0/24') as public_sub:
self._set_net_external(public_sub['subnet']['network_id'])
with self.port() as private_port:
- with self.router(tenant_id='router-owner',
- set_context=True) as r:
+ with self.router(tenant_id='router-owner') as r:
sid = private_port['port']['fixed_ips'][0]['subnet_id']
private_sub = {'subnet': {'id': sid}}
self._add_external_gateway_to_router(
r['router']['id'],
- public_sub['subnet']['network_id'])
+ public_sub['subnet']['network_id'],
+ as_admin=True)
self._router_interface_action(
'add', r['router']['id'],
- private_sub['subnet']['id'], None)
+ private_sub['subnet']['id'], None,
+ as_admin=True)
self._make_floatingip(self.fmt,
public_sub['subnet']['network_id'],
port_id=private_port['port']['id'],
- fixed_ip=None,
- set_context=True)
+ fixed_ip=None)
def test_floatingip_update_different_router(self):
# Create subnet with different CIDRs to account for plugins which
@@ -2983,10 +2980,12 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_floatingip_update_different_port_owner_as_admin(self):
with self.subnet() as private_sub:
with self.floatingip_no_assoc(private_sub) as fip:
- with self.port(subnet=private_sub, tenant_id='other') as p:
+ with self.port(subnet=private_sub, tenant_id='other',
+ is_admin=True) as p:
body = self._update('floatingips', fip['floatingip']['id'],
{'floatingip':
- {'port_id': p['port']['id']}})
+ {'port_id': p['port']['id']}},
+ as_admin=True)
self.assertEqual(p['port']['id'],
body['floatingip']['port_id'])
@@ -3032,7 +3031,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
p['port']['fixed_ips'][0]['subnet_id']}}
with self.subnet(cidr='12.0.0.0/24') as public_sub:
self._set_net_external(public_sub['subnet']['network_id'])
- res = self._create_router(self.fmt, _uuid())
+ res = self._create_router(self.fmt)
r = self.deserialize(self.fmt, res)
self._add_external_gateway_to_router(
r['router']['id'],
@@ -3060,8 +3059,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res = self._create_floatingip(
self.fmt,
public_sub['subnet']['network_id'],
- subnet_id=public_sub['subnet']['id'],
- set_context=True)
+ subnet_id=public_sub['subnet']['id'])
self.assertEqual(exc.HTTPCreated.code, res.status_int)
def test_create_floatingip_with_subnet_id_and_fip_address(self):
@@ -3073,7 +3071,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self.fmt,
ext_net['network']['id'],
subnet_id=ext_subnet['subnet']['id'],
- floating_ip='10.10.10.100')
+ floating_ip='10.10.10.100',
+ as_admin=True)
fip = self.deserialize(self.fmt, res)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
self.assertEqual('10.10.10.100',
@@ -3088,7 +3087,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self.fmt,
ext_net['network']['id'],
subnet_id=ext_subnet['subnet']['id'],
- floating_ip='20.20.20.200')
+ floating_ip='20.20.20.200',
+ as_admin=True)
data = self.deserialize(self.fmt, res)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
msg = str(n_exc.InvalidIpForSubnet(ip_address='20.20.20.200'))
@@ -3472,7 +3472,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
network_id = s['subnet']['network_id']
self._set_net_external(network_id)
fp = self._make_floatingip(self.fmt, network_id,
- floating_ip='10.0.0.10')
+ floating_ip='10.0.0.10',
+ as_admin=True)
self.assertEqual('10.0.0.10',
fp['floatingip']['floating_ip_address'])
@@ -3484,18 +3485,17 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
network_id = s['subnet']['network_id']
self._set_net_external(network_id)
fp = self._make_floatingip(self.fmt, network_id,
- floating_ip='10.0.0.30')
+ floating_ip='10.0.0.30',
+ as_admin=True)
self.assertEqual('10.0.0.30',
fp['floatingip']['floating_ip_address'])
def test_create_floatingip_with_specific_ip_non_admin(self):
- ctx = context.Context('user_id', 'tenant_id')
-
with self.subnet(cidr='10.0.0.0/24') as s:
network_id = s['subnet']['network_id']
self._set_net_external(network_id)
self._make_floatingip(self.fmt, network_id,
- set_context=ctx,
+ tenant_id='tenant_id',
floating_ip='10.0.0.10',
http_status=exc.HTTPForbidden.code)
@@ -3506,7 +3506,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self._set_net_external(network_id)
self._make_floatingip(self.fmt, network_id,
floating_ip='10.0.1.10',
- http_status=exc.HTTPBadRequest.code)
+ http_status=exc.HTTPBadRequest.code,
+ as_admin=True)
def test_create_floatingip_with_duplicated_specific_ip(self):
@@ -3514,11 +3515,13 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
network_id = s['subnet']['network_id']
self._set_net_external(network_id)
self._make_floatingip(self.fmt, network_id,
- floating_ip='10.0.0.10')
+ floating_ip='10.0.0.10',
+ as_admin=True)
self._make_floatingip(self.fmt, network_id,
floating_ip='10.0.0.10',
- http_status=exc.HTTPConflict.code)
+ http_status=exc.HTTPConflict.code,
+ as_admin=True)
def test_create_floatingips_native_quotas(self):
quota = 1
@@ -3711,7 +3714,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
{'port_id': port['port']['id']})
# fetch port and confirm device_id and device_owner
- body = self._show('ports', port['port']['id'])
+ body = self._show('ports', port['port']['id'],
+ tenant_id=tenant_id)
self.assertEqual('', body['port']['device_owner'])
self.assertEqual('', body['port']['device_id'])
@@ -3756,7 +3760,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
data = {'port': {'fixed_ips': [
{'ip_address': gw_ip}]}}
req = self.new_update_request('ports', data,
- gw_port_id)
+ gw_port_id,
+ as_admin=True)
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(gw_ip_len, len(res['port']['fixed_ips']))
@@ -3833,9 +3838,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
'network_id': network['network']['id'],
'subnetpool_id': subnetpool_id,
'prefixlen': 24,
- 'ip_version': lib_constants.IP_VERSION_4,
- 'tenant_id': tenant_id}}
- req = self.new_create_request('subnets', data)
+ 'ip_version': lib_constants.IP_VERSION_4}}
+ req = self.new_create_request('subnets', data, tenant_id=tenant_id)
subnet = self.deserialize(self.fmt, req.get_response(self.api))
admin_ctx = context.get_admin_context()
@@ -3881,7 +3885,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
# simulate a failed update by just setting the device_id of
# the fip port back to PENDING
data = {'port': {'device_id': 'PENDING'}}
- self._update('ports', fip_port['id'], data)
+ self._update('ports', fip_port['id'], data, as_admin=True)
plugin._clean_garbage()
# first call just marks as candidate, so it shouldn't be changed
port = self._show('ports', fip_port['id'])
@@ -3925,7 +3929,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
events.BEFORE_DELETE)
with self.subnet():
- res = self._create_router(self.fmt, _uuid())
+ res = self._create_router(self.fmt)
router = self.deserialize(self.fmt, res)
self._delete('routers', router['router']['id'],
exc.HTTPForbidden.code)
@@ -4151,8 +4155,7 @@ class L3AgentDbTestCaseBase(L3NatTestCaseMixin):
f = self._make_floatingip(self.fmt,
public_sub['subnet']['network_id'],
port_id=None,
- fixed_ip=None,
- set_context=True)
+ fixed_ip=None)
self._delete('floatingips', f['floatingip']['id'])
fake_method.assert_called_once_with(
resources.FLOATING_IP, events.AFTER_DELETE, mock.ANY,
@@ -4194,7 +4197,7 @@ class L3AgentDbTestCaseBase(L3NatTestCaseMixin):
# converted into its API equivalent of 404
e404 = mock.Mock(side_effect=l3_exc.RouterNotFound(router_id='1'))
registry.subscribe(e404, resources.ROUTER, events.PRECOMMIT_CREATE)
- res = self._create_router(self.fmt, 'tenid')
+ res = self._create_router(self.fmt)
self.assertEqual(exc.HTTPNotFound.code, res.status_int)
# make sure nothing committed
body = self._list('routers')
@@ -4521,7 +4524,7 @@ class L3NatDBFloatingIpTestCaseWithDNS(L3BaseForSepTests, L3NatTestCaseMixin):
self.mock_admin_client.reset_mock()
def _create_network(self, fmt, name, admin_state_up,
- arg_list=None, set_context=False, tenant_id=None,
+ arg_list=None, tenant_id=None, as_admin=False,
**kwargs):
new_arg_list = ('dns_domain',)
if arg_list is not None:
@@ -4529,12 +4532,12 @@ class L3NatDBFloatingIpTestCaseWithDNS(L3BaseForSepTests, L3NatTestCaseMixin):
return super(L3NatDBFloatingIpTestCaseWithDNS,
self)._create_network(fmt, name, admin_state_up,
arg_list=new_arg_list,
- set_context=set_context,
tenant_id=tenant_id,
+ as_admin=as_admin,
**kwargs)
def _create_port(self, fmt, name, admin_state_up,
- arg_list=None, set_context=False, tenant_id=None,
+ arg_list=None, tenant_id=None, is_admin=False,
**kwargs):
new_arg_list = ('dns_name',)
if arg_list is not None:
@@ -4542,8 +4545,8 @@ class L3NatDBFloatingIpTestCaseWithDNS(L3BaseForSepTests, L3NatTestCaseMixin):
return super(L3NatDBFloatingIpTestCaseWithDNS,
self)._create_port(fmt, name, admin_state_up,
arg_list=new_arg_list,
- set_context=set_context,
tenant_id=tenant_id,
+ is_admin=is_admin,
**kwargs)
def _create_net_sub_port(self, dns_domain='', dns_name=''):
diff --git a/neutron/tests/unit/extensions/test_l3_conntrack_helper.py b/neutron/tests/unit/extensions/test_l3_conntrack_helper.py
index 12c1c273ab..e49125bb0a 100644
--- a/neutron/tests/unit/extensions/test_l3_conntrack_helper.py
+++ b/neutron/tests/unit/extensions/test_l3_conntrack_helper.py
@@ -19,7 +19,6 @@ from webob import exc
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import l3_conntrack_helper as l3_ct
-from neutron_lib import context
from oslo_utils import uuidutils
from neutron.extensions import l3
@@ -67,19 +66,16 @@ class L3NConntrackHelperTestCase(test_l3.L3BaseForIntTests,
def _create_router_conntrack_helper(self, fmt, router_id,
protocol, port, helper):
- tenant_id = self.tenant_id or _uuid()
data = {'conntrack_helper': {
"protocol": protocol,
"port": port,
"helper": helper}
}
- router_ct_req = self._req(
- 'POST', 'routers', data,
+ router_ct_req = self.new_create_request(
+ 'routers', data,
fmt or self.fmt, id=router_id,
- subresource='conntrack_helpers')
-
- router_ct_req.environ['neutron.context'] = context.Context(
- '', tenant_id, is_admin=True)
+ subresource='conntrack_helpers',
+ as_admin=True)
return router_ct_req.get_response(self.ext_api)
@@ -90,11 +86,10 @@ class L3NConntrackHelperTestCase(test_l3.L3BaseForIntTests,
conntrack_helper[k] = v
data = {'conntrack_helper': conntrack_helper}
- router_ct_req = self._req(
- 'PUT', 'routers', data,
- fmt or self.fmt, id=router_id,
- sub_id=conntrack_helper_id,
- subresource='conntrack_helpers')
+ router_ct_req = self.new_update_request(
+ 'routers', data, router_id,
+ fmt or self.fmt, sub_id=conntrack_helper_id,
+ subresource='conntrack_helpers', as_admin=True)
return router_ct_req.get_response(self.ext_api)
def test_create_ct_with_duplicate_entry(self):
diff --git a/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py b/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py
index 9c941738ab..f8cc6a4bca 100644
--- a/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py
+++ b/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py
@@ -380,7 +380,7 @@ class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
def _set_router_external_gateway(self, router_id, network_id,
snat_enabled=None,
expected_code=exc.HTTPOk.code,
- neutron_context=None):
+ tenant_id=None, as_admin=False):
ext_gw_info = {'network_id': network_id}
# Need to set enable_snat also if snat_enabled == False
if snat_enabled is not None:
@@ -389,7 +389,8 @@ class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
{'router': {'external_gateway_info':
ext_gw_info}},
expected_code=expected_code,
- neutron_context=neutron_context)
+ request_tenant_id=tenant_id,
+ as_admin=as_admin)
def test_router_gateway_set_fail_after_port_create(self):
with self.router() as r, self.subnet() as s:
@@ -444,7 +445,8 @@ class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
('external_gateway_info', None)]
with self.router(name=name, admin_state_up=True,
tenant_id=tenant_id) as router:
- res = self._show('routers', router['router']['id'])
+ res = self._show('routers', router['router']['id'],
+ tenant_id=tenant_id)
for k, v in expected_value:
self.assertEqual(res['router'][k], v)
@@ -468,8 +470,10 @@ class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'subnet_id': s['subnet']['id']}]})]
with self.router(name=name, admin_state_up=True,
tenant_id=tenant_id,
- external_gateway_info=input_value) as router:
- res = self._show('routers', router['router']['id'])
+ external_gateway_info=input_value,
+ as_admin=True) as router:
+ res = self._show('routers', router['router']['id'],
+ tenant_id=tenant_id)
for k, v in expected_value:
self.assertEqual(v, res['router'][k])
@@ -493,7 +497,8 @@ class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
self._set_router_external_gateway(
r['router']['id'], ext_net_id,
snat_enabled=snat_input_value,
- expected_code=expected_http_code)
+ expected_code=expected_http_code,
+ as_admin=True)
if expected_http_code != exc.HTTPOk.code:
return
body = self._show('routers', r['router']['id'])
diff --git a/neutron/tests/unit/extensions/test_l3_ndp_proxy.py b/neutron/tests/unit/extensions/test_l3_ndp_proxy.py
index d5ce2d728a..1836219cc7 100644
--- a/neutron/tests/unit/extensions/test_l3_ndp_proxy.py
+++ b/neutron/tests/unit/extensions/test_l3_ndp_proxy.py
@@ -22,7 +22,6 @@ from neutron_lib.api.definitions import external_net as enet_apidef
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import l3_ext_gw_mode
from neutron_lib import constants
-from neutron_lib import context
from neutron_lib import fixture
from oslo_config import cfg
from oslo_utils import uuidutils
@@ -66,7 +65,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
test_l3.L3BaseForIntTests,
test_l3.L3NatTestCaseMixin):
fmt = 'json'
- tenant_id = _uuid()
+ _tenant_id = _uuid()
def setUp(self):
mock.patch('neutron.api.rpc.handlers.resources_rpc.'
@@ -81,11 +80,11 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
self.address_scope_id = self._make_address_scope(
self.fmt, constants.IP_VERSION_6,
- **{'tenant_id': self.tenant_id})['address_scope']['id']
+ **{'tenant_id': self._tenant_id})['address_scope']['id']
self.subnetpool_id = self._make_subnetpool(
self.fmt, ['2001::0/96'],
**{'address_scope_id': self.address_scope_id,
- 'default_prefixlen': 112, 'tenant_id': self.tenant_id,
+ 'default_prefixlen': 112,
'name': "test-ipv6-pool"})['subnetpool']['id']
self.ext_net = self._make_network(
self.fmt, 'ext-net', True)
@@ -103,7 +102,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
ipv6_ra_mode=constants.DHCPV6_STATEFUL,
ipv6_address_mode=constants.DHCPV6_STATEFUL)
self._ext_subnet_v6_id = self._ext_subnet_v6['subnet']['id']
- self.router1 = self._make_router(self.fmt, self.tenant_id)
+ self.router1 = self._make_router(self.fmt, self._tenant_id)
self.router1_id = self.router1['router']['id']
self.private_net = self._make_network(self.fmt, 'private-net', True)
self.private_subnet = self._make_subnet(
@@ -125,7 +124,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
description=None, fmt=None, tenant_id=None,
expected_code=exc.HTTPCreated.code,
expected_message=None):
- tenant_id = tenant_id or self.tenant_id
+ tenant_id = tenant_id or self._tenant_id
data = {'ndp_proxy': {
"port_id": port_id,
"router_id": router_id}
@@ -135,11 +134,9 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
if description:
data['ndp_proxy']['description'] = description
- req_res = self._req(
- 'POST', 'ndp-proxies', data,
- fmt or self.fmt)
- req_res.environ['neutron.context'] = context.Context(
- '', tenant_id, is_admin=True)
+ req_res = self.new_create_request(
+ 'ndp-proxies', data, fmt or self.fmt,
+ tenant_id=tenant_id, as_admin=True)
res = req_res.get_response(self.ext_api)
self.assertEqual(expected_code, res.status_int)
@@ -152,15 +149,14 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
tenant_id=None, fmt=None,
expected_code=exc.HTTPOk.code,
expected_message=None, **kwargs):
- tenant_id = tenant_id or self.tenant_id
+ tenant_id = tenant_id or self._tenant_id
data = {}
for k, v in kwargs.items():
data[k] = v
- req_res = self._req(
- 'PUT', 'ndp-proxies', {'ndp_proxy': data},
- fmt or self.fmt, id=ndp_proxy_id)
- req_res.environ['neutron.context'] = context.Context(
- '', tenant_id, is_admin=True)
+ req_res = self.new_update_request(
+ 'ndp-proxies', {'ndp_proxy': data},
+ ndp_proxy_id, fmt or self.fmt,
+ tenant_id=tenant_id, as_admin=True)
res = req_res.get_response(self.ext_api)
self.assertEqual(expected_code, res.status_int)
if expected_message:
@@ -208,13 +204,12 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
def _update_router(self, router_id, update_date, tenant_id=None,
fmt=None, expected_code=exc.HTTPOk.code,
expected_message=None):
- tenant_id = tenant_id or self.tenant_id
+ tenant_id = tenant_id or self._tenant_id
data = {'router': update_date}
router_req = self.new_update_request(
'routers', id=router_id, data=data,
- fmt=(fmt or self.fmt))
- router_req.environ['neutron.context'] = context.Context(
- '', tenant_id, is_admin=True)
+ fmt=(fmt or self.fmt),
+ tenant_id=tenant_id, as_admin=True)
res = router_req.get_response(self.ext_api)
self.assertEqual(expected_code, res.status_int)
if expected_message:
@@ -275,7 +270,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
ipv6_address_mode=constants.DHCPV6_STATEFUL):
self._set_net_external(ext_net['network']['id'])
res = self._make_router(
- self.fmt, self.tenant_id,
+ self.fmt, self._tenant_id,
external_gateway_info={'network_id': ext_net['network']['id']},
**{'enable_ndp_proxy': True})
expected_msg = (
@@ -284,7 +279,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
"scope.") % ext_net['network']['id']
self.assertTrue(expected_msg in res['NeutronError']['message'])
router = self._make_router(
- self.fmt, self.tenant_id,
+ self.fmt, self._tenant_id,
external_gateway_info={'network_id': ext_net['network']['id']})
expected_msg = (
"Can not enable ndp proxy on router %s, The router has no "
@@ -473,18 +468,18 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
def test_create_ndp_proxy_with_different_address_scope(self):
with self.address_scope(
ip_version=constants.IP_VERSION_6,
- tenant_id=self.tenant_id) as addr_scope, \
+ tenant_id=self._tenant_id) as addr_scope, \
self.subnetpool(['2001::100:0:0/100'],
**{'address_scope_id': addr_scope['address_scope']['id'],
'default_prefixlen': 112, 'name': 'test1',
- 'tenant_id': self.tenant_id}) as subnetpool, \
+ 'tenant_id': self._tenant_id}) as subnetpool, \
self.subnet(
cidr='2001::100:1:0/112',
ip_version=constants.IP_VERSION_6,
ipv6_ra_mode=constants.DHCPV6_STATEFUL,
ipv6_address_mode=constants.DHCPV6_STATEFUL,
subnetpool_id=subnetpool['subnetpool']['id'],
- tenant_id=self.tenant_id) as subnet, \
+ tenant_id=self._tenant_id) as subnet, \
self.port(subnet) as port:
subnet_id = subnet['subnet']['id']
port_id = port['port']['id']
@@ -503,9 +498,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
def _create_router(self, data, expected_code=exc.HTTPCreated.code,
expected_message=None):
router_req = self.new_create_request(
- 'routers', data, self.fmt)
- router_req.environ['neutron.context'] = context.Context(
- '', self.tenant_id, is_admin=True)
+ 'routers', data, self.fmt, as_admin=True)
res = router_req.get_response(self.ext_api)
self.assertEqual(expected_code, res.status_int)
if expected_message:
diff --git a/neutron/tests/unit/extensions/test_local_ip.py b/neutron/tests/unit/extensions/test_local_ip.py
index 5de762d8b1..e15b31e821 100644
--- a/neutron/tests/unit/extensions/test_local_ip.py
+++ b/neutron/tests/unit/extensions/test_local_ip.py
@@ -19,7 +19,6 @@ from unittest import mock
import netaddr
from neutron_lib.api.definitions import local_ip as apidef
from neutron_lib import constants
-from neutron_lib import context
import webob.exc
from neutron.extensions import local_ip as lip_ext
@@ -46,10 +45,8 @@ class LocalIPTestBase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
for k, v in kwargs.items():
local_ip['local_ip'][k] = v
- req = self.new_create_request('local-ips', local_ip)
- neutron_context = context.Context(
- '', kwargs.get('project_id', self._tenant_id), is_admin=True)
- req.environ['neutron.context'] = neutron_context
+ req = self.new_create_request('local-ips', local_ip,
+ tenant_id=self._tenant_id, as_admin=True)
res = req.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@@ -57,9 +54,7 @@ class LocalIPTestBase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _update_local_ip(self, lip_id, data):
update_req = self.new_update_request(
- 'local-ips', data, lip_id)
- update_req.environ['neutron.context'] = context.Context(
- '', self._tenant_id)
+ 'local-ips', data, lip_id, tenant_id=self._tenant_id)
res = update_req.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@@ -73,9 +68,8 @@ class LocalIPTestBase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
req = self.new_create_request('local_ips',
data=local_ip_assoc,
id=local_ip_id,
- subresource='port_associations')
- neutron_context = context.Context('', self._tenant_id)
- req.environ['neutron.context'] = neutron_context
+ subresource='port_associations',
+ tenant_id=self._tenant_id)
res = req.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
diff --git a/neutron/tests/unit/extensions/test_network_ip_availability.py b/neutron/tests/unit/extensions/test_network_ip_availability.py
index 833ee89c95..f52d000d9a 100644
--- a/neutron/tests/unit/extensions/test_network_ip_availability.py
+++ b/neutron/tests/unit/extensions/test_network_ip_availability.py
@@ -65,7 +65,9 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
# list by query fields: total_ips
params = 'fields=total_ips'
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE,
+ params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -83,7 +85,8 @@ class TestNetworkIPAvailabilityAPI(
params = ['total_ips']
request = self.new_show_request(API_RESOURCE,
network['id'],
- fields=params)
+ fields=params,
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
@@ -103,7 +106,9 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
network = net['network']
# Get ALL
- request = self.new_list_request(API_RESOURCE, self.fmt)
+ request = self.new_list_request(API_RESOURCE,
+ self.fmt,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -112,7 +117,8 @@ class TestNetworkIPAvailabilityAPI(
net, 0)
# Get single via id
- request = self.new_show_request(API_RESOURCE, network['id'])
+ request = self.new_show_request(API_RESOURCE, network['id'],
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
@@ -134,7 +140,8 @@ class TestNetworkIPAvailabilityAPI(
self.port(subnet=subnet3_1):
# Test get ALL
- request = self.new_list_request(API_RESOURCE)
+ request = self.new_list_request(API_RESOURCE,
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -148,7 +155,8 @@ class TestNetworkIPAvailabilityAPI(
# Test get single via network id
network = n1['network']
request = self.new_show_request(API_RESOURCE,
- network['id'])
+ network['id'],
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
@@ -165,7 +173,8 @@ class TestNetworkIPAvailabilityAPI(
self.port(subnet=subnet1_2),\
self.port(subnet=subnet1_2):
# Get ALL
- request = self.new_list_request(API_RESOURCE)
+ request = self.new_list_request(API_RESOURCE,
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -176,7 +185,8 @@ class TestNetworkIPAvailabilityAPI(
# Get single via network id
network = n1['network']
request = self.new_show_request(API_RESOURCE,
- network['id'])
+ network['id'],
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
@@ -186,7 +196,8 @@ class TestNetworkIPAvailabilityAPI(
def test_usages_port_consumed_v4(self):
with self.network() as net:
with self.subnet(network=net) as subnet:
- request = self.new_list_request(API_RESOURCE)
+ request = self.new_list_request(API_RESOURCE,
+ as_admin=True)
# Consume 2 ports
with self.port(subnet=subnet), self.port(subnet=subnet):
response = self.deserialize(self.fmt,
@@ -200,7 +211,8 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
# Get IPv4
params = 'ip_version=%s' % constants.IP_VERSION_4
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -210,7 +222,8 @@ class TestNetworkIPAvailabilityAPI(
# Get IPv6 should return empty array
params = 'ip_version=%s' % constants.IP_VERSION_6
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@@ -225,7 +238,8 @@ class TestNetworkIPAvailabilityAPI(
ipv6_address_mode=constants.DHCPV6_STATELESS):
# Get IPv6
params = 'ip_version=%s' % constants.IP_VERSION_6
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(1, len(response[IP_AVAILS_KEY]))
@@ -234,7 +248,8 @@ class TestNetworkIPAvailabilityAPI(
# Get IPv4 should return empty array
params = 'ip_version=%s' % constants.IP_VERSION_4
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@@ -247,7 +262,8 @@ class TestNetworkIPAvailabilityAPI(
network=net, cidr=cidr_ipv6,
ip_version=constants.IP_VERSION_6,
ipv6_address_mode=constants.DHCPV6_STATELESS) as subnet:
- request = self.new_list_request(API_RESOURCE)
+ request = self.new_list_request(API_RESOURCE,
+ as_admin=True)
# Consume 3 ports
with self.port(subnet=subnet),\
self.port(subnet=subnet), \
@@ -266,7 +282,8 @@ class TestNetworkIPAvailabilityAPI(
test_id = network['id']
# Get by query param: network_id
params = 'network_id=%s' % test_id
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -276,7 +293,8 @@ class TestNetworkIPAvailabilityAPI(
# Get by NON-matching query param: network_id
params = 'network_id=clearlywontmatch'
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@@ -287,7 +305,8 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
# Get by query param: network_name
params = 'network_name=%s' % test_name
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -297,7 +316,8 @@ class TestNetworkIPAvailabilityAPI(
# Get by NON-matching query param: network_name
params = 'network_name=clearly-wont-match'
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@@ -308,7 +328,8 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
# Get by query param: tenant_id
params = 'tenant_id=%s' % test_tenant_id
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -320,7 +341,8 @@ class TestNetworkIPAvailabilityAPI(
# Get by NON-matching query param: tenant_id
params = 'tenant_id=clearly-wont-match'
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@@ -331,7 +353,8 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
# Get by query param: project_id
params = 'project_id=%s' % test_project_id
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -343,7 +366,8 @@ class TestNetworkIPAvailabilityAPI(
# Get by NON-matching query param: project_id
params = 'project_id=clearly-wont-match'
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@@ -369,7 +393,8 @@ class TestNetworkIPAvailabilityAPI(
self.port(subnet=s42), self.port(subnet=s42):
# Verify consumption across all
- request = self.new_list_request(API_RESOURCE)
+ request = self.new_list_request(API_RESOURCE,
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
avails_list = response[IP_AVAILS_KEY]
@@ -387,7 +412,8 @@ class TestNetworkIPAvailabilityAPI(
constants.IP_VERSION_6]:
params = 'ip_version=%i' % ip_ver
request = self.new_list_request(API_RESOURCE,
- params=params)
+ params=params,
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
for net_avail in response[IP_AVAILS_KEY]:
@@ -399,7 +425,8 @@ class TestNetworkIPAvailabilityAPI(
API_RESOURCE,
params='network_id=%s&network_id=%s'
% (net_v4_2['network']['id'],
- net_v6_2['network']['id']))
+ net_v6_2['network']['id']),
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
avails_list = response[IP_AVAILS_KEY]
@@ -414,7 +441,8 @@ class TestNetworkIPAvailabilityAPI(
networks = (net1, net2, net3, net4)
for idx in range(1, len(networks) + 1):
params = 'limit=%s' % idx
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(idx, len(response[IP_AVAILS_KEY]))
@@ -426,14 +454,16 @@ class TestNetworkIPAvailabilityAPI(
network_ids = sorted([net['network']['id'] for net in networks])
params = 'sort_key=network_id;sort_dir=asc'
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
res = [net['network_id'] for net in response[IP_AVAILS_KEY]]
self.assertEqual(network_ids, res)
params = 'sort_key=network_id;sort_dir=desc'
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
res = [net['network_id'] for net in response[IP_AVAILS_KEY]]
diff --git a/neutron/tests/unit/extensions/test_network_segment_range.py b/neutron/tests/unit/extensions/test_network_segment_range.py
index 2592a0366a..9bf4ca3660 100644
--- a/neutron/tests/unit/extensions/test_network_segment_range.py
+++ b/neutron/tests/unit/extensions/test_network_segment_range.py
@@ -56,7 +56,8 @@ class NetworkSegmentRangeTestBase(test_db_base_plugin_v2.
network_segment_range['network_segment_range'][k] = str(v)
network_segment_range_req = self.new_create_request(
- 'network-segment-ranges', network_segment_range, fmt)
+ 'network-segment-ranges', network_segment_range, fmt,
+ as_admin=True)
network_segment_range_res = network_segment_range_req.get_response(
self.ext_api)
@@ -84,7 +85,7 @@ class NetworkSegmentRangeTestBase(test_db_base_plugin_v2.
def _test_update_network_segment_range(self, range_id,
data, expected=None):
update_req = self.new_update_request(
- 'network-segment-ranges', data, range_id)
+ 'network-segment-ranges', data, range_id, as_admin=True)
update_res = update_req.get_response(self.ext_api)
if expected:
@@ -264,7 +265,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
'network-segment-ranges',
network_segment_range['network_segment_range']['id'],
{'network_segment_range': {'name': 'foo-name'}},
- expected_code=webob.exc.HTTPOk.code)
+ expected_code=webob.exc.HTTPOk.code,
+ as_admin=True)
self.assertEqual('foo-name',
result['network_segment_range']['name'])
@@ -277,7 +279,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
'network-segment-ranges',
network_segment_range['network_segment_range']['id'],
{'network_segment_range': {'name': ''}},
- expected_code=webob.exc.HTTPOk.code)
+ expected_code=webob.exc.HTTPOk.code,
+ as_admin=True)
self.assertEqual('', result['network_segment_range']['name'])
def test_update_network_segment_range_min_max(self):
@@ -288,7 +291,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
'network-segment-ranges',
network_segment_range['network_segment_range']['id'],
{'network_segment_range': {'minimum': 1200, 'maximum': 1300}},
- expected_code=webob.exc.HTTPOk.code)
+ expected_code=webob.exc.HTTPOk.code,
+ as_admin=True)
self.assertEqual(1200, result['network_segment_range']['minimum'])
self.assertEqual(1300, result['network_segment_range']['maximum'])
@@ -296,7 +300,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
network_segment_range = self._test_create_network_segment_range()
req = self.new_show_request(
'network-segment-ranges',
- network_segment_range['network_segment_range']['id'])
+ network_segment_range['network_segment_range']['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(
network_segment_range['network_segment_range']['id'],
@@ -306,7 +311,7 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
self._test_create_network_segment_range(name='foo-range1')
self._test_create_network_segment_range(
name='foo-range2', minimum=400, maximum=500)
- res = self._list('network-segment-ranges')
+ res = self._list('network-segment-ranges', as_admin=True)
self.assertEqual(2, len(res['network_segment_ranges']))
def test_list_network_segment_ranges_with_sort(self):
@@ -316,7 +321,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
name='foo-range2', physical_network='phys_net2')
self._test_list_with_sort('network-segment-range',
(range2, range1),
- [('name', 'desc')])
+ [('name', 'desc')],
+ as_admin=True)
def test_list_network_segment_ranges_with_pagination(self):
range1 = self._test_create_network_segment_range(
@@ -328,7 +334,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
self._test_list_with_pagination(
'network-segment-range',
(range1, range2, range3),
- ('name', 'asc'), 2, 2)
+ ('name', 'asc'), 2, 2,
+ as_admin=True)
def test_list_network_segment_ranges_with_pagination_reverse(self):
range1 = self._test_create_network_segment_range(
@@ -340,14 +347,17 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
self._test_list_with_pagination_reverse(
'network-segment-range',
(range1, range2, range3),
- ('name', 'asc'), 2, 2)
+ ('name', 'asc'), 2, 2,
+ as_admin=True)
def test_delete_network_segment_range(self):
network_segment_range = self._test_create_network_segment_range()
with mock.patch.object(segments_db, 'network_segments_exist_in_range',
return_value=False):
self._delete('network-segment-ranges',
- network_segment_range['network_segment_range']['id'])
+ network_segment_range['network_segment_range']['id'],
+ as_admin=True)
self._show('network-segment-ranges',
network_segment_range['network_segment_range']['id'],
- expected_code=webob.exc.HTTPNotFound.code)
+ expected_code=webob.exc.HTTPNotFound.code,
+ as_admin=True)
diff --git a/neutron/tests/unit/extensions/test_portsecurity.py b/neutron/tests/unit/extensions/test_portsecurity.py
index 027740c2f2..aedd75251d 100644
--- a/neutron/tests/unit/extensions/test_portsecurity.py
+++ b/neutron/tests/unit/extensions/test_portsecurity.py
@@ -18,7 +18,6 @@ from unittest import mock
from neutron_lib.api.definitions import port_security as psec
from neutron_lib.api import validators
-from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib.db import utils as db_utils
from neutron_lib.exceptions import port_security as psec_exc
@@ -311,11 +310,11 @@ class TestPortSecurity(PortSecurityDBTestCase):
self.skipTest("Plugin does not support security groups")
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
- set_context=True,
tenant_id='admin_tenant',
port_security_enabled=False)
net = self.deserialize('json', res)
- self._create_subnet('json', net['network']['id'], '10.0.0.0/24')
+ self._create_subnet('json', net['network']['id'], '10.0.0.0/24',
+ tenant_id='admin_tenant')
security_group = self.deserialize(
'json', self._create_security_group(self.fmt, 'asdf', 'asdf',
tenant_id='other_tenant'))
@@ -323,7 +322,6 @@ class TestPortSecurity(PortSecurityDBTestCase):
res = self._create_port('json', net['network']['id'],
arg_list=('security_groups',
'port_security_enabled'),
- set_context=True,
is_admin=True,
tenant_id='admin_tenant',
port_security_enabled=True,
@@ -331,19 +329,18 @@ class TestPortSecurity(PortSecurityDBTestCase):
port = self.deserialize('json', res)
self.assertTrue(port['port'][psec.PORTSECURITY])
self.assertEqual(port['port']['security_groups'], [security_group_id])
- self._delete('ports', port['port']['id'])
+ self._delete('ports', port['port']['id'], tenant_id='admin_tenant')
def test_create_port_with_no_admin_use_other_tenant_security_group(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
- set_context=True,
tenant_id='demo_tenant',
port_security_enabled=False)
net = self.deserialize('json', res)
self._create_subnet('json', net['network']['id'], '10.0.0.0/24',
- set_context=True, tenant_id='demo_tenant')
+ tenant_id='demo_tenant')
security_group = self.deserialize(
'json', self._create_security_group(self.fmt, 'asdf', 'asdf',
tenant_id='other_tenant'))
@@ -351,7 +348,6 @@ class TestPortSecurity(PortSecurityDBTestCase):
res = self._create_port('json', net['network']['id'],
arg_list=('security_groups',
'port_security_enabled'),
- set_context=True,
tenant_id='demo_tenant',
port_security_enabled=True,
security_groups=[security_group_id])
@@ -396,7 +392,7 @@ class TestPortSecurity(PortSecurityDBTestCase):
with self.network() as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
- set_context=True, is_admin=True,
+ is_admin=True,
tenant_id='admin_tenant',)
port = self.deserialize('json', res)
self.assertTrue(port['port'][psec.PORTSECURITY])
@@ -408,7 +404,9 @@ class TestPortSecurity(PortSecurityDBTestCase):
update_port = {'port':
{'security_groups': [security_group_id]}}
req = self.new_update_request('ports', update_port,
- port['port']['id'])
+ port['port']['id'],
+ tenant_id='admin_tenant',
+ as_admin=True)
port = self.deserialize('json', req.get_response(self.api))
security_groups = port['port']['security_groups']
self.assertIn(security_group_id, security_groups)
@@ -420,7 +418,6 @@ class TestPortSecurity(PortSecurityDBTestCase):
with self.network(tenant_id='demo_tenant') as net:
with self.subnet(network=net, tenant_id='demo_tenant'):
res = self._create_port('json', net['network']['id'],
- set_context=True,
tenant_id='demo_tenant',)
port = self.deserialize('json', res)
self.assertTrue(port['port'][psec.PORTSECURITY])
@@ -432,9 +429,8 @@ class TestPortSecurity(PortSecurityDBTestCase):
update_port = {'port':
{'security_groups': [security_group_id]}}
req = self.new_update_request('ports', update_port,
- port['port']['id'])
- req.environ['neutron.context'] = context.Context(
- '', 'other_tenant')
+ port['port']['id'],
+ tenant_id='other_tenant')
res = req.get_response(self.api)
self.assertEqual(404, res.status_int)
@@ -490,29 +486,26 @@ class TestPortSecurity(PortSecurityDBTestCase):
self._delete('ports', port['port']['id'])
def test_create_port_security_off_shared_network(self):
- with self.network(shared=True) as net:
+ with self.network(as_admin=True, shared=True) as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
arg_list=('port_security_enabled',),
port_security_enabled=False,
- tenant_id='not_network_owner',
- set_context=True)
+ tenant_id='not_network_owner')
self.deserialize('json', res)
self.assertEqual(403, res.status_int)
def test_update_port_security_off_shared_network(self):
- with self.network(shared=True) as net:
+ with self.network(as_admin=True, shared=True) as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
- tenant_id='not_network_owner',
- set_context=True)
+ tenant_id='not_network_owner')
port = self.deserialize('json', res)
# remove security group on port
update_port = {'port': {ext_sg.SECURITYGROUPS: None,
psec.PORTSECURITY: False}}
req = self.new_update_request('ports', update_port,
- port['port']['id'])
- req.environ['neutron.context'] = context.Context(
- '', 'not_network_owner')
+ port['port']['id'],
+ tenant_id='not_network_owner')
res = req.get_response(self.api)
self.assertEqual(exc.HTTPForbidden.code, res.status_int)
diff --git a/neutron/tests/unit/extensions/test_providernet.py b/neutron/tests/unit/extensions/test_providernet.py
index 61e1422aa2..fc40dae6c1 100644
--- a/neutron/tests/unit/extensions/test_providernet.py
+++ b/neutron/tests/unit/extensions/test_providernet.py
@@ -92,21 +92,28 @@ class ProvidernetExtensionTestCase(testlib_api.WebTestCase):
def _put_network_with_provider_attrs(self, ctx, expect_errors=False):
data = self._prepare_net_data()
+ ctx.roles = ['member', 'reader']
+ if ctx.is_admin:
+ ctx.roles.append('admin')
env = {'neutron.context': ctx}
instance = self.plugin.return_value
- instance.get_network.return_value = {'tenant_id': ctx.tenant_id,
+ instance.get_network.return_value = {'project_id': ctx.tenant_id,
'shared': False}
net_id = uuidutils.generate_uuid()
res = self.api.put(test_base._get_path('networks',
id=net_id,
fmt=self.fmt),
self.serialize({'network': data}),
+ content_type='application/' + self.fmt,
extra_environ=env,
expect_errors=expect_errors)
return res, data, net_id
def _post_network_with_provider_attrs(self, ctx, expect_errors=False):
data = self._prepare_net_data()
+ ctx.roles = ['member', 'reader']
+ if ctx.is_admin:
+ ctx.roles.append('admin')
env = {'neutron.context': ctx}
res = self.api.post(test_base._get_path('networks', fmt=self.fmt),
self.serialize({'network': data}),
@@ -119,6 +126,9 @@ class ProvidernetExtensionTestCase(testlib_api.WebTestCase):
expect_errors=False):
data = self._prepare_net_data()
data.update(bad_data)
+ ctx.roles = ['member', 'reader']
+ if ctx.is_admin:
+ ctx.roles.append('admin')
env = {'neutron.context': ctx}
res = self.api.post(test_base._get_path('networks', fmt=self.fmt),
self.serialize({'network': data}),
diff --git a/neutron/tests/unit/extensions/test_qos_gateway_ip.py b/neutron/tests/unit/extensions/test_qos_gateway_ip.py
index 246ed917b3..b8c6b73817 100644
--- a/neutron/tests/unit/extensions/test_qos_gateway_ip.py
+++ b/neutron/tests/unit/extensions/test_qos_gateway_ip.py
@@ -69,7 +69,7 @@ class GatewayIPQoSDBTestCaseBase(object):
ctx = context.get_admin_context()
policy_obj = policy.QosPolicy(ctx,
id=uuidutils.generate_uuid(),
- project_id='tenant', name='pol1',
+ project_id=self._tenant_id, name='pol1',
rules=[])
policy_obj.create()
with self.subnet(cidr='11.0.0.0/24') as public_sub,\
@@ -88,7 +88,7 @@ class GatewayIPQoSDBTestCaseBase(object):
ctx = context.get_admin_context()
policy_obj = policy.QosPolicy(ctx,
id=uuidutils.generate_uuid(),
- project_id='tenant', name='pol1',
+ project_id=self._tenant_id, name='pol1',
rules=[])
policy_obj.create()
with self.subnet(cidr='11.0.0.0/24') as public_sub,\
@@ -115,7 +115,7 @@ class GatewayIPQoSDBTestCaseBase(object):
ctx = context.get_admin_context()
policy_obj = policy.QosPolicy(ctx,
id=uuidutils.generate_uuid(),
- project_id='tenant', name='pol1',
+ project_id=self._tenant_id, name='pol1',
rules=[])
policy_obj.create()
with self.subnet(cidr='11.0.0.0/24') as public_sub,\
@@ -153,7 +153,7 @@ class GatewayIPQoSDBTestCaseBase(object):
ctx = context.get_admin_context()
policy_obj = policy.QosPolicy(ctx,
id=uuidutils.generate_uuid(),
- project_id='tenant', name='pol1',
+ project_id=self._tenant_id, name='pol1',
rules=[])
policy_obj.create()
with self.subnet(cidr='11.0.0.0/24') as public_sub,\
diff --git a/neutron/tests/unit/extensions/test_quotasv2.py b/neutron/tests/unit/extensions/test_quotasv2.py
index fab52f8be6..c5d914a944 100644
--- a/neutron/tests/unit/extensions/test_quotasv2.py
+++ b/neutron/tests/unit/extensions/test_quotasv2.py
@@ -120,8 +120,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_show_default_quotas_with_admin(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=True)
res = self.api.get(_get_path('quotas', id=project_id,
action=DEFAULT_QUOTAS_ACTION,
fmt=self.fmt),
@@ -137,8 +136,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_show_default_quotas_with_owner_project(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=False)}
+ env = test_base._get_neutron_env(project_id, as_admin=False)
res = self.api.get(_get_path('quotas', id=project_id,
action=DEFAULT_QUOTAS_ACTION,
fmt=self.fmt),
@@ -154,8 +152,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_show_default_quotas_without_admin_forbidden_returns_403(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=False)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=False)
res = self.api.get(_get_path('quotas', id=project_id,
action=DEFAULT_QUOTAS_ACTION,
fmt=self.fmt),
@@ -164,8 +161,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_show_quotas_with_admin(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=True)
res = self.api.get(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
@@ -179,16 +175,14 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_show_quotas_without_admin_forbidden_returns_403(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=False)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=False)
res = self.api.get(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_show_quotas_with_owner_project(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=False)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
res = self.api.get(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
@@ -202,8 +196,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_list_quotas_with_admin(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
res = self.api.get(_get_path('quotas', fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
@@ -212,16 +205,14 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_list_quotas_without_admin_forbidden_returns_403(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=False)}
+ env = test_base._get_neutron_env(project_id, as_admin=False)
res = self.api.get(_get_path('quotas', fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_without_admin_forbidden_returns_403(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=False)}
+ env = test_base._get_neutron_env(project_id, as_admin=False)
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@@ -230,8 +221,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_with_non_integer_returns_400(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': 'abc'}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@@ -240,8 +230,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_with_negative_integer_returns_400(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': -2}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@@ -250,8 +239,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_with_out_of_range_integer_returns_400(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': constants.DB_INTEGER_MAX_VALUE + 1}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@@ -260,8 +248,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_to_unlimited(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': -1}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@@ -270,8 +257,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_exceeding_current_limit(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': 120}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@@ -280,8 +266,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_with_non_support_resource_returns_400(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'abc': 100}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@@ -290,8 +275,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_with_admin(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=True)
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
@@ -306,8 +290,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_attributes(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=True)
quotas = {'quota': {'extra1': 100}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
@@ -321,8 +304,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
@mock.patch.object(driver_nolock.DbQuotaNoLockDriver, 'get_resource_usage')
def test_update_quotas_check_limit(self, mock_get_resource_usage):
tenant_id = 'tenant_id1'
- env = {'neutron.context': context.Context('', tenant_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(tenant_id, as_admin=True)
quotas = {'quota': {'network': 100, 'check_limit': False}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@@ -338,8 +320,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_delete_quotas_with_admin(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=True)
# Create a quota to ensure we have something to delete
quotas = {'quota': {'network': 100}}
self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
@@ -350,16 +331,14 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_delete_quotas_without_admin_forbidden_returns_403(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=False)}
+ env = test_base._get_neutron_env(project_id, as_admin=False)
res = self.api.delete(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_delete_quota_with_unknown_project_returns_404(self):
project_id = 'idnotexist'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=True)
res = self.api.delete(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(exc.HTTPNotFound.code, res.status_int)
@@ -373,8 +352,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_quotas_limit_check(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': 5}}
res = self.api.put(_get_path('quotas', id=project_id,
fmt=self.fmt),
@@ -465,8 +443,7 @@ class QuotaExtensionCfgTestCase(QuotaExtensionTestCase):
def test_show_quotas_with_admin(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=True)
res = self.api.get(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
@@ -489,8 +466,7 @@ class QuotaExtensionCfgTestCase(QuotaExtensionTestCase):
def test_delete_quotas_forbidden(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=False)}
+ env = test_base._get_neutron_env(project_id, as_admin=False)
res = self.api.delete(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
diff --git a/neutron/tests/unit/extensions/test_securitygroup.py b/neutron/tests/unit/extensions/test_securitygroup.py
index d0ca88b81a..bbf752e4cb 100644
--- a/neutron/tests/unit/extensions/test_securitygroup.py
+++ b/neutron/tests/unit/extensions/test_securitygroup.py
@@ -92,41 +92,39 @@ class SecurityGroupTestExtensionManager(object):
class SecurityGroupsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
- def _build_security_group(self, name, description, **kwargs):
+ def _build_security_group(self, name, description):
data = {
'security_group': {
'name': name,
- 'tenant_id': kwargs.get(
- 'tenant_id', test_db_base_plugin_v2.TEST_TENANT_ID),
'description': description}}
return data
- def _create_security_group_response(self, fmt, data, **kwargs):
- security_group_req = self.new_create_request('security-groups', data,
- fmt)
- if (kwargs.get('set_context') and 'tenant_id' in kwargs):
- # create a specific auth context for this request
- security_group_req.environ['neutron.context'] = (
- context.Context('', kwargs['tenant_id']))
+ def _create_security_group_response(self, fmt, data, tenant_id=None,
+ as_admin=False, **kwargs):
+ security_group_req = self.new_create_request(
+ 'security-groups', data, fmt, tenant_id=tenant_id,
+ as_admin=as_admin)
return security_group_req.get_response(self.ext_api)
- def _create_security_group(self, fmt, name, description, **kwargs):
- data = self._build_security_group(name, description, **kwargs)
- return self._create_security_group_response(fmt, data, **kwargs)
+ def _create_security_group(self, fmt, name, description, tenant_id=None,
+ as_admin=False, **kwargs):
+ data = self._build_security_group(name, description)
+ return self._create_security_group_response(
+ fmt, data, tenant_id=tenant_id, as_admin=as_admin, **kwargs)
def _build_security_group_rule(
self, security_group_id, direction, proto,
port_range_min=None, port_range_max=None,
remote_ip_prefix=None, remote_group_id=None,
remote_address_group_id=None,
- tenant_id=test_db_base_plugin_v2.TEST_TENANT_ID,
- ethertype=const.IPv4):
+ tenant_id=None,
+ ethertype=const.IPv4,
+ as_admin=False):
data = {'security_group_rule': {'security_group_id': security_group_id,
'direction': direction,
'protocol': proto,
- 'ethertype': ethertype,
- 'tenant_id': tenant_id}}
+ 'ethertype': ethertype}}
if port_range_min:
data['security_group_rule']['port_range_min'] = port_range_min
@@ -145,19 +143,13 @@ class SecurityGroupsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
return data
- def _create_security_group_rule(self, fmt, rules, **kwargs):
+ def _create_security_group_rule(self, fmt, rules, tenant_id=None,
+ as_admin=False, **kwargs):
security_group_rule_req = self.new_create_request(
- 'security-group-rules', rules, fmt)
-
- if (kwargs.get('set_context') and 'tenant_id' in kwargs):
- # create a specific auth context for this request
- security_group_rule_req.environ['neutron.context'] = (
- context.Context('', kwargs['tenant_id']))
- elif kwargs.get('admin_context'):
- security_group_rule_req.environ['neutron.context'] = (
- context.Context(user_id='admin', tenant_id='admin-tenant',
- is_admin=True))
+ 'security-group-rules', rules, fmt, tenant_id=tenant_id,
+ as_admin=as_admin)
+
return security_group_rule_req.get_response(self.ext_api)
def _make_security_group(self, fmt, name, description, **kwargs):
@@ -166,8 +158,10 @@ class SecurityGroupsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
- def _make_security_group_rule(self, fmt, rules, **kwargs):
- res = self._create_security_group_rule(self.fmt, rules)
+ def _make_security_group_rule(self, fmt, rules, tenant_id=None,
+ as_admin=False, **kwargs):
+ res = self._create_security_group_rule(
+ self.fmt, rules, tenant_id=tenant_id, as_admin=as_admin)
if res.status_int >= webob.exc.HTTPBadRequest.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
@@ -819,9 +813,10 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
sg['security_group']['id'], "ingress", const.PROTO_NAME_TCP,
port_range_min=22, port_range_max=22,
remote_ip_prefix="10.0.2.0/24",
- ethertype=const.IPv4,
- tenant_id='admin-tenant')
- self._make_security_group_rule(self.fmt, rule, admin_context=True)
+ ethertype=const.IPv4)
+ self._make_security_group_rule(self.fmt, rule,
+ tenant_id='admin-tenant',
+ as_admin=True)
# Now, let's make sure all the rules are there, with their odd
# tenant_id behavior.
@@ -878,23 +873,20 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
self._delete('security-groups', sg['security_groups'][0]['id'],
- webob.exc.HTTPNoContent.code)
+ webob.exc.HTTPNoContent.code, as_admin=True)
def test_delete_default_security_group_nonadmin(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
- neutron_context = context.Context(
- '', test_db_base_plugin_v2.TEST_TENANT_ID)
self._delete('security-groups', sg['security_groups'][0]['id'],
webob.exc.HTTPConflict.code,
- neutron_context=neutron_context)
+ tenant_id=test_db_base_plugin_v2.TEST_TENANT_ID)
def test_security_group_list_creates_default_security_group(self):
- neutron_context = context.Context(
- '', test_db_base_plugin_v2.TEST_TENANT_ID)
sg = self._list('security-groups',
- neutron_context=neutron_context).get('security_groups')
+ tenant_id=test_db_base_plugin_v2.TEST_TENANT_ID).get(
+ 'security_groups')
self.assertEqual(1, len(sg))
def test_security_group_port_create_creates_default_security_group(self):
@@ -2112,13 +2104,15 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
with self.security_group() as sg:
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', const.PROTO_NUM_TCP)
- rule['security_group_rule'].update({'id': specified_id,
- 'port_range_min': None,
- 'port_range_max': None,
- 'remote_ip_prefix': None,
- 'remote_group_id': None,
- 'remote_address_group_id':
- None})
+ rule['security_group_rule'].update({
+ 'id': specified_id,
+ 'port_range_min': None,
+ 'port_range_max': None,
+ 'remote_ip_prefix': None,
+ 'remote_group_id': None,
+ 'tenant_id': test_db_base_plugin_v2.TEST_TENANT_ID,
+ 'remote_address_group_id':
+ None})
result = self.plugin.create_security_group_rule(
neutron_context, rule)
self.assertEqual(specified_id, result['id'])
diff --git a/neutron/tests/unit/extensions/test_segment.py b/neutron/tests/unit/extensions/test_segment.py
index fdc84d4b34..1a3e33e8cf 100644
--- a/neutron/tests/unit/extensions/test_segment.py
+++ b/neutron/tests/unit/extensions/test_segment.py
@@ -114,7 +114,7 @@ class SegmentTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
segment['segment'][k] = None if v is None else str(v)
segment_req = self.new_create_request(
- 'segments', segment, fmt)
+ 'segments', segment, fmt, as_admin=True)
segment_res = segment_req.get_response(self.ext_api)
if expected_res_status:
@@ -189,7 +189,8 @@ class TestSegmentNameDescription(SegmentTestCase):
result = self._update('segments',
segment['segment']['id'],
{'segment': {'name': 'Segment name'}},
- expected_code=webob.exc.HTTPOk.code)
+ expected_code=webob.exc.HTTPOk.code,
+ as_admin=True)
self.assertEqual('Segment name', result['segment']['name'])
def test_update_segment_set_description(self):
@@ -197,7 +198,8 @@ class TestSegmentNameDescription(SegmentTestCase):
result = self._update('segments',
segment['segment']['id'],
{'segment': {'description': 'Segment desc'}},
- expected_code=webob.exc.HTTPOk.code)
+ expected_code=webob.exc.HTTPOk.code,
+ as_admin=True)
self.assertEqual('Segment desc', result['segment']['description'])
def test_update_segment_set_name_to_none(self):
@@ -206,7 +208,8 @@ class TestSegmentNameDescription(SegmentTestCase):
result = self._update('segments',
segment['segment']['id'],
{'segment': {'name': None}},
- expected_code=webob.exc.HTTPOk.code)
+ expected_code=webob.exc.HTTPOk.code,
+ as_admin=True)
self.assertIsNone(result['segment']['name'])
def test_update_segment_set_description_to_none(self):
@@ -273,7 +276,8 @@ class TestSegment(SegmentTestCase):
with self.network() as network:
network = network['network']
- local_segment = self._list('segments')['segments'][0]
+ local_segment = self._list('segments',
+ as_admin=True)['segments'][0]
with mock.patch.object(registry, 'publish') as publish:
publish.side_effect = exceptions.CallbackFailure(errors=Exception)
self.assertRaises(webob.exc.HTTPClientError,
@@ -312,7 +316,7 @@ class TestSegment(SegmentTestCase):
physical_network='physnet0')
segment = self.segment(network_id=network['id'], segmentation_id=201,
physical_network='physnet1')
- self._delete('segments', segment['segment']['id'])
+ self._delete('segments', segment['segment']['id'], as_admin=True)
self._show('segments', segment['segment']['id'],
expected_code=webob.exc.HTTPNotFound.code)
@@ -326,8 +330,10 @@ class TestSegment(SegmentTestCase):
segment_id = segment['segment']['id']
with self.subnet(network=network, segment_id=segment_id):
self._delete('segments', segment_id,
- expected_code=webob.exc.HTTPConflict.code)
- exist_segment = self._show('segments', segment_id)
+ expected_code=webob.exc.HTTPConflict.code,
+ as_admin=True)
+ exist_segment = self._show('segments', segment_id,
+ as_admin=True)
self.assertEqual(segment_id, exist_segment['segment']['id'])
def test_get_segment(self):
@@ -336,7 +342,8 @@ class TestSegment(SegmentTestCase):
segment = self._test_create_segment(network_id=network['id'],
physical_network='physnet',
segmentation_id=200)
- req = self.new_show_request('segments', segment['segment']['id'])
+ req = self.new_show_request('segments', segment['segment']['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(segment['segment']['id'], res['segment']['id'])
@@ -349,14 +356,15 @@ class TestSegment(SegmentTestCase):
self._test_create_segment(network_id=network['id'],
physical_network='physnet2',
segmentation_id=201)
- res = self._list('segments')
+ res = self._list('segments', as_admin=True)
self.assertEqual(3, len(res['segments']))
def test_list_segments_with_sort(self):
with self.network() as network:
network = network['network']
- local_segment = {'segment': self._list('segments')['segments'][0]}
+ local_segment = {'segment': self._list('segments',
+ as_admin=True)['segments'][0]}
s1 = self._test_create_segment(network_id=network['id'],
physical_network='physnet1',
segmentation_id=200)
@@ -366,13 +374,15 @@ class TestSegment(SegmentTestCase):
self._test_list_with_sort('segment',
(s2, s1, local_segment),
[('physical_network', 'desc')],
- query_params='network_id=%s' % network['id'])
+ query_params='network_id=%s' % network['id'],
+ as_admin=True)
def test_list_segments_with_pagination(self):
with self.network() as network:
network = network['network']
- local_segment = {'segment': self._list('segments')['segments'][0]}
+ local_segment = {'segment': self._list('segments',
+ as_admin=True)['segments'][0]}
s1 = self._test_create_segment(network_id=network['id'],
physical_network='physnet0',
segmentation_id=200)
@@ -386,7 +396,8 @@ class TestSegment(SegmentTestCase):
'segment',
(local_segment, s1, s2, s3),
('physical_network', 'asc'), 3, 2,
- query_params='network_id=%s' % network['id'])
+ query_params='network_id=%s' % network['id'],
+ as_admin=True)
def test_list_segments_with_pagination_reverse(self):
with self.network() as network:
@@ -405,7 +416,8 @@ class TestSegment(SegmentTestCase):
'segment',
(s1, s2, s3),
('physical_network', 'asc'), 2, 2,
- query_params='network_id=%s' % network['id'])
+ query_params='network_id=%s' % network['id'],
+ as_admin=True)
def test_update_segments(self):
with self.network() as network:
@@ -456,7 +468,7 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
with self.subnet(network=network, segment_id=segment_id) as subnet:
subnet = subnet['subnet']
- request = self.new_show_request('subnets', subnet['id'])
+ request = self.new_show_request('subnets', subnet['id'], as_admin=True)
response = request.get_response(self.api)
res = self.deserialize(self.fmt, response)
self.assertEqual(segment_id,
@@ -556,12 +568,14 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
with self.network() as network:
pass
- segment_id = self._list('segments')['segments'][0]['id']
+ segment_id = self._list('segments',
+ as_admin=True)['segments'][0]['id']
with self.subnet(network=network, segment_id=None) as subnet:
subnet = subnet['subnet']
data = {'subnet': {'segment_id': segment_id}}
- request = self.new_update_request('subnets', data, subnet['id'])
+ request = self.new_update_request('subnets', data, subnet['id'],
+ as_admin=True)
response = request.get_response(self.api)
res = self.deserialize(self.fmt, response)
@@ -582,7 +596,8 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
subnet = subnet['subnet']
data = {'subnet': {'segment_id': segment1['id']}}
- request = self.new_update_request('subnets', data, subnet['id'])
+ request = self.new_update_request('subnets', data, subnet['id'],
+ as_admin=True)
response = request.get_response(self.api)
res = self.deserialize(self.fmt, response)
@@ -604,7 +619,8 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
subnet = subnet['subnet']
data = {'subnet': {'segment_id': segment1['id']}}
- request = self.new_update_request('subnets', data, subnet['id'])
+ request = self.new_update_request('subnets', data, subnet['id'],
+ as_admin=True)
response = request.get_response(self.api)
self.assertEqual(webob.exc.HTTPBadRequest.code, response.status_int)
@@ -627,7 +643,8 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
subnet2 = subnet2['subnet']
data = {'subnet': {'segment_id': segment1['id']}}
- request = self.new_update_request('subnets', data, subnet1['id'])
+ request = self.new_update_request('subnets', data, subnet1['id'],
+ as_admin=True)
response = request.get_response(self.api)
self.assertEqual(webob.exc.HTTPBadRequest.code, response.status_int)
@@ -636,7 +653,7 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
with self.network() as network:
net = network['network']
- segment_id = self._list('segments')['segments'][0]['id']
+ segment_id = self._list('segments', as_admin=True)['segments'][0]['id']
with self.subnet(network=network, segment_id=segment_id) as subnet:
subnet = subnet['subnet']
@@ -645,7 +662,8 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
segmentation_id=202)['segment']
data = {'subnet': {'segment_id': segment2['id']}}
- request = self.new_update_request('subnets', data, subnet['id'])
+ request = self.new_update_request('subnets', data, subnet['id'],
+ as_admin=True)
response = request.get_response(self.api)
self.assertEqual(webob.exc.HTTPBadRequest.code, response.status_int)
@@ -855,7 +873,7 @@ class TestMl2HostSegmentMappingOVS(HostSegmentMappingTestCase):
def test_segment_deletion_removes_host_mapping(self):
host = 'host1'
segment = self._test_one_segment_one_host(host)
- self._delete('segments', segment['id'])
+ self._delete('segments', segment['id'], as_admin=True)
segments_host_db = self._get_segments_for_host(host)
self.assertFalse(segments_host_db)
@@ -1021,7 +1039,8 @@ class SegmentAwareIpamTestCase(SegmentTestCase):
segment_id=segment['segment']['id'],
ip_version=ip_version,
cidr=cidr,
- allocation_pools=allocation_pools) as subnet:
+ allocation_pools=allocation_pools,
+ as_admin=True) as subnet:
self._validate_l2_adjacency(network['network']['id'],
is_adjacent=False)
return subnet
@@ -1098,6 +1117,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
fixed_ips=[
{'subnet_id': subnet['subnet']['id']}
])
@@ -1125,6 +1145,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
res = self.deserialize(self.fmt, response)
@@ -1147,6 +1168,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
res = self.deserialize(self.fmt, response)
@@ -1173,6 +1195,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
@@ -1188,6 +1211,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
res = self.deserialize(self.fmt, response)
@@ -1201,6 +1225,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
res = self.deserialize(self.fmt, response)
@@ -1220,6 +1245,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
self.deserialize(self.fmt, response)
@@ -1282,6 +1308,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
**kwargs)
port = self.deserialize(self.fmt, response)
request = self.new_show_request('ports', port['port']['id'])
@@ -1326,6 +1353,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
port = self.deserialize(self.fmt, response)
@@ -1362,6 +1390,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
port = self.deserialize(self.fmt, response)
@@ -1403,7 +1432,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
def _create_deferred_ip_port(self, network):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
- tenant_id=network['network']['tenant_id'])
+ tenant_id=network['network']['tenant_id'],
+ is_admin=True)
port = self.deserialize(self.fmt, response)
ips = port['port']['fixed_ips']
self.assertEqual(0, len(ips))
@@ -1423,7 +1453,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Try requesting an IP (but the only subnet is on a segment)
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
# Port update succeeds and allocates a new IP address.
@@ -1441,7 +1472,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
with self.subnet(network=network):
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
@@ -1457,7 +1489,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
- fixed_ips=[])
+ fixed_ips=[],
+ is_admin=True)
port = self.deserialize(self.fmt, response)
ips = port['port']['fixed_ips']
self.assertEqual(0, len(ips))
@@ -1465,7 +1498,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Create the subnet and try to update the port to get an IP
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
@@ -1485,7 +1519,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
portbindings.HOST_ID: 'fakehost',
'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
@@ -1510,7 +1545,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
portbindings.HOST_ID: 'fakehost',
'fixed_ips': []}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
@@ -1528,7 +1564,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Try requesting an IP (but the only subnet is on a segment)
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
res = self.deserialize(self.fmt, response)
@@ -1551,7 +1588,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Try requesting an IP (but the only subnet is on a segment)
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
self.deserialize(self.fmt, response)
@@ -1599,7 +1637,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Try requesting an IP (but the subnet ran out of ips)
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
res = self.deserialize(self.fmt, response)
@@ -1619,6 +1658,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
self._assert_one_ip_in_subnet(response, subnets[1]['subnet']['cidr'])
@@ -1626,7 +1666,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Now, try to update binding to a host on the other segment
data = {'port': {portbindings.HOST_ID: 'fakehost2'}}
- port_req = self.new_update_request('ports', data, port['port']['id'])
+ port_req = self.new_update_request('ports', data, port['port']['id'],
+ as_admin=True)
response = port_req.get_response(self.api)
# It fails since the IP address isn't compatible with the new segment
@@ -1644,6 +1685,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
self._assert_one_ip_in_subnet(response, subnets[1]['subnet']['cidr'])
@@ -1651,7 +1693,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Now, try to update binding to another host in same segment
data = {'port': {portbindings.HOST_ID: 'fakehost1'}}
- port_req = self.new_update_request('ports', data, port['port']['id'])
+ port_req = self.new_update_request('ports', data, port['port']['id'],
+ as_admin=True)
response = port_req.get_response(self.api)
# Since the new host is in the same segment, it succeeds.
@@ -1671,7 +1714,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
data = {'port': {portbindings.HOST_ID: 'fakehost',
port_apidef.PORT_MAC_ADDRESS: '00:00:00:00:00:01'}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
# Port update succeeds and allocates a new IP address.
@@ -1722,6 +1766,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost_a'})
res = self.deserialize(self.fmt, response)
@@ -1849,7 +1894,8 @@ class TestSegmentAwareIpamML2(TestSegmentAwareIpam):
network, segment, subnet = self._create_test_segment_with_subnet()
self.assertTrue(self.VLAN_MIN <=
segment['segment']['segmentation_id'] <= self.VLAN_MAX)
- retrieved_segment = self._show('segments', segment['segment']['id'])
+ retrieved_segment = self._show('segments', segment['segment']['id'],
+ as_admin=True)
self.assertEqual(segment['segment']['segmentation_id'],
retrieved_segment['segment']['segmentation_id'])
@@ -1975,7 +2021,8 @@ class TestNovaSegmentNotifier(SegmentAwareIpamTestCase):
def test_update_subnet_association_with_segment(self, cidr='10.0.0.0/24',
allocation_pools=None):
with self.network() as network:
- segment_id = self._list('segments')['segments'][0]['id']
+ segment_id = self._list('segments',
+ as_admin=True)['segments'][0]['id']
network_id = network['network']['id']
self._setup_host_mappings([(segment_id, 'fakehost')])
@@ -1993,9 +2040,11 @@ class TestNovaSegmentNotifier(SegmentAwareIpamTestCase):
segment_id=None) as subnet:
self._validate_l2_adjacency(network_id, is_adjacent=True)
data = {'subnet': {'segment_id': segment_id}}
- self.new_update_request('subnets', data, subnet['subnet']['id'])
+ self.new_update_request('subnets', data, subnet['subnet']['id'],
+ as_admin=True)
self.new_update_request(
- 'subnets', data, subnet['subnet']['id']).get_response(self.api)
+ 'subnets', data, subnet['subnet']['id'],
+ as_admin=True).get_response(self.api)
self._validate_l2_adjacency(network_id, is_adjacent=False)
self._assert_inventory_creation(segment_id, aggregate, subnet)
@@ -2287,7 +2336,8 @@ class TestNovaSegmentNotifier(SegmentAwareIpamTestCase):
def _create_test_port(self, network_id, tenant_id, subnet, **kwargs):
port = self._make_port(self.fmt, network_id, tenant_id=tenant_id,
- arg_list=(portbindings.HOST_ID,), **kwargs)
+ as_admin=True, arg_list=(portbindings.HOST_ID,),
+ **kwargs)
self.batch_notifier._notify()
return port
@@ -2403,7 +2453,7 @@ class TestNovaSegmentNotifier(SegmentAwareIpamTestCase):
if compute_owned:
port_data['port']['device_owner'] = (
constants.DEVICE_OWNER_COMPUTE_PREFIX)
- self._update('ports', port['port']['id'], port_data)
+ self._update('ports', port['port']['id'], port_data, as_admin=True)
self.batch_notifier._notify()
self._assert_inventory_update_port(
first_subnet['subnet']['segment_id'], original_inventory,
diff --git a/neutron/tests/unit/extensions/test_servicetype.py b/neutron/tests/unit/extensions/test_servicetype.py
index a9e16cf899..5af9c14ac3 100644
--- a/neutron/tests/unit/extensions/test_servicetype.py
+++ b/neutron/tests/unit/extensions/test_servicetype.py
@@ -251,7 +251,8 @@ class ServiceTypeManagerExtTestCase(ServiceTypeExtensionTestCaseBase):
super(ServiceTypeManagerExtTestCase, self).setUp()
def _list_service_providers(self):
- return self.api.get(_get_path('service-providers', fmt=self.fmt))
+ return self.api.get(_get_path('service-providers', fmt=self.fmt),
+ extra_environ=test_base._get_neutron_env())
def test_list_service_providers(self):
res = self._list_service_providers()
diff --git a/neutron/tests/unit/extensions/test_subnet_onboard.py b/neutron/tests/unit/extensions/test_subnet_onboard.py
index bc81600349..d0471ee57e 100644
--- a/neutron/tests/unit/extensions/test_subnet_onboard.py
+++ b/neutron/tests/unit/extensions/test_subnet_onboard.py
@@ -37,7 +37,7 @@ class SubnetOnboardTestsBase(object):
tenant_id = project_id if project_id else kwargs.get(
'tenant_id', None)
if not tenant_id:
- tenant_id = _uuid()
+ tenant_id = self._tenant_id
scope_data = {'tenant_id': tenant_id, 'ip_version': ip_version,
'shared': shared, 'name': name + '-scope'}
@@ -53,7 +53,7 @@ class SubnetOnboardTestsBase(object):
tenant_id = project_id if project_id else kwargs.get(
'tenant_id', None)
if not tenant_id:
- tenant_id = _uuid()
+ tenant_id = self._tenant_id
pool_data = {'tenant_id': tenant_id, 'shared': shared, 'name': name,
'address_scope_id': address_scope_id,
'prefixes': prefixes, 'is_default': is_default_pool}
diff --git a/neutron/tests/unit/extensions/test_subnet_service_types.py b/neutron/tests/unit/extensions/test_subnet_service_types.py
index a086ebef74..2d2e5c0c28 100644
--- a/neutron/tests/unit/extensions/test_subnet_service_types.py
+++ b/neutron/tests/unit/extensions/test_subnet_service_types.py
@@ -344,13 +344,14 @@ class SubnetServiceTypesExtensionTestCase(
tenant_id=network['tenant_id'],
device_owner=service_type,
arg_list=(portbindings.HOST_ID,),
- **{portbindings.HOST_ID: 'fakehost'})
+ **{portbindings.HOST_ID: 'fakehost'},
+ is_admin=True)
port = self.deserialize('json', port)['port']
# Update the port's host binding.
data = {'port': {portbindings.HOST_ID: 'fakehost2'}}
# self._update will fail with a MismatchError if the update cannot be
# applied
- port = self._update('ports', port['id'], data)
+ port = self._update('ports', port['id'], data, as_admin=True)
class SubnetServiceTypesExtensionTestCasev6(
diff --git a/neutron/tests/unit/extensions/test_subnetpool_prefix_ops.py b/neutron/tests/unit/extensions/test_subnetpool_prefix_ops.py
index 84e38073f5..01ef193be4 100644
--- a/neutron/tests/unit/extensions/test_subnetpool_prefix_ops.py
+++ b/neutron/tests/unit/extensions/test_subnetpool_prefix_ops.py
@@ -36,7 +36,7 @@ class SubnetpoolPrefixOpsTestBase(object):
tenant_id = project_id if project_id else kwargs.get(
'tenant_id', None)
if not tenant_id:
- tenant_id = _uuid()
+ tenant_id = self._tenant_id
scope_data = {'tenant_id': tenant_id, 'ip_version': ip_version,
'shared': shared, 'name': name + '-scope'}
@@ -52,7 +52,7 @@ class SubnetpoolPrefixOpsTestBase(object):
tenant_id = project_id if project_id else kwargs.get(
'tenant_id', None)
if not tenant_id:
- tenant_id = _uuid()
+ tenant_id = self._tenant_id
pool_data = {'tenant_id': tenant_id, 'shared': shared, 'name': name,
'address_scope_id': address_scope_id,
'prefixes': prefixes, 'is_default': is_default_pool}
diff --git a/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py b/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py
index ef694a059e..b1be2da6f9 100644
--- a/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py
+++ b/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py
@@ -78,6 +78,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
net_arg = {pnet.NETWORK_TYPE: 'vxlan',
pnet.SEGMENTATION_ID: '1'}
self._network = self._make_network(self.fmt, 'net1', True,
+ as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,),
**net_arg)
@@ -86,6 +87,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: '2'}
self._network2 = self._make_network(self.fmt, 'net2', True,
+ as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID,),
@@ -94,6 +96,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
net_arg = {pnet.NETWORK_TYPE: 'flat',
pnet.PHYSICAL_NETWORK: 'noagent'}
self._network3 = self._make_network(self.fmt, 'net3', True,
+ as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,),
**net_arg)
@@ -299,6 +302,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True}
with self.port(subnet=snet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@@ -329,6 +333,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network, enable_dhcp=False) as snet:
host_arg = {portbindings.HOST_ID: HOST, 'admin_state_up': True}
with self.port(subnet=snet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@@ -357,6 +362,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network,
enable_dhcp=False) as snet:
with self.port(
+ is_admin=True,
subnet=snet,
project_id=self.tenant,
device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)\
@@ -365,8 +371,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
plugin.update_distributed_port_binding(self.adminContext,
port_id, {'port': {portbindings.HOST_ID: HOST_4,
'device_id': router['id']}})
- port = self._show('ports', port_id,
- neutron_context=self.adminContext)
+ port = self._show('ports', port_id, as_admin=True)
self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
port['port'][portbindings.VIF_TYPE])
self.callbacks.update_device_up(self.adminContext,
@@ -388,6 +393,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network, enable_dhcp=False) as snet:
host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True}
with self.port(subnet=snet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@@ -423,6 +429,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network, enable_dhcp=False) as snet:
host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True}
with self.port(subnet=snet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@@ -478,10 +485,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg):
p1 = port1['port']
@@ -512,9 +521,11 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST + '_3'}
with self.port(subnet=subnet,
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg):
p1 = port1['port']
@@ -535,10 +546,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network2) as subnet:
host_arg = {portbindings.HOST_ID: host}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg):
p1 = port1['port']
@@ -569,11 +582,13 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
host_arg = {portbindings.HOST_ID: HOST + '_2'}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
@@ -610,12 +625,14 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
host_arg = {portbindings.HOST_ID: HOST,
'admin_state_up': True}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID, 'admin_state_up',),
**host_arg) as port1:
host_arg = {portbindings.HOST_ID: HOST + '_2',
'admin_state_up': True}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,
'admin_state_up',),
@@ -669,16 +686,19 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST + '_2'}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.subnet(cidr='10.1.0.0/24') as subnet2:
with self.port(subnet=subnet2,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port3:
@@ -742,6 +762,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
ipv6_address_mode=constants.IPV6_SLAAC) as subnet2:
with self.port(
subnet,
+ is_admin=True,
fixed_ips=[{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet2['subnet']['id']}],
device_owner=DEVICE_OWNER_COMPUTE,
@@ -783,10 +804,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
host_arg = {portbindings.HOST_ID: HOST}
# 2 ports on host 1
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
@@ -794,6 +817,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
# agent on host 1
host_arg = {portbindings.HOST_ID: HOST + '_2'}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port3:
@@ -833,10 +857,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
@@ -877,10 +903,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
@@ -919,6 +947,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True}
with self.port(subnet=snet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@@ -954,6 +983,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
@@ -966,6 +996,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
device=device)
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
@@ -995,10 +1026,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
@@ -1029,6 +1062,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST + '_5'}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@@ -1043,7 +1077,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
new_mac = ':'.join(mac)
data = {'port': {'mac_address': new_mac,
portbindings.HOST_ID: HOST}}
- req = self.new_update_request('ports', data, p1['id'])
+ req = self.new_update_request('ports', data, p1['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertIn('port', res)
self.assertEqual(new_mac, res['port']['mac_address'])
@@ -1080,6 +1115,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
fixed_ips = [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.2'}]
with self.port(subnet=subnet, cidr='10.0.0.0/24',
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
fixed_ips=fixed_ips,
@@ -1094,7 +1130,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
{'ip_address': '10.0.0.10'}]}}
- self.new_update_request('ports', data, p['id'])
+ self.new_update_request('ports', data, p['id'],
+ as_admin=True)
l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver()
l2pop_mech.L2PopulationAgentNotify = mock.Mock()
l2notify = l2pop_mech.L2PopulationAgentNotify
@@ -1109,6 +1146,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
fixed_ips = [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.2'}]
with self.port(subnet=subnet, cidr='10.0.0.0/24',
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
fixed_ips=fixed_ips,
@@ -1125,7 +1163,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
{'ip_address': '10.0.0.10'}]}}
- req = self.new_update_request('ports', data, p1['id'])
+ req = self.new_update_request('ports', data, p1['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(2, len(ips))
@@ -1143,7 +1182,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
{'ip_address': '10.0.0.16'}]}}
- req = self.new_update_request('ports', data, p1['id'])
+ req = self.new_update_request('ports', data, p1['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(2, len(ips))
@@ -1162,7 +1202,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
self.mock_fanout.reset_mock()
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.16'}]}}
- req = self.new_update_request('ports', data, p1['id'])
+ req = self.new_update_request('ports', data, p1['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(1, len(ips))
@@ -1182,6 +1223,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet, cidr='10.0.0.0/24',
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@@ -1204,6 +1246,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
self._register_ml2_agents()
host_arg = {portbindings.HOST_ID: HOST}
with self.port(arg_list=(portbindings.HOST_ID,),
+ is_admin=True,
**host_arg) as port:
port_id = port['port']['id']
# ensure various formats all result in correct port_id
@@ -1217,7 +1260,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
def _update_and_check_portbinding(self, port_id, host_id):
data = {'port': {portbindings.HOST_ID: host_id}}
- req = self.new_update_request('ports', data, port_id)
+ req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(host_id, res['port'][portbindings.HOST_ID])
@@ -1227,6 +1271,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet, cidr='10.0.0.0/24',
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@@ -1326,6 +1371,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network, enable_dhcp=False) as snet:
host_arg = {portbindings.HOST_ID: HOST, 'admin_state_up': True}
with self.port(subnet=snet,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
arg_list=(portbindings.HOST_ID,),
**host_arg) as p:
diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py
index 50432450d1..732573c721 100644
--- a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py
+++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py
@@ -449,11 +449,10 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
{'vtep-physical-switch': 'psw1', 'vtep-logical-switch': 'lsw1',
'tag': 1024, 'parent_name': 'fakename'},
]
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
# succeed without binding:profile
- with self.port(subnet=subnet1,
- set_context=True, tenant_id='test'):
+ with self.port(subnet=subnet1):
pass
# fail with invalid binding profiles
for invalid_profile in invalid_binding_profiles:
@@ -465,7 +464,6 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
expected_res_status=403,
arg_list=(
ovn_const.OVN_PORT_BINDING_PROFILE,),
- set_context=True, tenant_id='test',
**kwargs):
pass
except exc.HTTPClientError:
@@ -534,10 +532,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
'opt_value': 'apple'},
{'ip_version': 6, 'opt_name': 'grape',
'opt_value': 'grape'}]}}
- with self.network(set_context=True, tenant_id='test') as net:
+ with self.network() as net:
with self.subnet(network=net) as subnet:
- with self.port(subnet=subnet,
- set_context=True, tenant_id='test') as port:
+ with self.port(subnet=subnet) as port:
port_id = port['port']['id']
self._update('ports', port_id, data)
@@ -548,11 +545,12 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
mock_log.assert_has_calls([expected_call])
def test_create_and_update_ignored_fip_port(self):
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
- with self.port(subnet=subnet1,
- device_owner=const.DEVICE_OWNER_FLOATINGIP,
- set_context=True, tenant_id='test') as port:
+ with self.port(
+ subnet=subnet1,
+ is_admin=True,
+ device_owner=const.DEVICE_OWNER_FLOATINGIP) as port:
self.nb_ovn.create_lswitch_port.assert_not_called()
data = {'port': {'name': 'new'}}
req = self.new_update_request('ports', data,
@@ -562,15 +560,17 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
self.nb_ovn.set_lswitch_port.assert_not_called()
def test_update_ignored_port_from_fip_device_owner(self):
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
- with self.port(subnet=subnet1,
- device_owner=const.DEVICE_OWNER_FLOATINGIP,
- set_context=True, tenant_id='test') as port:
+ with self.port(
+ subnet=subnet1,
+ is_admin=True,
+ device_owner=const.DEVICE_OWNER_FLOATINGIP) as port:
self.nb_ovn.create_lswitch_port.assert_not_called()
data = {'port': {'device_owner': 'test'}}
req = self.new_update_request('ports', data,
- port['port']['id'])
+ port['port']['id'],
+ as_admin=True)
res = req.get_response(self.api)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
msg = jsonutils.loads(res.body)['NeutronError']['message']
@@ -581,17 +581,18 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
self.nb_ovn.set_lswitch_port.assert_not_called()
def test_update_ignored_port_to_fip_device_owner(self):
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
- device_owner='test',
- set_context=True, tenant_id='test') as port:
+ is_admin=True,
+ device_owner='test') as port:
self.assertEqual(
1, self.nb_ovn.create_lswitch_port.call_count)
data = {'port': {'device_owner':
const.DEVICE_OWNER_FLOATINGIP}}
req = self.new_update_request('ports', data,
- port['port']['id'])
+ port['port']['id'],
+ as_admin=True)
res = req.get_response(self.api)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
msg = jsonutils.loads(res.body)['NeutronError']['message']
@@ -605,11 +606,11 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
kwargs = {'mac_address': '00:00:00:00:00:01',
'fixed_ips': [{'ip_address': '10.0.0.2'},
{'ip_address': '10.0.0.4'}]}
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
+ is_admin=True,
arg_list=('mac_address', 'fixed_ips'),
- set_context=True, tenant_id='test',
**kwargs) as port:
self.assertTrue(self.nb_ovn.create_lswitch_port.called)
called_args_dict = (
@@ -621,7 +622,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
data = {'port': {'mac_address': '00:00:00:00:00:02'}}
req = self.new_update_request(
'ports',
- data, port['port']['id'])
+ data, port['port']['id'],
+ as_admin=True)
req.get_response(self.api)
self.assertTrue(self.nb_ovn.set_lswitch_port.called)
called_args_dict = (
@@ -635,11 +637,10 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
# be treated as VIP.
kwargs = {'port_security_enabled': False,
'device_owner': 'compute:nova'}
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
arg_list=('port_security_enabled',),
- set_context=True, tenant_id='test',
**kwargs) as port:
self.assertTrue(self.nb_ovn.create_lswitch_port.called)
called_args_dict = (
@@ -653,7 +654,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
data = {'port': {'mac_address': '00:00:00:00:00:01'}}
req = self.new_update_request(
'ports',
- data, port['port']['id'])
+ data, port['port']['id'],
+ as_admin=True)
req.get_response(self.api)
self.assertTrue(self.nb_ovn.set_lswitch_port.called)
called_args_dict = (
@@ -687,11 +689,11 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
{"ip_address": "2.2.2.2",
"mac_address": "22:22:22:22:22:22"}],
'device_owner': 'compute:nova'}
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
+ is_admin=True,
arg_list=('allowed_address_pairs',),
- set_context=True, tenant_id='test',
**kwargs) as port:
port_ip = port['port'].get('fixed_ips')[0]['ip_address']
self.assertTrue(self.nb_ovn.create_lswitch_port.called)
@@ -718,7 +720,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
data = {'port': {'mac_address': '00:00:00:00:00:01'}}
req = self.new_update_request(
'ports',
- data, port['port']['id'])
+ data, port['port']['id'],
+ as_admin=True)
req.get_response(self.api)
self.assertTrue(self.nb_ovn.set_lswitch_port.called)
called_args_dict = (
@@ -737,10 +740,10 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
called_args_dict.get('addresses'))
def test_create_port_ovn_octavia_vip(self):
- with (self.network(set_context=True, tenant_id='test')) as net1, (
- self.subnet(network=net1)) as subnet1, (
+ with self.network() as net1,\
+ self.subnet(network=net1) as subnet1,\
self.port(name=ovn_const.LB_VIP_PORT_PREFIX + 'foo',
- subnet=subnet1, set_context=True, tenant_id='test')):
+ subnet=subnet1):
self.assertTrue(self.nb_ovn.create_lswitch_port.called)
called_args_dict = (
@@ -865,6 +868,7 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: '2'}
net = self._make_network(self.fmt, 'net1', True,
+ as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID,),
@@ -884,11 +888,10 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_create_port_without_security_groups(self):
kwargs = {'security_groups': []}
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
arg_list=('security_groups',),
- set_context=True, tenant_id='test',
**kwargs):
self.assertEqual(
1, self.nb_ovn.create_lswitch_port.call_count)
@@ -896,22 +899,20 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_create_port_without_security_groups_no_ps(self):
kwargs = {'security_groups': [], 'port_security_enabled': False}
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
arg_list=('security_groups',
'port_security_enabled'),
- set_context=True, tenant_id='test',
**kwargs):
self.assertEqual(
1, self.nb_ovn.create_lswitch_port.call_count)
self.nb_ovn.add_acl.assert_not_called()
def test_update_port_changed_security_groups(self):
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
- with self.port(subnet=subnet1,
- set_context=True, tenant_id='test') as port1:
+ with self.port(subnet=subnet1) as port1:
sg_id = port1['port']['security_groups'][0]
fake_lsp = (
fakes.FakeOVNPort.from_neutron_port(
@@ -938,10 +939,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
self.assertTrue(self.nb_ovn.pg_add_ports.called)
def test_update_port_unchanged_security_groups(self):
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
- with self.port(subnet=subnet1,
- set_context=True, tenant_id='test') as port1:
+ with self.port(subnet=subnet1) as port1:
fake_lsp = (
fakes.FakeOVNPort.from_neutron_port(
port1['port']))
@@ -967,11 +967,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def _test_update_port_vip(self, is_vip=True):
kwargs = {}
- with (
- self.network(set_context=True, tenant_id='test')) as net1, (
- self.subnet(network=net1)) as subnet1, (
- self.port(subnet=subnet1, set_context=True,
- tenant_id='test', **kwargs)) as port1:
+ with self.network() as net1, \
+ self.subnet(network=net1) as subnet1, \
+ self.port(subnet=subnet1, **kwargs) as port1:
fake_lsp = (
fakes.FakeOVNPort.from_neutron_port(
port1['port']))
@@ -1001,11 +999,10 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_delete_port_without_security_groups(self):
kwargs = {'security_groups': []}
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
arg_list=('security_groups',),
- set_context=True, tenant_id='test',
**kwargs) as port1:
fake_lsp = (
fakes.FakeOVNPort.from_neutron_port(
@@ -1022,10 +1019,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_delete_port_exception_delete_revision(self, mock_del_port,
mock_del_rev):
mock_del_port.side_effect = Exception('BoOoOoOoOmmmmm!!!')
- with self.network(set_context=True, tenant_id='test') as net:
+ with self.network() as net:
with self.subnet(network=net) as subnet:
- with self.port(subnet=subnet,
- set_context=True, tenant_id='test') as port:
+ with self.port(subnet=subnet) as port:
self._delete('ports', port['port']['id'])
# Assert that delete_revision wasn't invoked
mock_del_rev.assert_not_called()
@@ -1035,10 +1031,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_delete_port_not_exist_in_ovn(self, mock_del_port,
mock_del_rev):
mock_del_port.side_effect = idlutils.RowNotFound
- with self.network(set_context=True, tenant_id='test') as net:
+ with self.network() as net:
with self.subnet(network=net) as subnet:
- with self.port(subnet=subnet,
- set_context=True, tenant_id='test') as port:
+ with self.port(subnet=subnet) as port:
self._delete('ports', port['port']['id'])
# Assert that delete_revision wasn't invoked
mock_del_rev.assert_not_called()
@@ -1050,14 +1045,13 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
created_at = timeutils.utcnow() - datetime.timedelta(
seconds=ovn_const.DB_CONSISTENCY_CHECK_INTERVAL * 2)
mock_del_port.side_effect = idlutils.RowNotFound
- with self.network(set_context=True, tenant_id='test') as net:
+ with self.network() as net:
with self.subnet(network=net) as subnet:
- with self.port(subnet=subnet,
- set_context=True, tenant_id='test') as port, \
- mock.patch.object(ovn_revision_numbers_db,
- 'get_revision_row',
- return_value=OvnRevNumberRow(
- created_at=created_at)):
+ with self.port(subnet=subnet) as port, \
+ mock.patch.object(ovn_revision_numbers_db,
+ 'get_revision_row',
+ return_value=OvnRevNumberRow(
+ created_at=created_at)):
self._delete('ports', port['port']['id'])
# Assert that delete_revision was invoked
mock_del_rev.assert_called_once_with(mock.ANY,
@@ -1067,10 +1061,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def _test_set_port_status_up(self, is_compute_port=False):
port_device_owner = 'compute:nova' if is_compute_port else ''
self.mech_driver._plugin.nova_notifier = mock.Mock()
- with self.network(set_context=True, tenant_id='test') as net1, \
+ with self.network() as net1, \
self.subnet(network=net1) as subnet1, \
- self.port(subnet=subnet1, set_context=True,
- tenant_id='test',
+ self.port(subnet=subnet1, is_admin=True,
device_owner=port_device_owner) as port1, \
mock.patch.object(provisioning_blocks,
'provisioning_complete') as pc, \
@@ -1106,10 +1099,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def _test_set_port_status_down(self, is_compute_port=False):
port_device_owner = 'compute:nova' if is_compute_port else ''
self.mech_driver._plugin.nova_notifier = mock.Mock()
- with self.network(set_context=True, tenant_id='test') as net1, \
+ with self.network() as net1, \
self.subnet(network=net1) as subnet1, \
- self.port(subnet=subnet1, set_context=True,
- tenant_id='test',
+ self.port(subnet=subnet1, is_admin=True,
device_owner=port_device_owner) as port1, \
mock.patch.object(provisioning_blocks,
'add_provisioning_component') as apc, \
@@ -1158,10 +1150,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_set_port_status_concurrent_delete(self):
exc = os_db_exc.DBReferenceError('', '', '', '')
- with self.network(set_context=True, tenant_id='test') as net1, \
+ with self.network() as net1, \
self.subnet(network=net1) as subnet1, \
- self.port(subnet=subnet1, set_context=True,
- tenant_id='test') as port1, \
+ self.port(subnet=subnet1) as port1, \
mock.patch.object(provisioning_blocks,
'add_provisioning_component',
side_effect=exc) as apc, \
@@ -2411,7 +2402,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def _test_update_network_fragmentation(self, new_mtu, expected_opts, grps):
network_attrs = {external_net.EXTERNAL: True}
network = self._make_network(
- self.fmt, 'net1', True, arg_list=(external_net.EXTERNAL,),
+ self.fmt, 'net1', True, as_admin=True,
+ arg_list=(external_net.EXTERNAL,),
**network_attrs)
with self.subnet(network=network) as subnet:
@@ -2712,6 +2704,7 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: '1'}
net = self._make_network(self.fmt, 'net1', True,
+ as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID,),
@@ -2724,7 +2717,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
# Issue an update to the network changing the segmentation_id
data = {'network': {pnet.SEGMENTATION_ID: new_vlan_tag}}
- req = self.new_update_request('networks', data, net['id'])
+ req = self.new_update_request('networks', data, net['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(new_vlan_tag, res['network'][pnet.SEGMENTATION_ID])
@@ -2854,6 +2848,7 @@ class TestOVNMechanismDriverSubnetsV2(test_plugin.TestMl2SubnetsV2,
net_arg = {pnet.NETWORK_TYPE: 'geneve',
pnet.SEGMENTATION_ID: '1'}
network = self._make_network(self.fmt, 'net1', True,
+ as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,),
**net_arg)
@@ -3022,7 +3017,7 @@ class TestOVNMechanismDriverSegment(MechDriverSetupBase,
segment = self._test_create_segment(
network_id=net['id'], physical_network='physnet1',
segmentation_id=200, network_type='vlan')['segment']
- self._delete('segments', segment['id'])
+ self._delete('segments', segment['id'], as_admin=True)
ovn_nb_api.delete_lswitch_port.assert_called_once_with(
lport_name=ovn_utils.ovn_provnet_port_name(segment['id']),
lswitch_name=ovn_utils.ovn_name(net['id']))
@@ -3050,12 +3045,12 @@ class TestOVNMechanismDriverSegment(MechDriverSetupBase,
'options': {'network_name': 'physnet2'},
'tag': 300,
'name': ovn_utils.ovn_provnet_port_name(seg_2['id'])})]
- self._delete('segments', seg_1['id'])
+ self._delete('segments', seg_1['id'], as_admin=True)
ovn_nb_api.delete_lswitch_port.assert_called_once_with(
lport_name=ovn_utils.ovn_provnet_port_name(net['id']),
lswitch_name=ovn_utils.ovn_name(net['id']))
ovn_nb_api.delete_lswitch_port.reset_mock()
- self._delete('segments', seg_2['id'])
+ self._delete('segments', seg_2['id'], as_admin=True)
ovn_nb_api.delete_lswitch_port.assert_called_once_with(
lport_name=ovn_utils.ovn_provnet_port_name(seg_2['id']),
lswitch_name=ovn_utils.ovn_name(net['id']))
@@ -3159,8 +3154,8 @@ class TestOVNMechanismDriverSegment(MechDriverSetupBase,
ovn_nb_api.delete_lswitch_port.assert_not_called()
# Delete both segments
- self._delete('segments', self.seg_2['id'])
- self._delete('segments', self.seg_1['id'])
+ self._delete('segments', self.seg_2['id'], as_admin=True)
+ self._delete('segments', self.seg_1['id'], as_admin=True)
# Make sure that the metadata port wasn't deleted.
deleted_ports = [
@@ -4096,7 +4091,7 @@ class TestOVNMechanismDriverSecurityGroup(MechDriverSetupBase,
1, self.mech_driver.nb_ovn.pg_acl_del.call_count)
def test_delete_port_with_security_groups_port_doesnt_remove_pg(self):
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1):
sg = self._create_sg('sg')
port = self._make_port(
@@ -4174,7 +4169,7 @@ class TestOVNMechanismDriverMetadataPort(MechDriverSetupBase,
"""
self.mech_driver.nb_ovn.get_subnet_dhcp_options.return_value = {
'subnet': {}, 'ports': {}}
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1, cidr='10.0.0.0/24') as subnet1:
with self.subnet(network=net1,
cidr='20.0.0.0/24') as subnet2:
@@ -4215,6 +4210,7 @@ class TestOVNParentTagPortBinding(OVNMechanismDriverTestCase):
self._create_port(
self.fmt, n['network']['id'],
expected_res_status=404,
+ is_admin=True,
arg_list=(OVN_PROFILE,),
**binding)
@@ -4226,6 +4222,7 @@ class TestOVNParentTagPortBinding(OVNMechanismDriverTestCase):
with self.port(s) as p:
binding[OVN_PROFILE]['parent_name'] = p['port']['id']
res = self._create_port(self.fmt, n['network']['id'],
+ is_admin=True,
arg_list=(OVN_PROFILE,),
**binding)
port = self.deserialize(self.fmt, res)
@@ -4240,6 +4237,7 @@ class TestOVNParentTagPortBinding(OVNMechanismDriverTestCase):
with self.port(s) as p:
binding[OVN_PROFILE]['parent_name'] = p['port']['id']
self._create_port(self.fmt, n['network']['id'],
+ is_admin=True,
arg_list=(OVN_PROFILE,),
expected_res_status=400,
**binding)
@@ -4253,6 +4251,7 @@ class TestOVNVtepPortBinding(OVNMechanismDriverTestCase):
with self.network() as n:
with self.subnet(n):
res = self._create_port(self.fmt, n['network']['id'],
+ is_admin=True,
arg_list=(OVN_PROFILE,),
**binding)
port = self.deserialize(self.fmt, res)
@@ -4264,6 +4263,7 @@ class TestOVNVtepPortBinding(OVNMechanismDriverTestCase):
with self.network() as n:
with self.subnet(n):
self._create_port(self.fmt, n['network']['id'],
+ is_admin=True,
arg_list=(OVN_PROFILE,),
expected_res_status=400,
**binding)
@@ -4273,6 +4273,7 @@ class TestOVNVtepPortBinding(OVNMechanismDriverTestCase):
with self.network() as n:
with self.subnet(n):
self._create_port(self.fmt, n['network']['id'],
+ is_admin=True,
arg_list=(OVN_PROFILE,),
expected_res_status=400,
**binding)
@@ -4283,6 +4284,7 @@ class TestOVNVtepPortBinding(OVNMechanismDriverTestCase):
with self.network() as n:
with self.subnet(n):
self._create_port(self.fmt, n['network']['id'],
+ is_admin=True,
arg_list=(OVN_PROFILE,),
expected_res_status=400,
**binding)
@@ -4294,6 +4296,7 @@ class TestOVNVtepPortBinding(OVNMechanismDriverTestCase):
with self.network() as n:
with self.subnet(n):
self._create_port(self.fmt, n['network']['id'],
+ is_admin=True,
arg_list=(OVN_PROFILE,),
expected_res_status=404,
**binding)
diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/test_db_migration.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/test_db_migration.py
index ad7d56a39e..a14f3feb29 100644
--- a/neutron/tests/unit/plugins/ml2/drivers/ovn/test_db_migration.py
+++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/test_db_migration.py
@@ -39,10 +39,12 @@ class TestMigrateNeutronDatabaseToOvn(
for sid in range(1, 6):
net_arg = {pnet.NETWORK_TYPE: 'vxlan',
pnet.SEGMENTATION_ID: sid}
- network_id = self._make_network(self.fmt, 'net%d' % sid, True,
+ network_id = self._make_network(
+ self.fmt, 'net%d' % sid, True, as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,),
- **net_arg)['network']['id']
+ **net_arg
+ )['network']['id']
for vif_details in vif_details_list:
port = self._make_port(self.fmt, network_id)['port']
diff --git a/neutron/tests/unit/plugins/ml2/extensions/test_dns_domain_keywords.py b/neutron/tests/unit/plugins/ml2/extensions/test_dns_domain_keywords.py
index e656e250f5..b658d96c1c 100644
--- a/neutron/tests/unit/plugins/ml2/extensions/test_dns_domain_keywords.py
+++ b/neutron/tests/unit/plugins/ml2/extensions/test_dns_domain_keywords.py
@@ -50,7 +50,7 @@ class DNSDomainKeywordsTestCase(
net_kwargs.get('arg_list', ()) + (dns_apidef.DNSDOMAIN,)
net_kwargs['shared'] = True
res = self._create_network(self.fmt, 'test_network', True,
- **net_kwargs)
+ as_admin=True, **net_kwargs)
network = self.deserialize(self.fmt, res)
if ipv4:
cidr = '10.0.0.0/24'
@@ -108,8 +108,8 @@ class DNSDomainKeywordsTestCase(
# NOTE(slaweq): Admin context is required here to be able to update
# fixed_ips of the port as by default it is not possible for non-admin
# users
- ctx = context.Context(project_id=PROJECT_ID, is_admin=True)
- req = self.new_update_request('ports', data, port['id'], context=ctx)
+ req = self.new_update_request('ports', data, port['id'],
+ tenant_id=PROJECT_ID, as_admin=True)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
port = self.deserialize(self.fmt, res)['port']
diff --git a/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py b/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py
index 75783ad0b4..6d1a19e457 100644
--- a/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py
+++ b/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py
@@ -80,7 +80,7 @@ class DNSIntegrationTestCase(test_plugin.Ml2PluginV2TestCase):
net_kwargs['arg_list'] = \
net_kwargs.get('arg_list', ()) + (dns_apidef.DNSDOMAIN,)
res = self._create_network(self.fmt, 'test_network', True,
- **net_kwargs)
+ as_admin=True, **net_kwargs)
network = self.deserialize(self.fmt, res)
if ipv4:
cidr = '10.0.0.0/24'
diff --git a/neutron/tests/unit/plugins/ml2/extensions/test_tag_ports_during_bulk_creation.py b/neutron/tests/unit/plugins/ml2/extensions/test_tag_ports_during_bulk_creation.py
index 6131f3611f..daa6e72542 100644
--- a/neutron/tests/unit/plugins/ml2/extensions/test_tag_ports_during_bulk_creation.py
+++ b/neutron/tests/unit/plugins/ml2/extensions/test_tag_ports_during_bulk_creation.py
@@ -50,12 +50,10 @@ class TagPortsDuringBulkCreationTestCase(test_plugin.Ml2PluginV2TestCase):
def test_create_ports_bulk_with_tags(self):
num_ports = 3
- tenant_id = 'some_tenant'
- with self.network(tenant_id=tenant_id) as network_to_use:
+ with self.network() as network_to_use:
net_id = network_to_use['network']['id']
port = {'port': {'network_id': net_id,
- 'admin_state_up': True,
- 'tenant_id': tenant_id}}
+ 'admin_state_up': True}}
ports = [copy.deepcopy(port) for x in range(num_ports)]
ports_tags_map = {}
for port, tags in zip(ports, TAGS):
@@ -73,13 +71,11 @@ class TagPortsDuringBulkCreationTestCase(test_plugin.Ml2PluginV2TestCase):
def test_create_ports_bulk_no_tags(self):
num_ports = 2
- tenant_id = 'some_tenant'
- with self.network(tenant_id=tenant_id) as network_to_use:
+ with self.network() as network_to_use:
net_id = network_to_use['network']['id']
port = {'port': {'name': 'port',
'network_id': net_id,
- 'admin_state_up': True,
- 'tenant_id': tenant_id}}
+ 'admin_state_up': True}}
ports = [copy.deepcopy(port) for x in range(num_ports)]
req_body = {'ports': ports}
ports_req = self.new_create_request('ports', req_body)
@@ -90,13 +86,11 @@ class TagPortsDuringBulkCreationTestCase(test_plugin.Ml2PluginV2TestCase):
self.assertFalse(port['tags'])
def test_create_port_with_tags(self):
- tenant_id = 'some_tenant'
- with self.network(tenant_id=tenant_id) as network_to_use:
+ with self.network() as network_to_use:
net_id = network_to_use['network']['id']
req_body = {'port': {'name': 'port',
'network_id': net_id,
'admin_state_up': True,
- 'tenant_id': tenant_id,
'tags': TAGS[0]}}
port_req = self.new_create_request('ports', req_body)
res = port_req.get_response(self.api)
@@ -106,16 +100,14 @@ class TagPortsDuringBulkCreationTestCase(test_plugin.Ml2PluginV2TestCase):
def test_type_args_passed_to_extension(self):
num_ports = 2
- tenant_id = 'some_tenant'
extension = tag_ports_during_bulk_creation
with mock.patch.object(
extension.TagPortsDuringBulkCreationExtensionDriver,
'process_create_port') as patched_method:
- with self.network(tenant_id=tenant_id) as network_to_use:
+ with self.network() as network_to_use:
net_id = network_to_use['network']['id']
port = {'port': {'network_id': net_id,
- 'admin_state_up': True,
- 'tenant_id': tenant_id}}
+ 'admin_state_up': True}}
ports = [copy.deepcopy(port) for x in range(num_ports)]
ports[0]['port']['tags'] = TAGS[0]
ports[1]['port']['tags'] = TAGS[1]
diff --git a/neutron/tests/unit/plugins/ml2/test_extension_driver_api.py b/neutron/tests/unit/plugins/ml2/test_extension_driver_api.py
index d51a264527..2b673cf499 100644
--- a/neutron/tests/unit/plugins/ml2/test_extension_driver_api.py
+++ b/neutron/tests/unit/plugins/ml2/test_extension_driver_api.py
@@ -16,7 +16,6 @@ from neutron_lib import constants
from neutron_lib import context
from neutron_lib.plugins import directory
from oslo_config import cfg
-from oslo_utils import uuidutils
from neutron.tests.unit.plugins.ml2.drivers import ext_test
from neutron.tests.unit.plugins.ml2 import test_plugin
@@ -35,9 +34,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
self._ctxt = context.get_admin_context()
def _verify_network_create(self, code, exc_reason):
- tenant_id = uuidutils.generate_uuid()
- data = {'network': {'name': 'net1',
- 'tenant_id': tenant_id}}
+ data = {'network': {'name': 'net1'}}
req = self.new_create_request('networks', data)
res = req.get_response(self.api)
self.assertEqual(code, res.status_int)
@@ -47,7 +44,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
self.assertEqual(exc_reason,
network['NeutronError']['type'])
- return (network, tenant_id)
+ return network
def _verify_network_update(self, network, code, exc_reason):
net_id = network['network']['id']
@@ -64,10 +61,9 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
with mock.patch.object(ext_test.TestExtensionDriver,
'process_create_network',
side_effect=TypeError):
- net, tenant_id = self._verify_network_create(500,
- 'HTTPInternalServerError')
+ self._verify_network_create(500, 'HTTPInternalServerError')
# Verify the operation is rolled back
- query_params = "tenant_id=%s" % tenant_id
+ query_params = "tenant_id=%s" % self._tenant_id
nets = self._list('networks', query_params=query_params)
self.assertFalse(nets['networks'])
@@ -75,7 +71,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
with mock.patch.object(ext_test.TestExtensionDriver,
'process_update_network',
side_effect=TypeError):
- network, tid = self._verify_network_create(201, None)
+ network = self._verify_network_create(201, None)
self._verify_network_update(network, 500,
'HTTPInternalServerError')
@@ -83,7 +79,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
with mock.patch.object(ext_test.TestExtensionDriver,
'extend_network_dict',
side_effect=[None, None, TypeError]):
- network, tid = self._verify_network_create(201, None)
+ network = self._verify_network_create(201, None)
self._verify_network_update(network, 400, 'ExtensionDriverError')
def test_network_attr(self):
diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py
index d355dac420..b6130fdf3b 100644
--- a/neutron/tests/unit/plugins/ml2/test_plugin.py
+++ b/neutron/tests/unit/plugins/ml2/test_plugin.py
@@ -381,7 +381,8 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
for net_idx, net in enumerate(networks):
# create
req = self.new_create_request('networks',
- {'network': net})
+ {'network': net},
+ as_admin=True)
# verify
network = self.deserialize(self.fmt,
req.get_response(self.api))['network']
@@ -399,7 +400,8 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
def _lookup_network_by_segmentation_id(self, seg_id, num_expected_nets):
params_str = "%s=%s" % (pnet.SEGMENTATION_ID, seg_id)
net_req = self.new_list_request('networks', None,
- params=params_str)
+ params=params_str,
+ as_admin=True)
networks = self.deserialize(self.fmt, net_req.get_response(self.api))
if num_expected_nets:
self.assertIsNotNone(networks)
@@ -446,9 +448,9 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
plugin.type_manager, 'create_network_segments',
side_effect=db_exc.RetryRequest(ValueError())
) as f:
- data = {'network': {'tenant_id': 'sometenant', 'name': 'dummy',
+ data = {'network': {'name': 'dummy',
'admin_state_up': True, 'shared': False}}
- req = self.new_create_request('networks', data)
+ req = self.new_create_request('networks', data, as_admin=True)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
# 1 + retry count
@@ -459,7 +461,7 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
plugin = directory.get_plugin()
kwargs = {'arg_list': (pnet.NETWORK_TYPE, ),
pnet.NETWORK_TYPE: 'vlan'}
- with self.network(**kwargs) as net:
+ with self.network(as_admin=True, **kwargs) as net:
for attribute in set(pnet.ATTRIBUTES) - {pnet.SEGMENTATION_ID}:
net_data = {attribute: net['network'][attribute]}
self.assertIsNone(
@@ -491,7 +493,8 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet2',
pnet.SEGMENTATION_ID: 2}]
- with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ),
+ with self.network(as_admin=True,
+ **{'arg_list': (mpnet_apidef.SEGMENTS, ),
mpnet_apidef.SEGMENTS: segments}) as net:
self.assertRaises(
exc.InvalidInput, plugin._update_segmentation_id, self.context,
@@ -518,7 +521,8 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
mock.patch.object(type(mech_driver), 'agent_type',
new_callable=mock.PropertyMock(return_value=None)).start()
- with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ),
+ with self.network(as_admin=True,
+ **{'arg_list': (mpnet_apidef.SEGMENTS, ),
mpnet_apidef.SEGMENTS: segments}) as net, \
mock.patch.object(
port_obj.Port, 'check_network_ports_by_binding_types',
@@ -598,7 +602,8 @@ class TestMl2NetworksV2AgentMechDrivers(Ml2PluginV2TestCase):
segments = [{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}]
- with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ),
+ with self.network(as_admin=True,
+ **{'arg_list': (mpnet_apidef.SEGMENTS, ),
mpnet_apidef.SEGMENTS: segments}) as net, \
mock.patch.object(
port_obj.Port, 'check_network_ports_by_binding_types',
@@ -623,9 +628,8 @@ class TestExternalNetwork(Ml2PluginV2TestCase):
def _create_external_network(self):
data = {'network': {'name': 'net1',
- 'router:external': 'True',
- 'tenant_id': 'tenant_one'}}
- network_req = self.new_create_request('networks', data)
+ 'router:external': 'True'}}
+ network_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
return network
@@ -669,7 +673,6 @@ class TestMl2NetworksWithVlanTransparencyBase(TestMl2NetworksV2):
mpnet_apidef.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1'}],
- 'tenant_id': 'tenant_one',
'vlan_transparent': 'True'}}
def setUp(self, plugin=None):
@@ -685,7 +688,8 @@ class TestMl2NetworksWithVlanTransparency(
with mock.patch.object(mech_test.TestMechanismDriver,
'check_vlan_transparency',
return_value=False):
- network_req = self.new_create_request('networks', self.data)
+ network_req = self.new_create_request(
+ 'networks', self.data, as_admin=True)
res = network_req.get_response(self.api)
self.assertEqual(500, res.status_int)
error_result = self.deserialize(self.fmt, res)['NeutronError']
@@ -696,7 +700,8 @@ class TestMl2NetworksWithVlanTransparency(
with mock.patch.object(mech_test.TestMechanismDriver,
'check_vlan_transparency',
return_value=True):
- network_req = self.new_create_request('networks', self.data)
+ network_req = self.new_create_request(
+ 'networks', self.data, as_admin=True)
res = network_req.get_response(self.api)
self.assertEqual(201, res.status_int)
network = self.deserialize(self.fmt, res)['network']
@@ -713,7 +718,8 @@ class TestMl2NetworksWithVlanTransparencyAndMTU(
return_value=True):
cfg.CONF.set_override('path_mtu', 1000, group='ml2')
cfg.CONF.set_override('global_physnet_mtu', 1000)
- network_req = self.new_create_request('networks', self.data)
+ network_req = self.new_create_request(
+ 'networks', self.data, as_admin=True)
res = network_req.get_response(self.api)
self.assertEqual(201, res.status_int)
network = self.deserialize(self.fmt, res)['network']
@@ -727,8 +733,7 @@ class TestMl2NetworksWithAvailabilityZone(TestMl2NetworksV2):
def test_create_network_availability_zone(self):
az_hints = ['az1', 'az2']
data = {'network': {'name': 'net1',
- az_def.AZ_HINTS: az_hints,
- 'tenant_id': 'tenant_one'}}
+ az_def.AZ_HINTS: az_hints}}
with mock.patch.object(agents_db.AgentAvailabilityZoneMixin,
'validate_availability_zones'):
network_req = self.new_create_request('networks', data)
@@ -879,6 +884,7 @@ class TestMl2SubnetsV2(test_plugin.TestSubnetsV2,
net_arg = {pnet.NETWORK_TYPE: 'vxlan',
pnet.SEGMENTATION_ID: '1'}
network = self._make_network(self.fmt, 'net1', True,
+ as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,),
**net_arg)
@@ -1280,7 +1286,7 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
def test_update_port_with_empty_data(self):
ctx = context.get_admin_context()
plugin = directory.get_plugin()
- with self.port() as port:
+ with self.port(is_admin=True) as port:
port_id = port['port']['id']
new_port = plugin.update_port(ctx, port_id, {"port": {}})
new_port.pop('standard_attr_id')
@@ -1422,7 +1428,8 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
data = {'port': {'mac_address': None}}
with self.port() as port:
current_mac = port['port']['mac_address']
- req = self.new_update_request('ports', data, port['port']['id'])
+ req = self.new_update_request(
+ 'ports', data, port['port']['id'], as_admin=True)
self.assertEqual(200, req.get_response(self.api).status_int)
new_mac = plugin.get_port(ctx, port['port']['id'])['mac_address']
self.assertNotEqual(current_mac, new_mac)
@@ -1458,7 +1465,7 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
l3plugin = directory.get_plugin(plugin_constants.L3)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as n:
+ with self.network(as_admin=True, **kwargs) as n:
with self.subnet(network=n, cidr='200.0.0.0/22'):
l3plugin.create_floatingip(
context.get_admin_context(),
@@ -1488,24 +1495,23 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
res, 'ports', webob.exc.HTTPServerError.code)
def test_create_ports_bulk_with_sec_grp(self):
- ctx = context.get_admin_context()
plugin = directory.get_plugin()
with self.network() as net,\
mock.patch.object(plugin.notifier,
'security_groups_member_updated') as m_upd:
res = self._create_port_bulk(self.fmt, 3, net['network']['id'],
- 'test', True, context=ctx)
+ 'test', True)
ports = self.deserialize(self.fmt, res)
if 'ports' in ports:
used_sg = ports['ports'][0]['security_groups']
m_upd.assert_has_calls(
- [mock.call(ctx, [sg]) for sg in used_sg], any_order=True)
+ [mock.call(mock.ANY, [sg]) for sg in used_sg],
+ any_order=True)
else:
self.assertTrue('ports' in ports)
def test_create_ports_bulk_with_portbinding_attrs(self):
- ctx = context.get_admin_context()
with self.network() as net:
overrides = {0: {portbindings.HOST_ID: 'host1',
portbindings.VNIC_TYPE: 'direct',
@@ -1514,7 +1520,7 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
portbindings.VNIC_TYPE: 'macvtap',
portbindings.PROFILE: {'bar': 'bar'}}}
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
- 'test', True, context=ctx,
+ 'test', True, as_admin=True,
override=overrides)
ports = self.deserialize(self.fmt, res)['ports']
self.assertCountEqual(['direct', 'macvtap'],
@@ -1525,7 +1531,6 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
[p[portbindings.HOST_ID] for p in ports])
def test_create_ports_bulk_with_sec_grp_member_provider_update(self):
- ctx = context.get_admin_context()
plugin = directory.get_plugin()
bulk_mock_name = "security_groups_member_updated"
with self.network() as net,\
@@ -1534,28 +1539,25 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
net_id = net['network']['id']
data = [{
'network_id': net_id,
- 'tenant_id': self._tenant_id
},
{
'network_id': net_id,
- 'tenant_id': self._tenant_id,
'device_owner': constants.DEVICE_OWNER_DHCP
}
]
- res = self._create_bulk_from_list(self.fmt, 'port',
- data, context=ctx)
+ res = self._create_bulk_from_list(self.fmt, 'port', data,
+ as_admin=True)
ports = self.deserialize(self.fmt, res)
used_sg = ports['ports'][0]['security_groups']
- m_upd.assert_called_with(ctx, used_sg)
+ m_upd.assert_called_with(mock.ANY, used_sg)
m_upd.reset_mock()
data[0]['device_owner'] = constants.DEVICE_OWNER_DHCP
self._create_bulk_from_list(self.fmt, 'port',
- data, context=ctx)
+ data, as_admin=True)
self.assertFalse(m_upd.called)
def test_create_ports_bulk_with_sec_grp_provider_update_ipv6(self):
- ctx = context.get_admin_context()
plugin = directory.get_plugin()
fake_prefix = '2001:db8::/64'
fake_gateway = 'fe80::1'
@@ -1571,13 +1573,12 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
net_id = net['network']['id']
data = [{
'network_id': net_id,
- 'tenant_id': self._tenant_id,
'fixed_ips': [{'subnet_id': snet_v6['subnet']['id']}],
'device_owner': constants.DEVICE_OWNER_ROUTER_INTF
}
]
self._create_bulk_from_list(self.fmt, 'port',
- data, context=ctx)
+ data, as_admin=True)
self.assertFalse(m_upd.called)
def test_create_ports_bulk_ip_allocation_reverted_in_case_of_error(self):
@@ -1842,7 +1843,8 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
port_kwargs = {portbindings.HOST_ID: 'host1',
'subnet': subnet,
'device_id': 'deadlocktest'}
- with self.port(arg_list=(portbindings.HOST_ID,),
+ with self.port(is_admin=True,
+ arg_list=(portbindings.HOST_ID,),
**port_kwargs) as port:
self.assertTrue(port['port']['id'])
self.assertTrue(get_port_mock.called)
@@ -2037,7 +2039,8 @@ class TestMl2PortsV2WithRevisionPlugin(Ml2PluginV2TestCase):
ctx = context.get_admin_context()
plugin = directory.get_plugin()
host_arg = {portbindings.HOST_ID: HOST}
- with self.port(arg_list=(portbindings.HOST_ID,),
+ with self.port(is_admin=True,
+ arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
port = plugin.get_port(ctx, port['port']['id'])
updated_ports = []
@@ -2066,7 +2069,8 @@ class TestMl2PortsV2WithRevisionPlugin(Ml2PluginV2TestCase):
registry.subscribe(creceiver, resources.PORT,
events.AFTER_CREATE)
host_arg = {portbindings.HOST_ID: HOST}
- with self.port(arg_list=(portbindings.HOST_ID,),
+ with self.port(is_admin=True,
+ arg_list=(portbindings.HOST_ID,),
**host_arg):
self.assertGreater(updated_ports[0]['revision_number'],
created_ports[0]['revision_number'])
@@ -2079,7 +2083,8 @@ class TestMl2PortsV2WithRevisionPlugin(Ml2PluginV2TestCase):
registry.subscribe(p_update_receiver, resources.PORT,
events.AFTER_UPDATE)
host_arg = {portbindings.HOST_ID: HOST}
- with self.port(device_owner=constants.DEVICE_OWNER_DVR_INTERFACE,
+ with self.port(is_admin=True,
+ device_owner=constants.DEVICE_OWNER_DVR_INTERFACE,
device_id=TEST_ROUTER_ID,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
@@ -2116,7 +2121,8 @@ class TestMl2PortsV2WithL3(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
host_arg = {portbindings.HOST_ID: HOST}
with mock.patch.object(l3plugin.l3_rpc_notifier,
'routers_updated_on_host') as mock_updated:
- with self.port(device_owner=constants.DEVICE_OWNER_ROUTER_HA_INTF,
+ with self.port(is_admin=True,
+ device_owner=constants.DEVICE_OWNER_ROUTER_HA_INTF,
device_id=TEST_ROUTER_ID,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
@@ -2399,7 +2405,7 @@ class TestMl2DvrPortsV2(TestMl2PortsV2):
if floating_ip:
router_ids.add(ns_to_delete['router_id'])
- with self.port() as port, \
+ with self.port(is_admin=True) as port, \
mock.patch.object(registry, 'publish') as publish, \
mock.patch.object(self.l3plugin,
'disassociate_floatingips',
@@ -2442,7 +2448,8 @@ class TestMl2DvrPortsV2(TestMl2PortsV2):
def test_delete_port_with_floatingip_create_precommit_event(self):
fake_method = mock.Mock()
- with self.port(device_owner='network:floatingip') as port:
+ with self.port(is_admin=True,
+ device_owner='network:floatingip') as port:
try:
registry.subscribe(fake_method, resources.FLOATING_IP,
events.PRECOMMIT_DELETE)
@@ -2534,6 +2541,7 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
profile_arg = {portbindings.PROFILE: {'d': s}}
try:
with self.port(expected_res_status=400,
+ is_admin=True,
arg_list=(portbindings.PROFILE,),
**profile_arg):
pass
@@ -2543,15 +2551,17 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
def test_remove_port_binding_profile(self):
profile = {'e': 5}
profile_arg = {portbindings.PROFILE: profile}
- with self.port(arg_list=(portbindings.PROFILE,),
+ with self.port(is_admin=True,
+ arg_list=(portbindings.PROFILE,),
**profile_arg) as port:
self._check_port_binding_profile(port['port'], profile)
port_id = port['port']['id']
profile_arg = {portbindings.PROFILE: None}
port = self._update('ports', port_id,
- {'port': profile_arg})['port']
+ {'port': profile_arg},
+ as_admin=True)['port']
self._check_port_binding_profile(port)
- port = self._show('ports', port_id)['port']
+ port = self._show('ports', port_id, as_admin=True)['port']
self._check_port_binding_profile(port)
def test_return_on_concurrent_delete_and_binding(self):
@@ -2744,15 +2754,17 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
def test_port_binding_profile_not_changed(self):
profile = {'e': 5}
profile_arg = {portbindings.PROFILE: profile}
- with self.port(arg_list=(portbindings.PROFILE,),
+ with self.port(is_admin=True,
+ arg_list=(portbindings.PROFILE,),
**profile_arg) as port:
self._check_port_binding_profile(port['port'], profile)
port_id = port['port']['id']
state_arg = {'admin_state_up': True}
port = self._update('ports', port_id,
- {'port': state_arg})['port']
+ {'port': state_arg},
+ as_admin=True)['port']
self._check_port_binding_profile(port, profile)
- port = self._show('ports', port_id)['port']
+ port = self._show('ports', port_id, as_admin=True)['port']
self._check_port_binding_profile(port, profile)
def test_update_port_binding_host_id_none(self):
@@ -2885,8 +2897,7 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
super(TestMultiSegmentNetworks, self).setUp()
def test_allocate_dynamic_segment(self):
- data = {'network': {'name': 'net1',
- 'tenant_id': 'tenant_one'}}
+ data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
@@ -2914,8 +2925,7 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
self.assertEqual(dynamic_segment[driver_api.SEGMENTATION_ID], 1234)
def test_allocate_dynamic_segment_multiple_physnets(self):
- data = {'network': {'name': 'net1',
- 'tenant_id': 'tenant_one'}}
+ data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
@@ -2950,8 +2960,7 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: physnet_name}
- data = {'network': {'name': 'net1',
- 'tenant_id': 'tenant_one'}}
+ data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
@@ -3000,8 +3009,7 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
self.assertEqual(1, len(allocs))
def test_allocate_release_dynamic_segment(self):
- data = {'network': {'name': 'net1',
- 'tenant_id': 'tenant_one'}}
+ data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
@@ -3026,9 +3034,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
data = {'network': {'name': 'net1',
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
- pnet.SEGMENTATION_ID: 1,
- 'tenant_id': 'tenant_one'}}
- network_req = self.new_create_request('networks', data)
+ pnet.SEGMENTATION_ID: 1}}
+ network_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
@@ -3039,9 +3046,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
def test_fail_update_network_provider_attr(self):
data = {'network': {'name': 'net1',
pnet.NETWORK_TYPE: 'flat',
- pnet.PHYSICAL_NETWORK: 'physnet1',
- 'tenant_id': 'tenant_one'}}
- network_req = self.new_create_request('networks', data)
+ pnet.PHYSICAL_NETWORK: 'physnet1'}}
+ network_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual('flat', network['network'][pnet.NETWORK_TYPE])
@@ -3051,7 +3057,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
pnet.NETWORK_TYPE: 'flat',
pnet.PHYSICAL_NETWORK: 'update_physnet1'}}
network_req = self.new_update_request('networks', data,
- network['network']['id'])
+ network['network']['id'],
+ as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertIn('NeutronError', network)
@@ -3063,9 +3070,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
def test_update_network_provider_attr_no_change(self):
data = {'network': {'name': 'net1',
pnet.NETWORK_TYPE: 'flat',
- pnet.PHYSICAL_NETWORK: 'physnet1',
- 'tenant_id': 'tenant_one'}}
- network_req = self.new_create_request('networks', data)
+ pnet.PHYSICAL_NETWORK: 'physnet1'}}
+ network_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual('flat', network['network'][pnet.NETWORK_TYPE])
@@ -3075,7 +3081,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
pnet.NETWORK_TYPE: 'flat',
pnet.PHYSICAL_NETWORK: 'physnet1'}}
network_req = self.new_update_request('networks', data,
- network['network']['id'])
+ network['network']['id'],
+ as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual('updated-net1', network['network']['name'])
@@ -3085,9 +3092,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
mpnet_apidef.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
- pnet.SEGMENTATION_ID: 1}],
- 'tenant_id': 'tenant_one'}}
- net_req = self.new_create_request('networks', data)
+ pnet.SEGMENTATION_ID: 1}]}}
+ net_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt, net_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
@@ -3095,7 +3101,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
self.assertNotIn(mpnet_apidef.SEGMENTS, network['network'])
# Tests get_network()
- net_req = self.new_show_request('networks', network['network']['id'])
+ net_req = self.new_show_request('networks', network['network']['id'],
+ as_admin=True)
network = self.deserialize(self.fmt, net_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
@@ -3110,9 +3117,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
pnet.SEGMENTATION_ID: 1},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet2',
- pnet.SEGMENTATION_ID: 2}],
- 'tenant_id': 'tenant_one'}}
- network_req = self.new_create_request('networks', data)
+ pnet.SEGMENTATION_ID: 2}]}}
+ network_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segments = network['network'][mpnet_apidef.SEGMENTS]
@@ -3124,7 +3130,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
segments[segment_index][field])
# Tests get_network()
- net_req = self.new_show_request('networks', network['network']['id'])
+ net_req = self.new_show_request('networks', network['network']['id'],
+ as_admin=True)
network = self.deserialize(self.fmt, net_req.get_response(self.api))
segments = network['network'][mpnet_apidef.SEGMENTS]
for segment_index, segment in enumerate(data['network']
@@ -3157,9 +3164,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
pnet.SEGMENTATION_ID: 1},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
- pnet.SEGMENTATION_ID: 1}],
- 'tenant_id': 'tenant_one'}}
- network_req = self.new_create_request('networks', data)
+ pnet.SEGMENTATION_ID: 1}]}}
+ network_req = self.new_create_request('networks', data, as_admin=True)
res = network_req.get_response(self.api)
self.assertEqual(400, res.status_int)
@@ -3169,11 +3175,10 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1'},
{pnet.NETWORK_TYPE: 'vlan',
- pnet.PHYSICAL_NETWORK: 'physnet1'}],
- 'tenant_id': 'tenant_one'}}
+ pnet.PHYSICAL_NETWORK: 'physnet1'}]}}
retry_fixture = fixture.DBRetryErrorsFixture(max_retries=2)
retry_fixture.setUp()
- network_req = self.new_create_request('networks', data)
+ network_req = self.new_create_request('networks', data, as_admin=True)
res = network_req.get_response(self.api)
self.assertEqual(201, res.status_int)
@@ -3183,9 +3188,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
'shared': False,
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
- pnet.SEGMENTATION_ID: 1,
- 'tenant_id': 'tenant_one'}}
- network_req = self.new_create_request('networks', data)
+ pnet.SEGMENTATION_ID: 1}}
+ network_req = self.new_create_request('networks', data, as_admin=True)
res = network_req.get_response(self.api)
network = self.deserialize(self.fmt, res)
network_id = network['network']['id']
@@ -3217,9 +3221,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
'shared': False,
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
- pnet.SEGMENTATION_ID: 1,
- 'tenant_id': 'tenant_one'}}
- network_req = self.new_create_request('networks', data)
+ pnet.SEGMENTATION_ID: 1}}
+ network_req = self.new_create_request('networks', data, as_admin=True)
res = network_req.get_response(self.api)
network = self.deserialize(self.fmt, res)
network_id = network['network']['id']
@@ -3394,9 +3397,7 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
'create_network_postcommit',
side_effect=(exc.InvalidInput(
error_message=err_msg))):
- tenant_id = uuidutils.generate_uuid()
- data = {'network': {'name': 'net1',
- 'tenant_id': tenant_id}}
+ data = {'network': {'name': 'net1'}}
req = self.new_create_request('networks', data)
res = req.get_response(self.api)
self.assertEqual(400, res.status_int)
@@ -3405,7 +3406,7 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
error['NeutronError']['type'])
# Check the client can see the root cause of error.
self.assertIn(err_msg, error['NeutronError']['message'])
- query_params = "tenant_id=%s" % tenant_id
+ query_params = "tenant_id=%s" % self._tenant_id
nets = self._list('networks', query_params=query_params)
self.assertFalse(nets['networks'])
@@ -3417,8 +3418,7 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'delete_network_postcommit') as dnp:
- data = {'network': {'name': 'net1',
- 'tenant_id': 'tenant_one'}}
+ data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network_res = network_req.get_response(self.api)
self.assertEqual(201, network_res.status_int)
@@ -3442,8 +3442,7 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'update_network_postcommit') as unp:
- data = {'network': {'name': 'net1',
- 'tenant_id': 'tenant_one'}}
+ data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network_res = network_req.get_response(self.api)
self.assertEqual(201, network_res.status_int)
@@ -3481,8 +3480,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
'cidr': '10.0.20.0/24',
'ip_version': constants.IP_VERSION_4,
'name': 'subnet1',
- 'tenant_id':
- network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
@@ -3510,8 +3507,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
'cidr': '10.0.20.0/24',
'ip_version': constants.IP_VERSION_4,
'name': 'subnet1',
- 'tenant_id':
- network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
subnet_req = self.new_create_request('subnets', data)
subnet_res = subnet_req.get_response(self.api)
@@ -3543,8 +3538,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
'cidr': '10.0.20.0/24',
'ip_version': constants.IP_VERSION_4,
'name': 'subnet1',
- 'tenant_id':
- network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
subnet_req = self.new_create_request('subnets', data)
subnet_res = subnet_req.get_response(self.api)
@@ -3579,8 +3572,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
with self.network() as network:
net_id = network['network']['id']
data = {'port': {'network_id': net_id,
- 'tenant_id':
- network['network']['tenant_id'],
'name': 'port1',
'admin_state_up': 1,
'fixed_ips': []}}
@@ -3606,8 +3597,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
with self.network() as network:
data = {'port': {'network_id': network['network']['id'],
- 'tenant_id':
- network['network']['tenant_id'],
'name': 'port1',
'admin_state_up': 1,
'fixed_ips': []}}
@@ -3655,8 +3644,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
subnet_id = subnet['subnet']['id']
data = {'port': {
'network_id': network['network']['id'],
- 'tenant_id':
- network['network']['tenant_id'],
'name': 'port1',
'device_owner':
constants.DEVICE_OWNER_DVR_INTERFACE,
@@ -3691,7 +3678,7 @@ class TestML2PluggableIPAM(test_ipam.UseIpamMixin, TestMl2SubnetsV2):
request.subnet_cidr = netaddr.IPNetwork(cidr)
request.allocation_pools = []
request.gateway_ip = netaddr.IPAddress(gateway_ip)
- request.tenant_id = uuidutils.generate_uuid()
+ request.tenant_id = self._tenant_id
ipam_subnet = mock.Mock()
ipam_subnet.get_details.return_value = request
@@ -3910,7 +3897,8 @@ class TestML2Segments(Ml2PluginV2TestCase):
driver_api.PHYSICAL_NETWORK: physical_network,
driver_api.SEGMENTATION_ID: segmentation_id}
- with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ),
+ with self.network(as_admin=True,
+ **{'arg_list': (mpnet_apidef.SEGMENTS, ),
mpnet_apidef.SEGMENTS: network_segments})\
as test_network:
multisegment_network = test_network['network']
@@ -3942,7 +3930,8 @@ class TestML2Segments(Ml2PluginV2TestCase):
driver_api.PHYSICAL_NETWORK: physical_network,
driver_api.SEGMENTATION_ID: segmentation_id}
- with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ),
+ with self.network(as_admin=True,
+ **{'arg_list': (mpnet_apidef.SEGMENTS, ),
mpnet_apidef.SEGMENTS: network_segments})\
as test_network:
multisegment_network = test_network['network']
@@ -3968,7 +3957,7 @@ class TestML2Segments(Ml2PluginV2TestCase):
pnet.PHYSICAL_NETWORK: physical_network,
pnet.SEGMENTATION_ID: segmentation_id}
- with self.network() as test_network:
+ with self.network(as_admin=True) as test_network:
# network() implicitaly creates a single segment
single_segment_network = test_network['network']
observed_network = self.driver._build_original_network(
diff --git a/neutron/tests/unit/plugins/ml2/test_port_binding.py b/neutron/tests/unit/plugins/ml2/test_port_binding.py
index cf5db88eff..dcca1b6f62 100644
--- a/neutron/tests/unit/plugins/ml2/test_port_binding.py
+++ b/neutron/tests/unit/plugins/ml2/test_port_binding.py
@@ -78,7 +78,8 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
mac_address = 'aa:aa:aa:aa:aa:aa'
host_arg = {portbindings.HOST_ID: host,
'mac_address': mac_address}
- with self.port(name='name', arg_list=(portbindings.HOST_ID,),
+ with self.port(name='name', is_admin=True,
+ arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
self._check_response(port['port'], vif_type, has_port_filter,
bound, status)
@@ -152,12 +153,12 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
update_body = {'name': 'test_update'}
if new_host is not None:
update_body[portbindings.HOST_ID] = new_host
- with self.port(name='name', arg_list=(portbindings.HOST_ID,),
+ with self.port(name='name', is_admin=True,
+ arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
- neutron_context = context.get_admin_context()
updated_port = self._update('ports', port['port']['id'],
{'port': update_body},
- neutron_context=neutron_context)
+ as_admin=True)
port_data = updated_port['port']
if new_host is not None:
self.assertEqual(new_host,
@@ -190,7 +191,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
ctx = context.get_admin_context()
plugin = directory.get_plugin()
host_id = {portbindings.HOST_ID: 'host1'}
- with self.port(**host_id) as port:
+ with self.port(is_admin=True, **host_id) as port:
# Since the port is DOWN at first
# It's necessary to make its status ACTIVE for this test
plugin.update_port_status(ctx, port['port']['id'],
@@ -221,7 +222,8 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
def test_distributed_binding(self):
ctx = context.get_admin_context()
- with self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port:
+ with self.port(is_admin=True,
+ device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port:
port_id = port['port']['id']
# Verify port's VIF type and status.
@@ -235,7 +237,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
'device_id': 'router1'}})
# Get port and verify VIF type and status unchanged.
- port = self._show('ports', port_id)
+ port = self._show('ports', port_id, as_admin=True)
self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
port['port'][portbindings.VIF_TYPE])
self.assertEqual('DOWN', port['port']['status'])
@@ -247,7 +249,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
self.assertEqual('local', details['network_type'])
# Get port and verify VIF type and changed status.
- port = self._show('ports', port_id)
+ port = self._show('ports', port_id, as_admin=True)
self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
port['port'][portbindings.VIF_TYPE])
self.assertEqual('BUILD', port['port']['status'])
@@ -258,7 +260,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
host='host-ovs-no_filter')
# Get port and verify VIF type and changed status.
- port = self._show('ports', port_id)
+ port = self._show('ports', port_id, as_admin=True)
self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
port['port'][portbindings.VIF_TYPE])
self.assertEqual('ACTIVE', port['port']['status'])
@@ -269,7 +271,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
host='host-ovs-no_filter')
# Get port and verify VIF type and changed status.
- port = self._show('ports', port_id)
+ port = self._show('ports', port_id, as_admin=True)
self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
port['port'][portbindings.VIF_TYPE])
self.assertEqual('DOWN', port['port']['status'])
@@ -382,7 +384,8 @@ class ExtendedPortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
data = {'binding': kwargs}
binding_req = self.new_update_request('ports', data, port_id, fmt,
subresource='bindings',
- sub_id=host)
+ sub_id=host,
+ as_admin=True)
return binding_req.get_response(self.api)
def _do_update_port_binding(self, fmt, port_id, host, **kwargs):
@@ -457,7 +460,8 @@ class ExtendedPortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
def test_create_duplicate_port_binding(self):
device_owner = '%s%s' % (const.DEVICE_OWNER_COMPUTE_PREFIX, 'nova')
host_arg = {portbindings.HOST_ID: self.host}
- with self.port(device_owner=device_owner,
+ with self.port(is_admin=True,
+ device_owner=device_owner,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
response = self._create_port_binding(self.fmt, port['port']['id'],
@@ -540,7 +544,7 @@ class ExtendedPortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
active_binding = self._activate_port_binding(
port['id'], self.host, raw_response=False)
self._assert_bound_port_binding(active_binding)
- updated_port = self._show('ports', port['id'])['port']
+ updated_port = self._show('ports', port['id'], as_admin=True)['port']
updated_bound_drivers = updated_port[portbindings.VIF_DETAILS].pop(
portbindings.VIF_DETAILS_BOUND_DRIVERS)
self.assertEqual({'0': 'test'}, updated_bound_drivers)
@@ -711,7 +715,8 @@ class ExtendedPortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
with mock.patch.object(
mechanism_test.TestMechanismDriver, '_check_port_context'
):
- req = self.new_update_request('ports', update_body, port_id)
+ req = self.new_update_request('ports', update_body, port_id,
+ as_admin=True)
self.assertEqual(200, req.get_response(self.api).status_int)
def test_bind_non_pf_port_with_mac_port_not_updated(self):
@@ -851,7 +856,8 @@ class ExtendedPortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
with mock.patch.object(
mechanism_test.TestMechanismDriver, '_check_port_context'
):
- req = self.new_update_request('ports', update_body, port['id'])
+ req = self.new_update_request('ports', update_body, port['id'],
+ as_admin=True)
self.assertEqual(200, req.get_response(self.api).status_int)
# Neutron expected to reset the MAC to a generated one so that the
diff --git a/neutron/tests/unit/plugins/ml2/test_security_group.py b/neutron/tests/unit/plugins/ml2/test_security_group.py
index 3de28b6294..494059abe5 100644
--- a/neutron/tests/unit/plugins/ml2/test_security_group.py
+++ b/neutron/tests/unit/plugins/ml2/test_security_group.py
@@ -159,7 +159,8 @@ class TestMl2SecurityGroups(Ml2SecurityGroupsTestCase,
self.assertFalse(self.was_active)
self._delete(
'security-groups',
- self._list('security-groups')['security_groups'][0]['id'])
+ self._list('security-groups')['security_groups'][0]['id'],
+ as_admin=True)
with self.port(subnet=s):
self.assertFalse(self.was_active)
diff --git a/neutron/tests/unit/plugins/ml2/test_tracked_resources.py b/neutron/tests/unit/plugins/ml2/test_tracked_resources.py
index 264dd8e908..5d5d1c1dd5 100644
--- a/neutron/tests/unit/plugins/ml2/test_tracked_resources.py
+++ b/neutron/tests/unit/plugins/ml2/test_tracked_resources.py
@@ -233,9 +233,8 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_networks_clears_dirty(self):
self._test_init('network')
- net = self._make_network('json', 'meh', True)['network']
- self.ctx.project_id = net['project_id']
- self._list('networks', neutron_context=self.ctx)
+ self._make_network('json', 'meh', True)['network']
+ self._list('networks', as_admin=True)
self._verify_dirty_bit('network', expected_value=False)
def test_create_delete_port_marks_dirty(self):
@@ -252,9 +251,8 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_ports_clears_dirty(self):
self._test_init('port')
net = self._make_network('json', 'meh', True)['network']
- port = self._make_port('json', net['id'])['port']
- self.ctx.project_id = port['project_id']
- self._list('ports', neutron_context=self.ctx)
+ self._make_port('json', net['id'])['port']
+ self._list('ports', as_admin=True)
self._verify_dirty_bit('port', expected_value=False)
def test_create_delete_subnet_marks_dirty(self):
@@ -286,17 +284,14 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_subnets_clears_dirty(self):
self._test_init('subnet')
net = self._make_network('json', 'meh', True)
- subnet = self._make_subnet('json', net, '10.0.0.1',
- '10.0.0.0/24')['subnet']
- self.ctx.project_id = subnet['project_id']
- self._list('subnets', neutron_context=self.ctx)
+ self._make_subnet('json', net, '10.0.0.1', '10.0.0.0/24')['subnet']
+ self._list('subnets', as_admin=True)
self._verify_dirty_bit('subnet', expected_value=False)
def test_create_delete_subnetpool_marks_dirty(self):
self._test_init('subnetpool')
pool = self._make_subnetpool('json', ['10.0.0.0/8'],
- name='meh',
- tenant_id=self._project_id)['subnetpool']
+ name='meh')['subnetpool']
self._verify_dirty_bit('subnetpool')
# Clear the dirty bit
quota_db_api.set_quota_usage_dirty(
@@ -306,17 +301,14 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_subnetpools_clears_dirty(self):
self._test_init('subnetpool')
- pool = self._make_subnetpool('json', ['10.0.0.0/8'],
- name='meh',
- tenant_id=self._project_id)['subnetpool']
- self.ctx.project_id = pool['project_id']
- self._list('subnetpools', neutron_context=self.ctx)
+ self._make_subnetpool('json', ['10.0.0.0/8'], name='meh')['subnetpool']
+ self._list('subnetpools', as_admin=True)
self._verify_dirty_bit('subnetpool', expected_value=False)
def test_create_delete_securitygroup_marks_dirty(self):
self._test_init('security_group')
sec_group = self._make_security_group(
- 'json', 'meh', 'meh', tenant_id=self._project_id)['security_group']
+ 'json', 'meh', 'meh')['security_group']
self._verify_dirty_bit('security_group')
# Clear the dirty bit
quota_db_api.set_quota_usage_dirty(
@@ -327,17 +319,16 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_securitygroups_clears_dirty(self):
self._test_init('security_group')
self._make_security_group(
- 'json', 'meh', 'meh', tenant_id=self._project_id)['security_group']
- self.ctx.project_id = self._project_id
- self._list('security-groups', neutron_context=self.ctx)
+ 'json', 'meh', 'meh',)['security_group']
+ self._list('security-groups', as_admin=True)
self._verify_dirty_bit('security_group', expected_value=False)
def test_create_delete_securitygrouprule_marks_dirty(self):
self._test_init('security_group_rule')
sec_group = self._make_security_group(
- 'json', 'meh', 'meh', tenant_id=self._project_id)['security_group']
+ 'json', 'meh', 'meh')['security_group']
rule_req = self._build_security_group_rule(
- sec_group['id'], 'ingress', 'TCP', tenant_id=self._project_id)
+ sec_group['id'], 'ingress', 'TCP')
sec_group_rule = self._make_security_group_rule(
'json', rule_req)['security_group_rule']
self._verify_dirty_bit('security_group_rule')
@@ -349,10 +340,8 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_securitygrouprules_clears_dirty(self):
self._test_init('security_group_rule')
- self._make_security_group(
- 'json', 'meh', 'meh', tenant_id=self._project_id)['security_group']
+ self._make_security_group('json', 'meh', 'meh')['security_group']
# As the security group create operation also creates 2 security group
# rules there is no need to explicitly create any rule
- self.ctx.project_id = self._project_id
- self._list('security-group-rules', neutron_context=self.ctx)
+ self._list('security-group-rules', as_admin=True)
self._verify_dirty_bit('security_group_rule', expected_value=False)
diff --git a/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py b/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py
index 2957a189bf..db41501ef9 100644
--- a/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py
+++ b/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py
@@ -209,13 +209,13 @@ class L3SchedulerBaseMixin(object):
@contextlib.contextmanager
def router_with_ext_gw(self, name='router1', admin_state_up=True,
- fmt=None, tenant_id=uuidutils.generate_uuid(),
+ fmt=None, tenant_id=None,
external_gateway_info=None,
- subnet=None, set_context=False,
- **kwargs):
+ subnet=None, **kwargs):
+ tenant_id = tenant_id or self._tenant_id
router = self._make_router(fmt or self.fmt, tenant_id, name,
admin_state_up, external_gateway_info,
- set_context, **kwargs)
+ **kwargs)
self._add_external_gateway_to_router(
router['router']['id'],
subnet['subnet']['network_id'])
@@ -1380,6 +1380,7 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
subnet_ids = []
subnet_ids.append(subnet['subnet']['id'])
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=('admin_state_up',
portbindings.PROFILE,), **host_args):
diff --git a/neutron/tests/unit/services/metering/test_metering_plugin.py b/neutron/tests/unit/services/metering/test_metering_plugin.py
index 87a593bcf9..33961a20c6 100644
--- a/neutron/tests/unit/services/metering/test_metering_plugin.py
+++ b/neutron/tests/unit/services/metering/test_metering_plugin.py
@@ -17,7 +17,6 @@ from unittest import mock
from neutron_lib.agent import topics
from neutron_lib.api.definitions import metering as metering_apidef
from neutron_lib import context
-from neutron_lib.db import api as db_api
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from neutron_lib.tests import tools
@@ -60,20 +59,6 @@ class MeteringTestExtensionManager(object):
return []
-# TODO(akamyshnikova):we need this temporary FakeContext class while Context
-# checking for existence of session attribute.
-class FakeContext(context.ContextBaseWithSession):
- def __init__(self, *args, **kwargs):
- super(FakeContext, self).__init__(*args, **kwargs)
- self._session = None
-
- @property
- def session(self):
- if self._session is None:
- self._session = db_api.get_writer_session()
- return self._session
-
-
class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
test_l3.L3NatTestCaseMixin,
test_metering_db.MeteringPluginDbTestCaseMixin):
@@ -97,11 +82,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
self.mock_uuid = self.uuid_patch.start()
- self.project_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b'
- self.ctx = FakeContext('', self.project_id, is_admin=True)
- self.context_patch = mock.patch('neutron_lib.context.Context',
- return_value=self.ctx)
- self.mock_context = self.context_patch.start()
+ self.ctx = context.Context('', self._tenant_id).elevated()
self.topic = topics.METERING_AGENT
@@ -159,7 +140,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid, 'shared': False,
@@ -171,11 +152,9 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
with self.router(name='router2', tenant_id=tenant_id_2,
set_context=True):
self.mock_uuid.return_value = self.uuid
- with self.router(name='router1', tenant_id=self.project_id,
- set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True):
- self.mock_add.assert_called_with(self.ctx, expected)
+ with self.router(name='router1'):
+ with self.metering_label():
+ self.mock_add.assert_called_with(mock.ANY, expected)
def test_add_metering_label_shared_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
@@ -184,7 +163,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid, 'shared': False,
@@ -195,14 +174,11 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'id': self.uuid}]
tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206'
- with self.router(name='router1', tenant_id=self.project_id,
- shared=True, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True):
+ with self.router(name='router1', shared=True):
+ with self.metering_label():
self.mock_uuid.return_value = second_uuid
- with self.metering_label(tenant_id=tenant_id_2, shared=True,
- set_context=True):
- self.mock_add.assert_called_with(self.ctx, expected)
+ with self.metering_label(tenant_id=tenant_id_2, shared=True):
+ self.mock_add.assert_called_with(mock.ANY, expected)
def test_remove_metering_label_rpc_call(self):
expected = [{'status': 'ACTIVE',
@@ -210,19 +186,19 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid, 'shared': False,
'name': 'label'}],
'id': self.uuid}]
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
+ with self.router():
+ with self.metering_label() as label:
self.mock_add.assert_called_with(mock.ANY, expected)
self._delete('metering-labels',
- label['metering_label']['id'])
+ label['metering_label']['id'],
+ as_admin=True)
self.mock_remove.assert_called_with(mock.ANY, expected)
def test_remove_one_metering_label_rpc_call(self):
@@ -232,7 +208,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid, 'shared': False,
@@ -246,22 +222,21 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid, 'shared': False,
'name': 'label'}],
'id': self.uuid}]
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True):
+ with self.router():
+ with self.metering_label():
self.mock_uuid.return_value = second_uuid
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
+ with self.metering_label() as label:
self.mock_add.assert_called_with(mock.ANY, expected_add)
self._delete('metering-labels',
- label['metering_label']['id'])
+ label['metering_label']['id'],
+ as_admin=True)
self.mock_remove.assert_called_with(mock.ANY, expected_remove)
def test_add_and_remove_metering_label_rule_rpc_call(self):
@@ -271,7 +246,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'remote_ip_prefix':
@@ -291,7 +266,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'remote_ip_prefix':
@@ -307,15 +282,15 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'id': self.uuid}]
remote_ip_prefix = {'remote_ip_prefix': '10.0.0.0/24'}
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
+ with self.router():
+ with self.metering_label() as label:
la = label['metering_label']
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(la['id'], **remote_ip_prefix):
self.mock_add_rule.assert_called_with(mock.ANY,
expected_add)
- self._delete('metering-label-rules', second_uuid)
+ self._delete('metering-label-rules', second_uuid,
+ as_admin=True)
self.mock_remove_rule.assert_called_with(mock.ANY,
expected_del)
@@ -326,7 +301,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'source_ip_prefix':
@@ -346,7 +321,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'source_ip_prefix':
@@ -362,16 +337,16 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'id': self.uuid}]
source_ip_prefix = {'source_ip_prefix': '10.0.0.0/24'}
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
+ with self.router():
+ with self.metering_label() as label:
la = label['metering_label']
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(la['id'],
**source_ip_prefix):
self.mock_add_rule.assert_called_with(mock.ANY,
expected_add)
- self._delete('metering-label-rules', second_uuid)
+ self._delete('metering-label-rules', second_uuid,
+ as_admin=True)
self.mock_remove_rule.assert_called_with(mock.ANY,
expected_del)
@@ -382,7 +357,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'destination_ip_prefix':
@@ -402,7 +377,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'destination_ip_prefix':
@@ -418,16 +393,16 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'id': self.uuid}]
source_ip_prefix = {'destination_ip_prefix': '10.0.0.0/24'}
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
+ with self.router():
+ with self.metering_label() as label:
la = label['metering_label']
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(la['id'],
**source_ip_prefix):
self.mock_add_rule.assert_called_with(mock.ANY,
expected_add)
- self._delete('metering-label-rules', second_uuid)
+ self._delete('metering-label-rules', second_uuid,
+ as_admin=True)
self.mock_remove_rule.assert_called_with(mock.ANY,
expected_del)
@@ -438,7 +413,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'destination_ip_prefix':
@@ -459,7 +434,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'destination_ip_prefix':
@@ -477,23 +452,22 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
ip_prefixes = {'source_ip_prefix': '10.0.0.0/24',
'destination_ip_prefix': '0.0.0.0/0'}
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
+ with self.router():
+ with self.metering_label() as label:
la = label['metering_label']
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(la['id'],
**ip_prefixes):
self.mock_add_rule.assert_called_with(mock.ANY,
expected_add)
- self._delete('metering-label-rules', second_uuid)
+ self._delete('metering-label-rules', second_uuid,
+ as_admin=True)
self.mock_remove_rule.assert_called_with(mock.ANY,
expected_del)
def test_add_and_remove_metering_label_rule_src_and_remote_ip(self):
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
+ with self.router():
+ with self.metering_label() as label:
la = label['metering_label']
res = self._create_metering_label_rule(
@@ -514,9 +488,8 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
"NeutronError"]["message"])
def test_add_and_remove_metering_label_rule_dest_and_remote_ip(self):
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
+ with self.router():
+ with self.metering_label() as label:
la = label['metering_label']
res = self._create_metering_label_rule(
@@ -537,9 +510,8 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
"NeutronError"]["message"])
def test_add_and_remove_metering_label_rule_no_ip_prefix_entered(self):
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
+ with self.router():
+ with self.metering_label() as label:
la = label['metering_label']
res = self._create_metering_label_rule(
@@ -567,12 +539,15 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
# 1b9e9a6c2ccf7f9bc06429f53e5126f356ae3d4a/neutron/api/v2/base.py#L563
self.ctx.GUARD_TRANSACTION = False
with self.metering_label(tenant_id=tenant_id) as metering_label:
- with self.router(tenant_id=tenant_id, set_context=True) as r:
- router = self._show('routers', r['router']['id'])
+ with self.router(tenant_id=tenant_id) as r:
+ router = self._show('routers', r['router']['id'],
+ tenant_id=tenant_id)
self.assertEqual(tenant_id, router['router']['tenant_id'])
metering_label_id = metering_label['metering_label']['id']
- self._delete('metering-labels', metering_label_id, 204)
- router = self._show('routers', r['router']['id'])
+ self._delete('metering-labels', metering_label_id, 204,
+ as_admin=True)
+ router = self._show('routers', r['router']['id'],
+ tenant_id=tenant_id)
self.assertEqual(tenant_id, router['router']['tenant_id'])
@@ -609,11 +584,7 @@ class TestMeteringPluginL3AgentScheduler(
self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
self.mock_uuid = self.uuid_patch.start()
- self.project_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b'
- self.ctx = FakeContext('', self.project_id, is_admin=True)
- self.context_patch = mock.patch('neutron_lib.context.Context',
- return_value=self.ctx)
- self.mock_context = self.context_patch.start()
+ self.ctx = context.Context('', self._tenant_id).elevated()
self.l3routers_patch = mock.patch(scheduler +
'.get_l3_agents_hosting_routers')
@@ -640,7 +611,7 @@ class TestMeteringPluginL3AgentScheduler(
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid, 'shared': False,
@@ -651,7 +622,7 @@ class TestMeteringPluginL3AgentScheduler(
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid, 'shared': False,
@@ -670,15 +641,12 @@ class TestMeteringPluginL3AgentScheduler(
self.l3routers_mock.side_effect = side_effect
- with self.router(name='router1', tenant_id=self.project_id,
- set_context=True):
+ with self.router(name='router1'):
self.mock_uuid.return_value = second_uuid
- with self.router(name='router2', tenant_id=self.project_id,
- set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True):
+ with self.router(name='router2'):
+ with self.metering_label():
self.mock_add.assert_called_with(
- self.ctx, tools.UnorderedList(expected))
+ mock.ANY, tools.UnorderedList(expected))
class TestMeteringPluginL3AgentSchedulerServicePlugin(
@@ -727,7 +695,6 @@ class TestMeteringPluginRpcFromL3Agent(
self.meter_plugin = directory.get_plugin(constants.METERING)
- self.tenant_id = 'admin_tenant_id'
self.tenant_id_1 = 'tenant_id_1'
self.tenant_id_2 = 'tenant_id_2'
@@ -759,8 +726,7 @@ class TestMeteringPluginRpcFromL3Agent(
def test_get_sync_data_metering_shared(self):
with self.router(name='router1', tenant_id=self.tenant_id_1):
with self.router(name='router2', tenant_id=self.tenant_id_2):
- with self.metering_label(tenant_id=self.tenant_id,
- shared=True):
+ with self.metering_label(shared=True):
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(self.adminContext)
@@ -773,7 +739,7 @@ class TestMeteringPluginRpcFromL3Agent(
def test_get_sync_data_metering_not_shared(self):
with self.router(name='router1', tenant_id=self.tenant_id_1):
with self.router(name='router2', tenant_id=self.tenant_id_2):
- with self.metering_label(tenant_id=self.tenant_id):
+ with self.metering_label():
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(self.adminContext)
@@ -786,13 +752,11 @@ class TestMeteringPluginRpcFromL3Agent(
with self.subnet() as subnet:
s = subnet['subnet']
self._set_net_external(s['network_id'])
- with self.router(
- name='router1', tenant_id=self.tenant_id
- ) as router1:
+ with self.router(name='router1') as router1:
self._add_external_gateway_to_router(
router1['router']['id'], s['network_id'])
- with self.router(name='router2', tenant_id=self.tenant_id):
- with self.metering_label(tenant_id=self.tenant_id):
+ with self.router(name='router2'):
+ with self.metering_label():
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(
@@ -807,18 +771,15 @@ class TestMeteringPluginRpcFromL3Agent(
with self.subnet() as subnet:
s = subnet['subnet']
self._set_net_external(s['network_id'])
- with self.router(
- name='router1', tenant_id=self.tenant_id
- ) as router1:
+ with self.router(name='router1') as router1:
self._add_external_gateway_to_router(
router1['router']['id'], s['network_id'])
with self.router(
- name='router2', tenant_id=self.tenant_id,
- admin_state_up=False
+ name='router2', admin_state_up=False
) as router2:
self._add_external_gateway_to_router(
router2['router']['id'], s['network_id'])
- with self.metering_label(tenant_id=self.tenant_id):
+ with self.metering_label():
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(
diff --git a/neutron/tests/unit/services/qos/test_qos_plugin.py b/neutron/tests/unit/services/qos/test_qos_plugin.py
index 875712f172..ecdea9987f 100644
--- a/neutron/tests/unit/services/qos/test_qos_plugin.py
+++ b/neutron/tests/unit/services/qos/test_qos_plugin.py
@@ -1927,7 +1927,8 @@ class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
data = {'alias_%s_rule' % rule_type: kwargs}
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
- request = self.new_update_request(resource, data, rule_id, self.fmt)
+ request = self.new_update_request(resource, data, rule_id, self.fmt,
+ as_admin=True)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@@ -1936,7 +1937,8 @@ class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _show_rule(self, rule_type, rule_id):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
- request = self.new_show_request(resource, rule_id, self.fmt)
+ request = self.new_show_request(resource, rule_id, self.fmt,
+ as_admin=True)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@@ -1945,7 +1947,8 @@ class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _delete_rule(self, rule_type, rule_id):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
- request = self.new_delete_request(resource, rule_id, self.fmt)
+ request = self.new_delete_request(resource, rule_id, self.fmt,
+ as_admin=True)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@@ -2014,7 +2017,8 @@ class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
return_value=None):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
- request = self.new_show_request(resource, rule_id, self.fmt)
+ request = self.new_show_request(resource, rule_id, self.fmt,
+ as_admin=True)
res = request.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
diff --git a/neutron/tests/unit/services/revisions/test_revision_plugin.py b/neutron/tests/unit/services/revisions/test_revision_plugin.py
index 77bd869ae9..8f5ae05916 100644
--- a/neutron/tests/unit/services/revisions/test_revision_plugin.py
+++ b/neutron/tests/unit/services/revisions/test_revision_plugin.py
@@ -97,7 +97,7 @@ class TestRevisionPlugin(test_plugin.Ml2PluginV2TestCase):
# with the flush process that occurs with these two connected objects,
# creating two copies of the Network object in the Session and putting
# it into an invalid state.
- with self.network(shared=True):
+ with self.network(shared=True, as_admin=True):
pass
def test_port_name_update_revises(self):
@@ -279,7 +279,8 @@ class TestRevisionPlugin(test_plugin.Ml2PluginV2TestCase):
'project_id': uuidutils.generate_uuid()}}
qos_obj = qos_plugin.create_policy(self.ctx, qos_policy)
data = {'port': {'qos_policy_id': qos_obj['id']}}
- response = self._update('ports', port['port']['id'], data)
+ response = self._update('ports', port['port']['id'], data,
+ as_admin=True)
new_rev = response['port']['revision_number']
self.assertGreater(new_rev, rev)
@@ -292,7 +293,8 @@ class TestRevisionPlugin(test_plugin.Ml2PluginV2TestCase):
'project_id': uuidutils.generate_uuid()}}
qos_obj = qos_plugin.create_policy(self.ctx, qos_policy)
data = {'network': {'qos_policy_id': qos_obj['id']}}
- response = self._update('networks', network['network']['id'], data)
+ response = self._update('networks', network['network']['id'], data,
+ as_admin=True)
new_rev = response['network']['revision_number']
self.assertGreater(new_rev, rev)
diff --git a/releasenotes/notes/enable-enforce-scope-and-new-defaults-1f82a9eb71125f5d.yaml b/releasenotes/notes/enable-enforce-scope-and-new-defaults-1f82a9eb71125f5d.yaml
new file mode 100644
index 0000000000..5ca899343e
--- /dev/null
+++ b/releasenotes/notes/enable-enforce-scope-and-new-defaults-1f82a9eb71125f5d.yaml
@@ -0,0 +1,25 @@
+---
+upgrade:
+ - |
+    The Neutron service now enables the new API policy (RBAC) defaults and
+    scope enforcement by default. The default values of the config options
+    ``[oslo_policy] enforce_scope`` and
+    ``[oslo_policy] enforce_new_defaults`` have been changed to ``True``.
+
+    This means that if you are using a system scope token to access the
+    Neutron API, the request will fail with a 403 error code. Also, the new
+    policy defaults are now enforced. To learn about the new defaults of
+    each policy rule, refer to the `Policy New Defaults`_. For more details
+    about the changes in the Neutron API policies, refer to
+    `Policy Concepts`_.
+
+    If you want to disable them, modify the values of the config options
+    below in the ``neutron.conf`` file::
+
+ [oslo_policy]
+ enforce_new_defaults=False
+ enforce_scope=False
+
+ .. _`Policy New Defaults`: https://docs.openstack.org/neutron/latest/configuration/policy.html
+ .. _`Policy Concepts`: https://docs.openstack.org/neutron/latest/contributor/internals/policy.html
+