author     Takashi NATSUME <natsume.takashi@lab.ntt.co.jp>   2018-01-05 11:42:17 +0900
committer  Takashi NATSUME <natsume.takashi@lab.ntt.co.jp>   2018-03-26 06:24:09 +0000
commit     d49a6b8999c4b4986ea0ac0ce9f1dca2c076ee17 (patch)
tree       5ba043e831690d4aa11aa1a36602a3df82ef0e9a
parent     0390d5f0cef4a6c208da3383eff612686cd0cb45 (diff)
[placement] Add sending global request ID in get
Add the 'X-Openstack-Request-Id' header to GET requests issued by
SchedulerReportClient.

Conflicts:
    nova/scheduler/client/report.py
    nova/tests/functional/api/openstack/placement/test_report_client.py
    nova/tests/unit/scheduler/client/test_report.py

NOTE(takashin): The conflicts are due to
Iab366da7623e5e31b8416e89fee7d418f7bf9b30. This patch should have been
merged to stable/queens before Iab366da7623e5e31b8416e89fee7d418f7bf9b30!

Change-Id: I306ac6f5c6b67d77d91a7ba24d4d863ab3e1bf5c
Closes-Bug: #1734625
(cherry picked from commit ab4efbba61fb0dd0266e42e48555f80ddde72efd)
-rw-r--r--  nova/compute/manager.py                                               15
-rw-r--r--  nova/compute/resource_tracker.py                                       2
-rw-r--r--  nova/conductor/tasks/migrate.py                                        2
-rw-r--r--  nova/scheduler/client/report.py                                       99
-rw-r--r--  nova/scheduler/manager.py                                              3
-rw-r--r--  nova/scheduler/utils.py                                                2
-rw-r--r--  nova/tests/functional/api/openstack/placement/test_report_client.py  17
-rw-r--r--  nova/tests/unit/compute/test_compute_mgr.py                            9
-rw-r--r--  nova/tests/unit/compute/test_shelve.py                                 6
-rw-r--r--  nova/tests/unit/scheduler/client/test_report.py                      191
-rw-r--r--  nova/tests/unit/scheduler/test_scheduler.py                           42
-rw-r--r--  nova/tests/unit/scheduler/test_utils.py                                4
-rw-r--r--  releasenotes/notes/bug-1734625-419fd0e21bd332f6.yaml                   9
13 files changed, 242 insertions, 159 deletions
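
At its core the patch adds a global_request_id keyword to SchedulerReportClient.get()
and threads the security context through every caller of a GET helper so the ID can be
supplied. A minimal stand-alone sketch of that pattern follows; PlacementClient and its
constructor argument are illustrative stand-ins rather than nova code, and only the
header-handling logic mirrors the diff below.

    # Sketch of the pattern applied by this patch (assumed names, not nova itself):
    # pass the caller's security context down so its global request ID can be sent
    # to placement as the X-Openstack-Request-Id header on GET requests.

    INBOUND_HEADER = 'X-Openstack-Request-Id'   # matches oslo's request_id.INBOUND_HEADER


    class PlacementClient(object):
        def __init__(self, client):
            # In nova this is a keystoneauth1 Adapter for the placement endpoint.
            self._client = client

        def get(self, url, version=None, global_request_id=None):
            # Add the header only when a global request ID is available, so
            # requests made without one are unchanged.
            headers = ({INBOUND_HEADER: global_request_id}
                       if global_request_id else {})
            return self._client.get(url, raise_exc=False, microversion=version,
                                    headers=headers)

        def get_allocations_for_consumer(self, context, consumer):
            # Callers now pass the RequestContext; its global_id rides along on
            # the GET so nova and placement logs can be correlated.
            resp = self.get('/allocations/%s' % consumer,
                            global_request_id=context.global_id)
            return resp.json()['allocations'] if resp else {}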
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 3c5f068715..9c70a9a29a 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -2223,7 +2223,8 @@ class ComputeManager(manager.Manager):
try:
resources['allocations'] = (
- self.reportclient.get_allocations_for_consumer(instance.uuid))
+ self.reportclient.get_allocations_for_consumer(context,
+ instance.uuid))
except Exception:
LOG.exception('Failure retrieving placement allocations',
instance=instance)
@@ -3046,7 +3047,7 @@ class ComputeManager(manager.Manager):
context, instance, self.host, migration)
allocations = self.reportclient.get_allocations_for_consumer(
- instance.uuid)
+ context, instance.uuid)
network_info = instance.get_network_info()
if bdms is None:
@@ -3799,7 +3800,7 @@ class ComputeManager(manager.Manager):
# so, avoid doing the legacy behavior below.
mig_allocs = (
self.reportclient.get_allocations_for_consumer_by_provider(
- cn_uuid, migration.uuid))
+ context, cn_uuid, migration.uuid))
if mig_allocs:
LOG.info(_('Source node %(node)s reverted migration '
'%(mig)s; not deleting migration-based '
@@ -3814,7 +3815,7 @@ class ComputeManager(manager.Manager):
# accounting
allocs = (
self.reportclient.get_allocations_for_consumer_by_provider(
- cn_uuid, migration.uuid))
+ context, cn_uuid, migration.uuid))
if allocs:
# NOTE(danms): The source did migration-based allocation
# accounting, so we should let the source node rejigger
@@ -4013,7 +4014,7 @@ class ComputeManager(manager.Manager):
# Fetch the original allocation that the instance had on the source
# node, which are now held by the migration
orig_alloc = self.reportclient.get_allocations_for_consumer(
- migration.uuid)
+ context, migration.uuid)
if not orig_alloc:
# NOTE(danms): This migration did not do per-migration allocation
# accounting, so nothing to do here.
@@ -4915,7 +4916,7 @@ class ComputeManager(manager.Manager):
limits = filter_properties.get('limits', {})
allocations = self.reportclient.get_allocations_for_consumer(
- instance.uuid)
+ context, instance.uuid)
shelved_image_ref = instance.image_ref
if image:
@@ -6306,7 +6307,7 @@ class ComputeManager(manager.Manager):
migration = migrate_data.migration
rc = self.scheduler_client.reportclient
# Check to see if our migration has its own allocations
- allocs = rc.get_allocations_for_consumer(migration.uuid)
+ allocs = rc.get_allocations_for_consumer(ctxt, migration.uuid)
else:
# We didn't have data on a migration, which means we can't
# look up to see if we had new-style migration-based
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index bedcc54605..49edeed551 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -1232,7 +1232,7 @@ class ResourceTracker(object):
# always creates allocations for an instance
known_instances = set(self.tracked_instances.keys())
allocations = self.reportclient.get_allocations_for_resource_provider(
- cn.uuid) or {}
+ context, cn.uuid) or {}
read_deleted_context = context.elevated(read_deleted='yes')
for consumer_uuid, alloc in allocations.items():
if consumer_uuid in known_instances:
diff --git a/nova/conductor/tasks/migrate.py b/nova/conductor/tasks/migrate.py
index 3a654375d8..fb8c2fc182 100644
--- a/nova/conductor/tasks/migrate.py
+++ b/nova/conductor/tasks/migrate.py
@@ -45,7 +45,7 @@ def replace_allocation_with_migration(context, instance, migration):
reportclient = schedclient.reportclient
orig_alloc = reportclient.get_allocations_for_consumer_by_provider(
- source_cn.uuid, instance.uuid)
+ context, source_cn.uuid, instance.uuid)
if not orig_alloc:
LOG.debug('Unable to find existing allocations for instance on '
'source compute node: %s. This is normal if you are not '
diff --git a/nova/scheduler/client/report.py b/nova/scheduler/client/report.py
index ab53fe17bb..553b2904bc 100644
--- a/nova/scheduler/client/report.py
+++ b/nova/scheduler/client/report.py
@@ -274,8 +274,11 @@ class SchedulerReportClient(object):
client.additional_headers = {'accept': 'application/json'}
return client
- def get(self, url, version=None):
- return self._client.get(url, raise_exc=False, microversion=version)
+ def get(self, url, version=None, global_request_id=None):
+ headers = ({request_id.INBOUND_HEADER: global_request_id}
+ if global_request_id else {})
+ return self._client.get(url, raise_exc=False, microversion=version,
+ headers=headers)
def post(self, url, data, version=None, global_request_id=None):
headers = ({request_id.INBOUND_HEADER: global_request_id}
@@ -306,7 +309,7 @@ class SchedulerReportClient(object):
headers=headers)
@safe_connect
- def get_allocation_candidates(self, resources):
+ def get_allocation_candidates(self, context, resources):
"""Returns a tuple of (allocation_requests, provider_summaries,
allocation_request_version).
@@ -324,6 +327,7 @@ class SchedulerReportClient(object):
this data from placement, or (None, None, None) if the
request failed
+ :param context: The security context
:param nova.scheduler.utils.ResourceRequest resources:
A ResourceRequest object representing the requested resources and
traits from the request spec.
@@ -347,7 +351,8 @@ class SchedulerReportClient(object):
version = '1.17'
url = "/allocation_candidates?%s" % parse.urlencode(qs_params)
- resp = self.get(url, version=version)
+ resp = self.get(url, version=version,
+ global_request_id=context.global_id)
if resp.status_code == 200:
data = resp.json()
return (data['allocation_requests'], data['provider_summaries'],
@@ -371,7 +376,7 @@ class SchedulerReportClient(object):
return None, None, None
@safe_connect
- def _get_provider_aggregates(self, rp_uuid):
+ def _get_provider_aggregates(self, context, rp_uuid):
"""Queries the placement API for a resource provider's aggregates.
:param rp_uuid: UUID of the resource provider to grab aggregates for.
@@ -383,7 +388,7 @@ class SchedulerReportClient(object):
does not exist.
"""
resp = self.get("/resource_providers/%s/aggregates" % rp_uuid,
- version='1.1')
+ version='1.1', global_request_id=context.global_id)
if resp.status_code == 200:
data = resp.json()
return set(data['aggregates'])
@@ -402,9 +407,10 @@ class SchedulerReportClient(object):
raise exception.ResourceProviderAggregateRetrievalFailed(uuid=rp_uuid)
@safe_connect
- def _get_provider_traits(self, rp_uuid):
+ def _get_provider_traits(self, context, rp_uuid):
"""Queries the placement API for a resource provider's traits.
+ :param context: The security context
:param rp_uuid: UUID of the resource provider to grab traits for.
:return: A set() of string trait names, which may be empty if the
specified provider has no traits.
@@ -413,7 +419,7 @@ class SchedulerReportClient(object):
empty set()) if the specified resource provider does not exist.
"""
resp = self.get("/resource_providers/%s/traits" % rp_uuid,
- version='1.6')
+ version='1.6', global_request_id=context.global_id)
if resp.status_code == 200:
return set(resp.json()['traits'])
@@ -428,17 +434,19 @@ class SchedulerReportClient(object):
raise exception.ResourceProviderTraitRetrievalFailed(uuid=rp_uuid)
@safe_connect
- def _get_resource_provider(self, uuid):
+ def _get_resource_provider(self, context, uuid):
"""Queries the placement API for a resource provider record with the
supplied UUID.
+ :param context: The security context
:param uuid: UUID identifier for the resource provider to look up
:return: A dict of resource provider information if found or None if no
such resource provider could be found.
:raise: ResourceProviderRetrievalFailed on error.
"""
resp = self.get("/resource_providers/%s" % uuid,
- version=NESTED_PROVIDER_API_VERSION)
+ version=NESTED_PROVIDER_API_VERSION,
+ global_request_id=context.global_id)
if resp.status_code == 200:
data = resp.json()
return data
@@ -459,11 +467,12 @@ class SchedulerReportClient(object):
raise exception.ResourceProviderRetrievalFailed(uuid=uuid)
@safe_connect
- def _get_sharing_providers(self, agg_uuids):
+ def _get_sharing_providers(self, context, agg_uuids):
"""Queries the placement API for a list of the resource providers
associated with any of the specified aggregates and possessing the
MISC_SHARES_VIA_AGGREGATE trait.
+ :param context: The security context
:param agg_uuids: Iterable of string UUIDs of aggregates to filter on.
:return: A list of dicts of resource provider information, which may be
empty if no provider exists with the specified UUID.
@@ -475,11 +484,11 @@ class SchedulerReportClient(object):
qpval = ','.join(agg_uuids)
# TODO(efried): Need a ?having_traits=[...] on this API!
resp = self.get("/resource_providers?member_of=in:" + qpval,
- version='1.3')
+ version='1.3', global_request_id=context.global_id)
if resp.status_code == 200:
rps = []
for rp in resp.json()['resource_providers']:
- traits = self._get_provider_traits(rp['uuid'])
+ traits = self._get_provider_traits(context, rp['uuid'])
if os_traits.MISC_SHARES_VIA_AGGREGATE in traits:
rps.append(rp)
return rps
@@ -499,17 +508,19 @@ class SchedulerReportClient(object):
raise exception.ResourceProviderRetrievalFailed(message=msg % args)
@safe_connect
- def _get_providers_in_tree(self, uuid):
+ def _get_providers_in_tree(self, context, uuid):
"""Queries the placement API for a list of the resource providers in
the tree associated with the specified UUID.
+ :param context: The security context
:param uuid: UUID identifier for the resource provider to look up
:return: A list of dicts of resource provider information, which may be
empty if no provider exists with the specified UUID.
:raise: ResourceProviderRetrievalFailed on error.
"""
resp = self.get("/resource_providers?in_tree=%s" % uuid,
- version=NESTED_PROVIDER_API_VERSION)
+ version=NESTED_PROVIDER_API_VERSION,
+ global_request_id=context.global_id)
if resp.status_code == 200:
return resp.json()['resource_providers']
@@ -584,7 +595,7 @@ class SchedulerReportClient(object):
'placement_req_id': placement_req_id,
}
LOG.info(msg, args)
- return self._get_resource_provider(uuid)
+ return self._get_resource_provider(context, uuid)
# A provider with the same *name* already exists, or some other error.
msg = ("[%(placement_req_id)s] Failed to create resource provider "
@@ -641,12 +652,12 @@ class SchedulerReportClient(object):
# If we had the requested provider locally, refresh it and its
# descendants, but only if stale.
for u in self._provider_tree.get_provider_uuids(uuid):
- self._refresh_associations(u, force=False)
+ self._refresh_associations(context, u, force=False)
return uuid
# We don't have it locally; check placement or create it.
created_rp = None
- rps_to_refresh = self._get_providers_in_tree(uuid)
+ rps_to_refresh = self._get_providers_in_tree(context, uuid)
if not rps_to_refresh:
created_rp = self._create_resource_provider(
context, uuid, name or uuid,
@@ -661,7 +672,7 @@ class SchedulerReportClient(object):
for rp_to_refresh in rps_to_refresh:
self._refresh_associations(
- rp_to_refresh['uuid'],
+ context, rp_to_refresh['uuid'],
generation=rp_to_refresh.get('generation'), force=True)
return uuid
@@ -700,14 +711,14 @@ class SchedulerReportClient(object):
raise exception.ResourceProviderInUse()
raise exception.ResourceProviderDeletionFailed(uuid=rp_uuid)
- def _get_inventory(self, rp_uuid):
+ def _get_inventory(self, context, rp_uuid):
url = '/resource_providers/%s/inventories' % rp_uuid
- result = self.get(url)
+ result = self.get(url, global_request_id=context.global_id)
if not result:
return None
return result.json()
- def _refresh_and_get_inventory(self, rp_uuid):
+ def _refresh_and_get_inventory(self, context, rp_uuid):
"""Helper method that retrieves the current inventory for the supplied
resource provider according to the placement API.
@@ -716,7 +727,7 @@ class SchedulerReportClient(object):
generation and attempt to update inventory if any exists, otherwise
return empty inventories.
"""
- curr = self._get_inventory(rp_uuid)
+ curr = self._get_inventory(context, rp_uuid)
if curr is None:
return None
@@ -726,8 +737,8 @@ class SchedulerReportClient(object):
self._provider_tree.update_inventory(rp_uuid, curr_inv, cur_gen)
return curr
- def _refresh_associations(self, rp_uuid, generation=None, force=False,
- refresh_sharing=True):
+ def _refresh_associations(self, context, rp_uuid, generation=None,
+ force=False, refresh_sharing=True):
"""Refresh aggregates, traits, and (optionally) aggregate-associated
sharing providers for the specified resource provider uuid.
@@ -739,6 +750,7 @@ class SchedulerReportClient(object):
historical: all code paths that get us here are doing inventory refresh
themselves.
+ :param context: The security context
:param rp_uuid: UUID of the resource provider to check for fresh
aggregates and traits
:param generation: The resource provider generation to set. If None,
@@ -755,7 +767,7 @@ class SchedulerReportClient(object):
"""
if force or self._associations_stale(rp_uuid):
# Refresh aggregates
- aggs = self._get_provider_aggregates(rp_uuid)
+ aggs = self._get_provider_aggregates(context, rp_uuid)
msg = ("Refreshing aggregate associations for resource provider "
"%s, aggregates: %s")
LOG.debug(msg, rp_uuid, ','.join(aggs or ['None']))
@@ -766,7 +778,7 @@ class SchedulerReportClient(object):
rp_uuid, aggs, generation=generation)
# Refresh traits
- traits = self._get_provider_traits(rp_uuid)
+ traits = self._get_provider_traits(context, rp_uuid)
msg = ("Refreshing trait associations for resource provider %s, "
"traits: %s")
LOG.debug(msg, rp_uuid, ','.join(traits or ['None']))
@@ -777,7 +789,7 @@ class SchedulerReportClient(object):
if refresh_sharing:
# Refresh providers associated by aggregate
- for rp in self._get_sharing_providers(aggs):
+ for rp in self._get_sharing_providers(context, aggs):
if not self._provider_tree.exists(rp['uuid']):
# NOTE(efried): Right now sharing providers are always
# treated as roots. This is deliberate. From the
@@ -790,7 +802,8 @@ class SchedulerReportClient(object):
# providers). No need to override force=True for newly-
# added providers - the missing timestamp will always
# trigger them to refresh.
- self._refresh_associations(rp['uuid'], force=force,
+ self._refresh_associations(context, rp['uuid'],
+ force=force,
refresh_sharing=False)
self.association_refresh_time[rp_uuid] = time.time()
@@ -816,7 +829,7 @@ class SchedulerReportClient(object):
# TODO(jaypipes): Should we really be calling the placement API to get
# the current inventory for every resource provider each and every time
# update_resource_stats() is called? :(
- curr = self._refresh_and_get_inventory(rp_uuid)
+ curr = self._refresh_and_get_inventory(context, rp_uuid)
if curr is None:
return False
@@ -945,7 +958,7 @@ class SchedulerReportClient(object):
if not self._provider_tree.has_inventory(rp_uuid):
return None
- curr = self._refresh_and_get_inventory(rp_uuid)
+ curr = self._refresh_and_get_inventory(context, rp_uuid)
# Check to see if we need to update placement's view
if not curr.get('inventories', {}):
@@ -1052,7 +1065,7 @@ class SchedulerReportClient(object):
parent_provider_uuid=parent_provider_uuid)
# Ensure inventories are up to date (for *all* cached RPs)
for uuid in self._provider_tree.get_provider_uuids():
- self._refresh_and_get_inventory(uuid)
+ self._refresh_and_get_inventory(context, uuid)
# Return a *copy* of the tree.
return copy.deepcopy(self._provider_tree)
@@ -1113,7 +1126,8 @@ class SchedulerReportClient(object):
# service knows. If the caller tries to ensure a nonexistent
# "standard" trait, they deserve the TraitCreationFailed exception
# they'll get.
- resp = self.get('/traits?name=in:' + ','.join(traits), version='1.6')
+ resp = self.get('/traits?name=in:' + ','.join(traits), version='1.6',
+ global_request_id=context.global_id)
if resp.status_code == 200:
traits_to_create = set(traits) - set(resp.json()['traits'])
# Might be neat to have a batch create. But creating multiple
@@ -1363,19 +1377,20 @@ class SchedulerReportClient(object):
self._delete_inventory(context, compute_node.uuid)
@safe_connect
- def get_allocations_for_consumer(self, consumer):
+ def get_allocations_for_consumer(self, context, consumer):
url = '/allocations/%s' % consumer
- resp = self.get(url)
+ resp = self.get(url, global_request_id=context.global_id)
if not resp:
return {}
else:
return resp.json()['allocations']
- def get_allocations_for_consumer_by_provider(self, rp_uuid, consumer):
+ def get_allocations_for_consumer_by_provider(self, context, rp_uuid,
+ consumer):
# NOTE(cdent): This trims to just the allocations being
# used on this resource provider. In the future when there
# are shared resources there might be other providers.
- allocations = self.get_allocations_for_consumer(consumer)
+ allocations = self.get_allocations_for_consumer(context, consumer)
if allocations is None:
# safe_connect can return None on 404
allocations = {}
@@ -1385,7 +1400,7 @@ class SchedulerReportClient(object):
def _allocate_for_instance(self, context, rp_uuid, instance):
my_allocations = _instance_to_allocations_dict(instance)
current_allocations = self.get_allocations_for_consumer_by_provider(
- rp_uuid, instance.uuid)
+ context, rp_uuid, instance.uuid)
if current_allocations == my_allocations:
allocstr = ','.join(['%s=%s' % (k, v)
for k, v in my_allocations.items()])
@@ -1470,7 +1485,7 @@ class SchedulerReportClient(object):
# We first need to determine if this is a move operation and if so
# create the "doubled-up" allocation that exists for the duration of
# the move operation against both the source and destination hosts
- r = self.get(url)
+ r = self.get(url, global_request_id=context.global_id)
if r.status_code == 200:
current_allocs = r.json()['allocations']
if current_allocs:
@@ -1526,7 +1541,7 @@ class SchedulerReportClient(object):
url = '/allocations/%s' % consumer_uuid
# Grab the "doubled-up" allocation that we will manipulate
- r = self.get(url)
+ r = self.get(url, global_request_id=context.global_id)
if r.status_code != 200:
LOG.warning("Failed to retrieve allocations for %s. Got HTTP %s",
consumer_uuid, r.status_code)
@@ -1750,9 +1765,9 @@ class SchedulerReportClient(object):
self.delete_allocation_for_instance(context, instance.uuid)
@safe_connect
- def get_allocations_for_resource_provider(self, rp_uuid):
+ def get_allocations_for_resource_provider(self, context, rp_uuid):
url = '/resource_providers/%s/allocations' % rp_uuid
- resp = self.get(url)
+ resp = self.get(url, global_request_id=context.global_id)
if not resp:
return {}
else:
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index c39fa2e376..fd33cfb1ba 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -119,7 +119,8 @@ class SchedulerManager(manager.Manager):
alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version \
= None, None, None
if self.driver.USES_ALLOCATION_CANDIDATES:
- res = self.placement_client.get_allocation_candidates(resources)
+ res = self.placement_client.get_allocation_candidates(ctxt,
+ resources)
if res is None:
# We have to handle the case that we failed to connect to the
# Placement service and the safe_connect decorator on
diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
index 190d8dd87b..4734587153 100644
--- a/nova/scheduler/utils.py
+++ b/nova/scheduler/utils.py
@@ -366,7 +366,7 @@ def claim_resources_on_destination(
if not source_node_allocations:
source_node_allocations = (
reportclient.get_allocations_for_consumer_by_provider(
- source_node.uuid, instance.uuid))
+ context, source_node.uuid, instance.uuid))
if source_node_allocations:
# Generate an allocation request for the destination node.
alloc_request = {
diff --git a/nova/tests/functional/api/openstack/placement/test_report_client.py b/nova/tests/functional/api/openstack/placement/test_report_client.py
index f328f26e18..01ed6f171d 100644
--- a/nova/tests/functional/api/openstack/placement/test_report_client.py
+++ b/nova/tests/functional/api/openstack/placement/test_report_client.py
@@ -106,9 +106,11 @@ class SchedulerReportClientTests(test.TestCase):
res_class = fields.ResourceClass.VCPU
with self._interceptor():
# When we start out there are no resource providers.
- rp = self.client._get_resource_provider(self.compute_uuid)
+ rp = self.client._get_resource_provider(self.context,
+ self.compute_uuid)
self.assertIsNone(rp)
- rps = self.client._get_providers_in_tree(self.compute_uuid)
+ rps = self.client._get_providers_in_tree(self.context,
+ self.compute_uuid)
self.assertEqual([], rps)
# But get_provider_tree_and_ensure_root creates one (via
# _ensure_resource_provider)
@@ -120,15 +122,18 @@ class SchedulerReportClientTests(test.TestCase):
self.client.update_compute_node(self.context, self.compute_node)
# So now we have a resource provider
- rp = self.client._get_resource_provider(self.compute_uuid)
+ rp = self.client._get_resource_provider(self.context,
+ self.compute_uuid)
self.assertIsNotNone(rp)
- rps = self.client._get_providers_in_tree(self.compute_uuid)
+ rps = self.client._get_providers_in_tree(self.context,
+ self.compute_uuid)
self.assertEqual(1, len(rps))
# We should also have empty sets of aggregate and trait
# associations
self.assertEqual(
- [], self.client._get_sharing_providers([uuids.agg]))
+ [], self.client._get_sharing_providers(self.context,
+ [uuids.agg]))
self.assertFalse(
self.client._provider_tree.have_aggregates_changed(
self.compute_uuid, []))
@@ -312,6 +317,8 @@ class SchedulerReportClientTests(test.TestCase):
self.client.put('/resource_providers/%s' % self.compute_uuid,
payload,
global_request_id=global_request_id)
+ self.client.get('/resource_providers/%s' % self.compute_uuid,
+ global_request_id=global_request_id)
def test_get_provider_tree_with_nested_and_aggregates(self):
"""A more in-depth test of get_provider_tree_and_ensure_root with
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index c07f82c793..46096294e6 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -5641,7 +5641,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
except Exception as e:
self.assertIsInstance(e, exception.BuildAbortException)
- self.mock_get_allocs.assert_called_once_with(self.instance.uuid)
+ self.mock_get_allocs.assert_called_once_with(self.context,
+ self.instance.uuid)
mock_net_wait.assert_called_once_with(do_raise=False)
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
@@ -6489,7 +6490,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
'src')
self.assertFalse(mock_report.delete_allocation_for_instance.called)
ga.assert_called_once_with(
- mock_rt().get_node_uuid.return_value, self.migration.uuid)
+ self.context, mock_rt().get_node_uuid.return_value,
+ self.migration.uuid)
old = mock_report.remove_provider_from_instance_allocation
if new_rules:
@@ -6523,7 +6525,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
'dst')
self.assertFalse(mock_report.delete_allocation_for_instance.called)
cn_uuid = mock_rt().get_node_uuid.return_value
- ga.assert_called_once_with(cn_uuid, self.migration.uuid)
+ ga.assert_called_once_with(self.context, cn_uuid,
+ self.migration.uuid)
old = mock_report.remove_provider_from_instance_allocation
if new_rules:
diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py
index 0f6295604e..ca757d3218 100644
--- a/nova/tests/unit/compute/test_shelve.py
+++ b/nova/tests/unit/compute/test_shelve.py
@@ -350,7 +350,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
test.MatchType(objects.ImageMeta), injected_files=[],
admin_password=None, allocations={}, network_info=[],
block_device_info='fake_bdm')
- self.mock_get_allocs.assert_called_once_with(instance.uuid)
+ self.mock_get_allocs.assert_called_once_with(self.context,
+ instance.uuid)
mock_get_power_state.assert_called_once_with(self.context, instance)
self.assertNotIn('shelved_at', instance.system_metadata)
@@ -451,7 +452,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
test.MatchType(objects.ImageMeta),
injected_files=[], admin_password=None,
allocations={}, network_info=[], block_device_info='fake_bdm')
- self.mock_get_allocs.assert_called_once_with(instance.uuid)
+ self.mock_get_allocs.assert_called_once_with(self.context,
+ instance.uuid)
mock_get_power_state.assert_called_once_with(self.context, instance)
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
diff --git a/nova/tests/unit/scheduler/client/test_report.py b/nova/tests/unit/scheduler/client/test_report.py
index 8d825c9588..76db4b30e9 100644
--- a/nova/tests/unit/scheduler/client/test_report.py
+++ b/nova/tests/unit/scheduler/client/test_report.py
@@ -48,12 +48,12 @@ class SafeConnectedTestCase(test.NoDBTestCase):
A missing endpoint entry should not explode.
"""
req.side_effect = ks_exc.EndpointNotFound()
- self.client._get_resource_provider("fake")
+ self.client._get_resource_provider(self.context, "fake")
# reset the call count to demonstrate that future calls still
# work
req.reset_mock()
- self.client._get_resource_provider("fake")
+ self.client._get_resource_provider(self.context, "fake")
self.assertTrue(req.called)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
@@ -65,7 +65,7 @@ class SafeConnectedTestCase(test.NoDBTestCase):
A missing endpoint should cause _create_client to be called.
"""
req.side_effect = ks_exc.EndpointNotFound()
- self.client._get_resource_provider("fake")
+ self.client._get_resource_provider(self.context, "fake")
# This is the second time _create_client is called, but the first since
# the mock was created.
@@ -79,12 +79,12 @@ class SafeConnectedTestCase(test.NoDBTestCase):
"""
req.side_effect = ks_exc.MissingAuthPlugin()
- self.client._get_resource_provider("fake")
+ self.client._get_resource_provider(self.context, "fake")
# reset the call count to demonstrate that future calls still
# work
req.reset_mock()
- self.client._get_resource_provider("fake")
+ self.client._get_resource_provider(self.context, "fake")
self.assertTrue(req.called)
@mock.patch('keystoneauth1.session.Session.request')
@@ -95,12 +95,12 @@ class SafeConnectedTestCase(test.NoDBTestCase):
"""
req.side_effect = ks_exc.Unauthorized()
- self.client._get_resource_provider("fake")
+ self.client._get_resource_provider(self.context, "fake")
# reset the call count to demonstrate that future calls still
# work
req.reset_mock()
- self.client._get_resource_provider("fake")
+ self.client._get_resource_provider(self.context, "fake")
self.assertTrue(req.called)
@mock.patch('keystoneauth1.session.Session.request')
@@ -112,12 +112,12 @@ class SafeConnectedTestCase(test.NoDBTestCase):
"""
req.side_effect = ks_exc.ConnectFailure()
- self.client._get_resource_provider("fake")
+ self.client._get_resource_provider(self.context, "fake")
# reset the call count to demonstrate that future calls do
# work
req.reset_mock()
- self.client._get_resource_provider("fake")
+ self.client._get_resource_provider(self.context, "fake")
self.assertTrue(req.called)
@mock.patch.object(report, 'LOG')
@@ -138,12 +138,12 @@ class SafeConnectedTestCase(test.NoDBTestCase):
Failed discovery should not blow up.
"""
req.side_effect = ks_exc.DiscoveryFailure()
- self.client._get_resource_provider("fake")
+ self.client._get_resource_provider(self.context, "fake")
# reset the call count to demonstrate that future calls still
# work
req.reset_mock()
- self.client._get_resource_provider("fake")
+ self.client._get_resource_provider(self.context, "fake")
self.assertTrue(req.called)
@@ -1245,12 +1245,13 @@ class TestProviderOperations(SchedulerReportClientTestCase):
},
]
self.client._ensure_resource_provider(self.context, cn.uuid)
- get_shr_mock.assert_called_once_with(set([uuids.agg1, uuids.agg2]))
+ get_shr_mock.assert_called_once_with(
+ self.context, set([uuids.agg1, uuids.agg2]))
self.assertTrue(self.client._provider_tree.exists(uuids.shr1))
self.assertTrue(self.client._provider_tree.exists(uuids.shr2))
# _get_provider_aggregates and _traits were called thrice: one for the
# compute RP and once for each of the sharing RPs.
- expected_calls = [mock.call(uuid)
+ expected_calls = [mock.call(self.context, uuid)
for uuid in (cn.uuid, uuids.shr1, uuids.shr2)]
get_agg_mock.assert_has_calls(expected_calls)
get_trait_mock.assert_has_calls(expected_calls)
@@ -1303,23 +1304,24 @@ class TestProviderOperations(SchedulerReportClientTestCase):
self.client._ensure_resource_provider(self.context, uuids.compute_node)
- get_rpt_mock.assert_called_once_with(uuids.compute_node)
+ get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
self.assertTrue(self.client._provider_tree.exists(uuids.compute_node))
- get_agg_mock.assert_called_once_with(uuids.compute_node)
+ get_agg_mock.assert_called_once_with(self.context, uuids.compute_node)
self.assertTrue(
self.client._provider_tree.in_aggregates(uuids.compute_node,
[uuids.agg1]))
self.assertFalse(
self.client._provider_tree.in_aggregates(uuids.compute_node,
[uuids.agg2]))
- get_trait_mock.assert_called_once_with(uuids.compute_node)
+ get_trait_mock.assert_called_once_with(self.context,
+ uuids.compute_node)
self.assertTrue(
self.client._provider_tree.has_traits(uuids.compute_node,
['CUSTOM_GOLD']))
self.assertFalse(
self.client._provider_tree.has_traits(uuids.compute_node,
['CUSTOM_SILVER']))
- get_shr_mock.assert_called_once_with(set([uuids.agg1]))
+ get_shr_mock.assert_called_once_with(self.context, set([uuids.agg1]))
self.assertTrue(self.client._provider_tree.exists(uuids.compute_node))
self.assertFalse(create_rp_mock.called)
@@ -1343,7 +1345,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
self.client._ensure_resource_provider, self.context,
uuids.compute_node)
- get_rpt_mock.assert_called_once_with(uuids.compute_node)
+ get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
create_rp_mock.assert_called_once_with(
self.context, uuids.compute_node, uuids.compute_node,
parent_provider_uuid=None)
@@ -1383,7 +1385,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
# We don't refresh for a just-created provider
refresh_mock.assert_not_called()
- get_rpt_mock.assert_called_once_with(uuids.compute_node)
+ get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
create_rp_mock.assert_called_once_with(
self.context,
uuids.compute_node,
@@ -1483,7 +1485,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
mock_exists.assert_called_once_with(uuids.root)
mock_gpu.assert_called_once_with(uuids.root)
mock_refresh.assert_has_calls(
- [mock.call(uuid, force=False) for uuid in tree_uuids])
+ [mock.call(self.context, uuid, force=False)
+ for uuid in tree_uuids])
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_providers_in_tree')
@@ -1500,9 +1503,9 @@ class TestProviderOperations(SchedulerReportClientTestCase):
self.assertEqual(uuids.root,
self.client._ensure_resource_provider(self.context,
uuids.root))
- mock_gpit.assert_called_once_with(uuids.root)
+ mock_gpit.assert_called_once_with(self.context, uuids.root)
mock_refresh.assert_has_calls(
- [mock.call(uuid, generation=42, force=True)
+ [mock.call(self.context, uuid, generation=42, force=True)
for uuid in tree_uuids])
self.assertEqual(tree_uuids,
set(self.client._provider_tree.get_provider_uuids()))
@@ -1522,7 +1525,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
self.assertEqual(uuids.root,
self.client._ensure_resource_provider(self.context,
uuids.root))
- mock_gpit.assert_called_once_with(uuids.root)
+ mock_gpit.assert_called_once_with(self.context, uuids.root)
mock_create.assert_called_once_with(self.context, uuids.root,
uuids.root,
parent_provider_uuid=None)
@@ -1552,10 +1555,11 @@ class TestProviderOperations(SchedulerReportClientTestCase):
self.ks_adap_mock.get.return_value = resp_mock
alloc_reqs, p_sums, allocation_request_version = \
- self.client.get_allocation_candidates(resources)
+ self.client.get_allocation_candidates(self.context, resources)
self.ks_adap_mock.get.assert_called_once_with(
- mock.ANY, raise_exc=False, microversion='1.17')
+ mock.ANY, raise_exc=False, microversion='1.17',
+ headers={'X-Openstack-Request-Id': self.context.global_id})
url = self.ks_adap_mock.get.call_args[0][0]
split_url = parse.urlsplit(url)
query = parse.parse_qs(split_url.query)
@@ -1583,10 +1587,11 @@ class TestProviderOperations(SchedulerReportClientTestCase):
self.ks_adap_mock.get.return_value = resp_mock
alloc_reqs, p_sums, allocation_request_version = \
- self.client.get_allocation_candidates(resources)
+ self.client.get_allocation_candidates(self.context, resources)
self.ks_adap_mock.get.assert_called_once_with(
- mock.ANY, raise_exc=False, microversion='1.17')
+ mock.ANY, raise_exc=False, microversion='1.17',
+ headers={'X-Openstack-Request-Id': self.context.global_id})
url = self.ks_adap_mock.get.call_args[0][0]
split_url = parse.urlsplit(url)
query = parse.parse_qs(split_url.query)
@@ -1609,10 +1614,11 @@ class TestProviderOperations(SchedulerReportClientTestCase):
resources = scheduler_utils.ResourceRequest.from_extra_specs(
{'resources:MEMORY_MB': '1024'})
- res = self.client.get_allocation_candidates(resources)
+ res = self.client.get_allocation_candidates(self.context, resources)
self.ks_adap_mock.get.assert_called_once_with(
- mock.ANY, raise_exc=False, microversion='1.17')
+ mock.ANY, raise_exc=False, microversion='1.17',
+ headers={'X-Openstack-Request-Id': self.context.global_id})
url = self.ks_adap_mock.get.call_args[0][0]
split_url = parse.urlsplit(url)
query = parse.parse_qs(split_url.query)
@@ -1634,7 +1640,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
resp_mock.json.return_value = json_data
self.ks_adap_mock.get.return_value = resp_mock
- result = self.client._get_resource_provider(uuid)
+ result = self.client._get_resource_provider(self.context, uuid)
expected_provider_dict = dict(
uuid=uuid,
@@ -1644,7 +1650,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
)
expected_url = '/resource_providers/' + uuid
self.ks_adap_mock.get.assert_called_once_with(
- expected_url, raise_exc=False, microversion='1.14')
+ expected_url, raise_exc=False, microversion='1.14',
+ headers={'X-Openstack-Request-Id': self.context.global_id})
self.assertEqual(expected_provider_dict, result)
def test_get_resource_provider_not_found(self):
@@ -1654,11 +1661,12 @@ class TestProviderOperations(SchedulerReportClientTestCase):
self.ks_adap_mock.get.return_value = resp_mock
uuid = uuids.compute_node
- result = self.client._get_resource_provider(uuid)
+ result = self.client._get_resource_provider(self.context, uuid)
expected_url = '/resource_providers/' + uuid
self.ks_adap_mock.get.assert_called_once_with(
- expected_url, raise_exc=False, microversion='1.14')
+ expected_url, raise_exc=False, microversion='1.14',
+ headers={'X-Openstack-Request-Id': self.context.global_id})
self.assertIsNone(result)
@mock.patch.object(report.LOG, 'error')
@@ -1674,11 +1682,12 @@ class TestProviderOperations(SchedulerReportClientTestCase):
uuid = uuids.compute_node
self.assertRaises(
exception.ResourceProviderRetrievalFailed,
- self.client._get_resource_provider, uuid)
+ self.client._get_resource_provider, self.context, uuid)
expected_url = '/resource_providers/' + uuid
self.ks_adap_mock.get.assert_called_once_with(
- expected_url, raise_exc=False, microversion='1.14')
+ expected_url, raise_exc=False, microversion='1.14',
+ headers={'X-Openstack-Request-Id': self.context.global_id})
# A 503 Service Unavailable should trigger an error log that
# includes the placement request id and return None
# from _get_resource_provider()
@@ -1718,17 +1727,19 @@ class TestProviderOperations(SchedulerReportClientTestCase):
set(['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO']),
set(['CUSTOM_BAR']),
]
- result = self.client._get_sharing_providers([uuids.agg1, uuids.agg2])
+ result = self.client._get_sharing_providers(
+ self.context, [uuids.agg1, uuids.agg2])
expected_url = ('/resource_providers?member_of=in:' +
','.join((uuids.agg1, uuids.agg2)))
self.ks_adap_mock.get.assert_called_once_with(
- expected_url, raise_exc=False, microversion='1.3')
+ expected_url, raise_exc=False, microversion='1.3',
+ headers={'X-Openstack-Request-Id': self.context.global_id})
self.assertEqual(rpjson[:1], result)
def test_get_sharing_providers_emptylist(self):
self.assertEqual(
- [], self.client._get_sharing_providers([]))
+ [], self.client._get_sharing_providers(self.context, []))
self.ks_adap_mock.get.assert_not_called()
@mock.patch.object(report.LOG, 'error')
@@ -1742,11 +1753,13 @@ class TestProviderOperations(SchedulerReportClientTestCase):
uuid = uuids.agg
self.assertRaises(exception.ResourceProviderRetrievalFailed,
- self.client._get_sharing_providers, [uuid])
+ self.client._get_sharing_providers,
+ self.context, [uuid])
expected_url = '/resource_providers?member_of=in:' + uuid
self.ks_adap_mock.get.assert_called_once_with(
- expected_url, raise_exc=False, microversion='1.3')
+ expected_url, raise_exc=False, microversion='1.3',
+ headers={'X-Openstack-Request-Id': self.context.global_id})
# A 503 Service Unavailable should trigger an error log that
# includes the placement request id
self.assertTrue(logging_mock.called)
@@ -1776,11 +1789,12 @@ class TestProviderOperations(SchedulerReportClientTestCase):
resp_mock.json.return_value = {'resource_providers': rpjson}
self.ks_adap_mock.get.return_value = resp_mock
- result = self.client._get_providers_in_tree(root)
+ result = self.client._get_providers_in_tree(self.context, root)
expected_url = '/resource_providers?in_tree=' + root
self.ks_adap_mock.get.assert_called_once_with(
- expected_url, raise_exc=False, microversion='1.14')
+ expected_url, raise_exc=False, microversion='1.14',
+ headers={'X-Openstack-Request-Id': self.context.global_id})
self.assertEqual(rpjson, result)
@mock.patch.object(report.LOG, 'error')
@@ -1794,11 +1808,13 @@ class TestProviderOperations(SchedulerReportClientTestCase):
uuid = uuids.compute_node
self.assertRaises(exception.ResourceProviderRetrievalFailed,
- self.client._get_providers_in_tree, uuid)
+ self.client._get_providers_in_tree, self.context,
+ uuid)
expected_url = '/resource_providers?in_tree=' + uuid
self.ks_adap_mock.get.assert_called_once_with(
- expected_url, raise_exc=False, microversion='1.14')
+ expected_url, raise_exc=False, microversion='1.14',
+ headers={'X-Openstack-Request-Id': self.context.global_id})
# A 503 Service Unavailable should trigger an error log that includes
# the placement request id
self.assertTrue(logging_mock.called)
@@ -2031,11 +2047,12 @@ class TestAggregates(SchedulerReportClientTestCase):
resp_mock.json.return_value = {'aggregates': aggs}
self.ks_adap_mock.get.return_value = resp_mock
- result = self.client._get_provider_aggregates(uuid)
+ result = self.client._get_provider_aggregates(self.context, uuid)
expected_url = '/resource_providers/' + uuid + '/aggregates'
self.ks_adap_mock.get.assert_called_once_with(
- expected_url, raise_exc=False, microversion='1.1')
+ expected_url, raise_exc=False, microversion='1.1',
+ headers={'X-Openstack-Request-Id': self.context.global_id})
self.assertEqual(set(aggs), result)
@mock.patch.object(report.LOG, 'error')
@@ -2052,11 +2069,12 @@ class TestAggregates(SchedulerReportClientTestCase):
resp_mock.status_code = status_code
self.assertRaises(
exception.ResourceProviderAggregateRetrievalFailed,
- self.client._get_provider_aggregates, uuid)
+ self.client._get_provider_aggregates, self.context, uuid)
expected_url = '/resource_providers/' + uuid + '/aggregates'
self.ks_adap_mock.get.assert_called_once_with(
- expected_url, raise_exc=False, microversion='1.1')
+ expected_url, raise_exc=False, microversion='1.1',
+ headers={'X-Openstack-Request-Id': self.context.global_id})
self.assertTrue(log_mock.called)
self.assertEqual(uuids.request_id,
log_mock.call_args[0][1]['placement_req_id'])
@@ -2077,11 +2095,13 @@ class TestTraits(SchedulerReportClientTestCase):
resp_mock.json.return_value = {'traits': traits}
self.ks_adap_mock.get.return_value = resp_mock
- result = self.client._get_provider_traits(uuid)
+ result = self.client._get_provider_traits(self.context, uuid)
expected_url = '/resource_providers/' + uuid + '/traits'
self.ks_adap_mock.get.assert_called_once_with(
- expected_url, **self.trait_api_kwargs)
+ expected_url,
+ headers={'X-Openstack-Request-Id': self.context.global_id},
+ **self.trait_api_kwargs)
self.assertEqual(set(traits), result)
@mock.patch.object(report.LOG, 'error')
@@ -2098,11 +2118,13 @@ class TestTraits(SchedulerReportClientTestCase):
resp_mock.status_code = status_code
self.assertRaises(
exception.ResourceProviderTraitRetrievalFailed,
- self.client._get_provider_traits, uuid)
+ self.client._get_provider_traits, self.context, uuid)
expected_url = '/resource_providers/' + uuid + '/traits'
self.ks_adap_mock.get.assert_called_once_with(
- expected_url, **self.trait_api_kwargs)
+ expected_url,
+ headers={'X-Openstack-Request-Id': self.context.global_id},
+ **self.trait_api_kwargs)
self.assertTrue(log_mock.called)
self.assertEqual(uuids.request_id,
log_mock.call_args[0][1]['placement_req_id'])
@@ -2124,7 +2146,9 @@ class TestTraits(SchedulerReportClientTestCase):
get_mock.json.return_value = {'traits': standard_traits}
self.client._ensure_traits(self.context, all_traits)
self.ks_adap_mock.get.assert_called_once_with(
- '/traits?name=in:' + ','.join(all_traits), **self.trait_api_kwargs)
+ '/traits?name=in:' + ','.join(all_traits),
+ headers={'X-Openstack-Request-Id': self.context.global_id},
+ **self.trait_api_kwargs)
self.ks_adap_mock.put.assert_has_calls(
[mock.call('/traits/' + trait,
headers={'X-Openstack-Request-Id': self.context.global_id},
@@ -2138,6 +2162,7 @@ class TestTraits(SchedulerReportClientTestCase):
self.client._ensure_traits(self.context, standard_traits)
self.ks_adap_mock.get.assert_called_once_with(
'/traits?name=in:' + ','.join(standard_traits),
+ headers={'X-Openstack-Request-Id': self.context.global_id},
**self.trait_api_kwargs)
self.ks_adap_mock.put.assert_not_called()
@@ -2157,7 +2182,9 @@ class TestTraits(SchedulerReportClientTestCase):
self.context, ['FOO'])
self.ks_adap_mock.get.assert_called_once_with(
- '/traits?name=in:FOO', **self.trait_api_kwargs)
+ '/traits?name=in:FOO',
+ headers={'X-Openstack-Request-Id': self.context.global_id},
+ **self.trait_api_kwargs)
self.ks_adap_mock.put.assert_not_called()
def test_ensure_traits_fail_creation(self):
@@ -2173,7 +2200,9 @@ class TestTraits(SchedulerReportClientTestCase):
self.context, ['FOO'])
self.ks_adap_mock.get.assert_called_once_with(
- '/traits?name=in:FOO', **self.trait_api_kwargs)
+ '/traits?name=in:FOO',
+ headers={'X-Openstack-Request-Id': self.context.global_id},
+ **self.trait_api_kwargs)
self.ks_adap_mock.put.assert_called_once_with(
'/traits/FOO',
headers={'X-Openstack-Request-Id': self.context.global_id},
@@ -2201,7 +2230,9 @@ class TestTraits(SchedulerReportClientTestCase):
# Verify API calls
self.ks_adap_mock.get.assert_called_once_with(
- '/traits?name=in:' + ','.join(traits), **self.trait_api_kwargs)
+ '/traits?name=in:' + ','.join(traits),
+ headers={'X-Openstack-Request-Id': self.context.global_id},
+ **self.trait_api_kwargs)
self.ks_adap_mock.put.assert_called_once_with(
'/resource_providers/%s/traits' % uuids.rp,
json={'traits': traits, 'resource_provider_generation': 0},
@@ -2264,10 +2295,11 @@ class TestAssociations(SchedulerReportClientTestCase):
self.client._provider_tree.new_root('compute', uuid, 1)
mock_agg_get.return_value = set([uuids.agg1])
mock_trait_get.return_value = set(['CUSTOM_GOLD'])
- self.client._refresh_associations(uuid)
- mock_agg_get.assert_called_once_with(uuid)
- mock_trait_get.assert_called_once_with(uuid)
- mock_shr_get.assert_called_once_with(mock_agg_get.return_value)
+ self.client._refresh_associations(self.context, uuid)
+ mock_agg_get.assert_called_once_with(self.context, uuid)
+ mock_trait_get.assert_called_once_with(self.context, uuid)
+ mock_shr_get.assert_called_once_with(
+ self.context, mock_agg_get.return_value)
self.assertIn(uuid, self.client.association_refresh_time)
self.assertTrue(
self.client._provider_tree.in_aggregates(uuid, [uuids.agg1]))
@@ -2293,9 +2325,10 @@ class TestAssociations(SchedulerReportClientTestCase):
self.client._provider_tree.new_root('compute', uuid, 1)
mock_agg_get.return_value = set([uuids.agg1])
mock_trait_get.return_value = set(['CUSTOM_GOLD'])
- self.client._refresh_associations(uuid, refresh_sharing=False)
- mock_agg_get.assert_called_once_with(uuid)
- mock_trait_get.assert_called_once_with(uuid)
+ self.client._refresh_associations(self.context, uuid,
+ refresh_sharing=False)
+ mock_agg_get.assert_called_once_with(self.context, uuid)
+ mock_trait_get.assert_called_once_with(self.context, uuid)
mock_shr_get.assert_not_called()
self.assertIn(uuid, self.client.association_refresh_time)
self.assertTrue(
@@ -2322,7 +2355,7 @@ class TestAssociations(SchedulerReportClientTestCase):
"""
mock_stale.return_value = False
uuid = uuids.compute_node
- self.client._refresh_associations(uuid)
+ self.client._refresh_associations(self.context, uuid)
mock_agg_get.assert_not_called()
mock_trait_get.assert_not_called()
mock_shr_get.assert_not_called()
@@ -2347,10 +2380,10 @@ class TestAssociations(SchedulerReportClientTestCase):
# Called a first time because association_refresh_time is empty.
now = time.time()
- self.client._refresh_associations(uuid)
- mock_agg_get.assert_called_once_with(uuid)
- mock_trait_get.assert_called_once_with(uuid)
- mock_shr_get.assert_called_once_with(set())
+ self.client._refresh_associations(self.context, uuid)
+ mock_agg_get.assert_called_once_with(self.context, uuid)
+ mock_trait_get.assert_called_once_with(self.context, uuid)
+ mock_shr_get.assert_called_once_with(self.context, set())
log_mock.assert_has_calls([
mock.call('Refreshing aggregate associations for resource '
'provider %s, aggregates: %s', uuid, 'None'),
@@ -2367,17 +2400,17 @@ class TestAssociations(SchedulerReportClientTestCase):
with mock.patch('time.time') as mock_future:
# Not called a second time because not enough time has passed.
mock_future.return_value = now + report.ASSOCIATION_REFRESH / 2
- self.client._refresh_associations(uuid)
+ self.client._refresh_associations(self.context, uuid)
mock_agg_get.assert_not_called()
mock_trait_get.assert_not_called()
mock_shr_get.assert_not_called()
# Called because time has passed.
mock_future.return_value = now + report.ASSOCIATION_REFRESH + 1
- self.client._refresh_associations(uuid)
- mock_agg_get.assert_called_once_with(uuid)
- mock_trait_get.assert_called_once_with(uuid)
- mock_shr_get.assert_called_once_with(set())
+ self.client._refresh_associations(self.context, uuid)
+ mock_agg_get.assert_called_once_with(self.context, uuid)
+ mock_trait_get.assert_called_once_with(self.context, uuid)
+ mock_shr_get.assert_called_once_with(self.context, set())
class TestComputeNodeToInventoryDict(test.NoDBTestCase):
@@ -2836,7 +2869,8 @@ There was a conflict when trying to complete your request.
self.assertTrue(result)
exp_url = '/resource_providers/%s/inventories' % uuid
- mock_get.assert_called_once_with(exp_url)
+ mock_get.assert_called_once_with(
+ exp_url, global_request_id=self.context.global_id)
# Updated with the new inventory from the PUT call
self._validate_provider(uuid, generation=44)
expected = {
@@ -2913,7 +2947,8 @@ There was a conflict when trying to complete your request.
self.assertTrue(result)
exp_url = '/resource_providers/%s/inventories' % uuid
- mock_get.assert_called_once_with(exp_url)
+ mock_get.assert_called_once_with(
+ exp_url, global_request_id=self.context.global_id)
# Updated with the new inventory from the PUT call
self._validate_provider(uuid, generation=44)
expected = {
@@ -2999,7 +3034,8 @@ There was a conflict when trying to complete your request.
)
self.assertTrue(result)
exp_url = '/resource_providers/%s/inventories' % uuid
- mock_get.assert_called_once_with(exp_url)
+ mock_get.assert_called_once_with(
+ exp_url, global_request_id=self.context.global_id)
# No update so put should not be called
self.assertFalse(mock_put.called)
# Make sure we updated the generation from the inventory records
@@ -3567,7 +3603,8 @@ class TestAllocations(SchedulerReportClientTestCase):
self.client.update_instance_allocation(self.context, cn, inst, 1)
self.assertFalse(mock_put.called)
mock_get.assert_called_once_with(
- '/allocations/%s' % inst.uuid)
+ '/allocations/%s' % inst.uuid,
+ global_request_id=self.context.global_id)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get')
diff --git a/nova/tests/unit/scheduler/test_scheduler.py b/nova/tests/unit/scheduler/test_scheduler.py
index ee04558dc4..204fcbc43d 100644
--- a/nova/tests/unit/scheduler/test_scheduler.py
+++ b/nova/tests/unit/scheduler/test_scheduler.py
@@ -119,12 +119,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
}
with mock.patch.object(self.manager.driver, 'select_destinations'
) as select_destinations:
- self.manager.select_destinations(None, spec_obj=fake_spec,
+ self.manager.select_destinations(self.context, spec_obj=fake_spec,
instance_uuids=[fake_spec.instance_uuid])
- select_destinations.assert_called_once_with(None, fake_spec,
+ select_destinations.assert_called_once_with(
+ self.context, fake_spec,
[fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums, fake_version, False)
- mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
+ mock_get_ac.assert_called_once_with(
+ self.context, mock_rfrs.return_value)
# Now call select_destinations() with True values for the params
# introduced in RPC version 4.5
@@ -196,10 +198,12 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
with mock.patch.object(self.manager.driver, 'select_destinations'
) as select_destinations:
self.assertRaises(messaging.rpc.dispatcher.ExpectedException,
- self.manager.select_destinations, None, spec_obj=fake_spec,
+ self.manager.select_destinations, self.context,
+ spec_obj=fake_spec,
instance_uuids=[fake_spec.instance_uuid])
select_destinations.assert_not_called()
- mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
+ mock_get_ac.assert_called_once_with(
+ self.context, mock_rfrs.return_value)
def test_select_destination_old_placement(self):
"""Tests that we will raise NoValidhost when the scheduler
@@ -240,11 +244,12 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
}
with mock.patch.object(self.manager.driver, 'select_destinations'
) as select_destinations:
- self.manager.select_destinations(None, spec_obj=fake_spec)
- select_destinations.assert_called_once_with(None, fake_spec, None,
- expected_alloc_reqs_by_rp_uuid, mock.sentinel.p_sums, "42.0",
- False)
- mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
+ self.manager.select_destinations(self.context, spec_obj=fake_spec)
+ select_destinations.assert_called_once_with(self.context,
+ fake_spec, None, expected_alloc_reqs_by_rp_uuid,
+ mock.sentinel.p_sums, "42.0", False)
+ mock_get_ac.assert_called_once_with(
+ self.context, mock_rfrs.return_value)
# TODO(sbauza): Remove that test once the API v4 is removed
@mock.patch('nova.scheduler.utils.resources_from_request_spec')
@@ -264,13 +269,16 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
}
with mock.patch.object(self.manager.driver, 'select_destinations'
) as select_destinations:
- self.manager.select_destinations(None, request_spec='fake_spec',
- filter_properties='fake_props',
- instance_uuids=[fake_spec.instance_uuid])
- select_destinations.assert_called_once_with(None, fake_spec,
- [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
- mock.sentinel.p_sums, "42.0", False)
- mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
+ self.manager.select_destinations(
+ self.context, request_spec='fake_spec',
+ filter_properties='fake_props',
+ instance_uuids=[fake_spec.instance_uuid])
+ select_destinations.assert_called_once_with(
+ self.context, fake_spec,
+ [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
+ mock.sentinel.p_sums, "42.0", False)
+ mock_get_ac.assert_called_once_with(
+ self.context, mock_rfrs.return_value)
def test_update_aggregates(self):
with mock.patch.object(self.manager.driver.host_manager,
diff --git a/nova/tests/unit/scheduler/test_utils.py b/nova/tests/unit/scheduler/test_utils.py
index 56d5aa737b..f4202df326 100644
--- a/nova/tests/unit/scheduler/test_utils.py
+++ b/nova/tests/unit/scheduler/test_utils.py
@@ -465,7 +465,7 @@ class TestUtils(test.NoDBTestCase):
utils.claim_resources_on_destination(
self.context, reportclient, instance, source_node, dest_node)
mock_get_allocs.assert_called_once_with(
- uuids.source_node, instance.uuid)
+ self.context, uuids.source_node, instance.uuid)
test()
@@ -505,7 +505,7 @@ class TestUtils(test.NoDBTestCase):
self.context, reportclient, instance,
source_node, dest_node)
mock_get_allocs.assert_called_once_with(
- uuids.source_node, instance.uuid)
+ self.context, uuids.source_node, instance.uuid)
mock_claim.assert_called_once_with(
self.context, instance.uuid, dest_alloc_request,
instance.project_id, instance.user_id,
diff --git a/releasenotes/notes/bug-1734625-419fd0e21bd332f6.yaml b/releasenotes/notes/bug-1734625-419fd0e21bd332f6.yaml
new file mode 100644
index 0000000000..0dcb487b95
--- /dev/null
+++ b/releasenotes/notes/bug-1734625-419fd0e21bd332f6.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ The SchedulerReportClient
+ (``nova.scheduler.client.report.SchedulerReportClient``) sends requests
+ with the global request ID in the ``X-Openstack-Request-Id`` header
+ to the placement service. `Bug 1734625`_
+
+ .. _Bug 1734625: https://bugs.launchpad.net/nova/+bug/1734625
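
With this in place, the request ID logged by nova-compute or nova-scheduler can be used
to correlate a single operation with the corresponding requests in the placement service
logs. The unit-test changes above all assert the new header the same way; a condensed,
hypothetical example (self.client, self.ks_adap_mock and self.context are fixture names
assumed from test_report.py) would look like:

    # Hypothetical condensed check in the style of the test_report.py changes:
    # call through the report client and assert that the stubbed keystoneauth
    # adapter saw the X-Openstack-Request-Id header on the GET. This would live
    # in a test case class with the same fixtures as TestProviderOperations.
    def test_get_sends_global_request_id(self):
        self.client._get_resource_provider(self.context, 'some-rp-uuid')
        self.ks_adap_mock.get.assert_called_once_with(
            '/resource_providers/some-rp-uuid', raise_exc=False,
            microversion='1.14',
            headers={'X-Openstack-Request-Id': self.context.global_id})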