author     Balazs Gibizer <balazs.gibizer@est.tech>  2019-08-13 10:18:54 +0200
committer  Eric Fried <openstack@fried.cc>  2019-09-12 09:19:27 +0000
commit     5114b61d52766c3176c906e646e23eb4ba768954 (patch)
tree       ca37790fcd75a3724a338a41cb75da4f6a5a9444 /nova/scheduler
parent     5fa49cd0b8b6015aa61b4312b2ce1ae780c42c64 (diff)
download   nova-5114b61d52766c3176c906e646e23eb4ba768954.tar.gz
Support reverting migration / resize with bandwidth
When a migration is reverted, the source host allocation held by the
migration is swapped back to the instance. If the instance is bandwidth
aware, the allocation key of the port's binding:profile also needs to be
updated to point back to the resource providers of the source host
provider tree. To be able to do that, we have to re-calculate the request
group - resource provider mapping for the source host, based on the
resource requests in the neutron ports of the instance and the resource
allocation of the instance on the source host. Alternatively, we could
store such a mapping in the MigrationContext during the move operation.

blueprint: support-move-ops-with-qos-ports
Change-Id: Ib50b6b02208f5bd2972de8a6f8f685c19745514c
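A minimal sketch of how the revert path could use the new helper (the call
site, the migration variable, and the scheduler_utils alias are
illustrative assumptions, not part of this patch; get_allocs_for_consumer()
is the report client call for fetching a consumer's current allocations):

    # Source host allocation currently held by the migration record; this
    # is the allocation that gets swapped back to the instance on revert.
    allocs = report_client.get_allocs_for_consumer(
        context, migration.uuid)['allocations']
    # Re-calculate the request group - resource provider mapping so each
    # port's binding:profile allocation key can point back at the source
    # host providers.
    scheduler_utils.fill_provider_mapping_based_on_allocation(
        context, report_client, request_spec, allocs)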
Diffstat (limited to 'nova/scheduler')
-rw-r--r--  nova/scheduler/utils.py  35
1 file changed, 31 insertions(+), 4 deletions(-)
diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
index 0ffa54003b..110efc7f62 100644
--- a/nova/scheduler/utils.py
+++ b/nova/scheduler/utils.py
@@ -1146,6 +1146,33 @@ def fill_provider_mapping(
     ar = jsonutils.loads(host_selection.allocation_request)
     allocs = ar['allocations']
+    fill_provider_mapping_based_on_allocation(
+        context, report_client, request_spec, allocs)
+
+
+def fill_provider_mapping_based_on_allocation(
+        context, report_client, request_spec, allocation):
+    """Fills out the request group - resource provider mapping in the
+    request spec based on the current allocation of the instance.
+
+    The fill_provider_mapping() variant is expected to be called in every
+    scenario when a Selection object is available from the scheduler.
+    However, in the case of revert operations no such Selection exists, so
+    the mapping is calculated based on the allocation of the source host
+    the move operation is reverting to.
+
+    :param context: The security context
+    :param report_client: SchedulerReportClient instance to be used to
+        communicate with placement
+    :param request_spec: The RequestSpec object associated with the
+        operation
+    :param allocation: allocation dict of the instance, keyed by RP UUID.
+    """
+
+    # Exit early if this request spec does not require mappings.
+    if not request_spec.maps_requested_resources:
+        return
+
     # NOTE(gibi): Getting traits from placement for each instance in an
     # instance multi-create scenario is unnecessarily expensive. But
     # instance multi-create cannot be used with pre-created neutron ports
@@ -1158,9 +1185,9 @@ def fill_provider_mapping(
     provider_traits = {
         rp_uuid: report_client.get_provider_traits(
             context, rp_uuid).traits
-        for rp_uuid in allocs}
-    # NOTE(gibi): The allocs dict is in the format of the PUT /allocations
+        for rp_uuid in allocation}
+    # NOTE(gibi): The allocation dict is in the format of the PUT /allocations
     # and that format can change. The current format can be detected from
-    # host_selection.allocation_request_version
+    # the allocation_request_version key of the Selection object.
     request_spec.map_requested_resources_to_providers(
-        allocs, provider_traits)
+        allocation, provider_traits)
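For reference, the allocation argument passed to the new helper follows the
PUT /allocations request body format; a hypothetical example (made-up
provider UUID, standard bandwidth resource classes) could look like:

    allocation = {
        '9a1b2c3d-4e5f-4a70-8293-a4b5c6d7e8f9': {
            'resources': {
                'NET_BW_EGR_KILOBIT_PER_SEC': 1000,
                'NET_BW_IGR_KILOBIT_PER_SEC': 1000,
            },
        },
    }

With the traits fetched for each provider in this dict,
map_requested_resources_to_providers() can match every request group in the
RequestSpec to the resource provider that holds its allocation.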