commit     fd351903a1759109f66d0c66d42cd3a1b7f23861
author     Balazs Gibizer <balazs.gibizer@ericsson.com>  2018-10-10 16:53:49 +0200
committer  Balazs Gibizer <balazs.gibizer@ericsson.com>  2018-10-25 15:44:59 +0200
tree       fb0c36cd9942239a3d5ca728530a0e280fbc485c /nova/conductor
parent     66297f0c4ac99e38d9cf1cb3fbeb6fbaa93217c6
Reject forced move with nested source allocation
Both the os-migrateLive and evacuate server API actions support a force
flag. If force is set to True in the request then nova does not call
the scheduler but instead blindly copies the source host allocation to
the destination host. If the source host allocation contains resources
from more than just the root RP then such a blind copy cannot be done
properly. Therefore this patch detects that situation and rejects the
forced move operation if the server has such a complex allocation on
the source host.
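For illustration, the rejection logic can be sketched as follows (a
minimal sketch, not the exact helper added by this patch; the function
name and plumbing are hypothetical, and only the placement allocations
payload shape {rp_uuid: {'resources': {...}}} is assumed):

    # Hypothetical sketch of the nested-source-allocation guard.
    from nova import exception

    def _reject_forced_move_with_nested_allocation(allocations,
                                                   source_root_rp_uuid):
        # allocations: one consumer's allocations keyed by resource
        # provider UUID, as returned by placement for the instance.
        nested_rps = set(allocations) - {source_root_rp_uuid}
        if nested_rps:
            # A blind copy of the source allocation cannot place the
            # nested providers' resources correctly on the destination,
            # so the forced move is rejected.
            raise exception.NoValidHost(
                reason='Forcing the move is not supported when the '
                       'source allocation spans more than the root '
                       'resource provider.')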
There is a separate blueprint,
remove-force-flag-from-live-migrate-and-evacuate, that will remove the
force flag in a new API microversion.
Note that, before the force flag was added to these APIs, Nova bypassed
the scheduler whenever a target host was specified.
Blueprint: use-nested-allocation-candidates
Change-Id: I7cbd5d9fb875ebf72995362e0b6693492ce32051
Diffstat (limited to 'nova/conductor')
-rw-r--r--  nova/conductor/manager.py              6
-rw-r--r--  nova/conductor/tasks/live_migrate.py   2
-rw-r--r--  nova/conductor/tasks/migrate.py       18
3 files changed, 21 insertions, 5 deletions
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index d299491b2a..3bf63947d1 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -937,7 +937,11 @@ class ComputeTaskManager(base.Base):
                      'task_state': None}, ex, request_spec)
                 LOG.warning('Rebuild failed: %s', six.text_type(ex),
                             instance=instance)
-
+            except exception.NoValidHost:
+                with excutils.save_and_reraise_exception():
+                    if migration:
+                        migration.status = 'error'
+                        migration.save()
         else:
             # At this point, the user is either:
             #
diff --git a/nova/conductor/tasks/live_migrate.py b/nova/conductor/tasks/live_migrate.py
index acb29b7095..4deb809247 100644
--- a/nova/conductor/tasks/live_migrate.py
+++ b/nova/conductor/tasks/live_migrate.py
@@ -115,7 +115,7 @@ class LiveMigrationTask(base.TaskBase):
             scheduler_utils.claim_resources_on_destination(
                 self.context, self.scheduler_client.reportclient,
                 self.instance, source_node, dest_node,
-                source_node_allocations=self._held_allocations,
+                source_allocations=self._held_allocations,
                 consumer_generation=None)
 
             # dest_node is a ComputeNode object, so we need to get the actual
diff --git a/nova/conductor/tasks/migrate.py b/nova/conductor/tasks/migrate.py
index 8ba1668dc5..a26a7202cd 100644
--- a/nova/conductor/tasks/migrate.py
+++ b/nova/conductor/tasks/migrate.py
@@ -28,6 +28,17 @@ LOG = logging.getLogger(__name__)
 def replace_allocation_with_migration(context, instance, migration):
     """Replace instance's allocation with one for a migration.
 
+    :raises: keystoneauth1.exceptions.base.ClientException on failure to
+        communicate with the placement API
+    :raises: ConsumerAllocationRetrievalFailed if reading the current
+        allocation from placement fails
+    :raises: ComputeHostNotFound if the host of the instance is not found in
+        the database
+    :raises: AllocationMoveFailed if moving the allocation from the
+        instance.uuid to the migration.uuid fails due to a parallel
+        placement operation on the instance consumer
+    :raises: NoValidHost if placement rejects the update for other reasons
+        (e.g. not enough resources)
     :returns: (source_compute_node, migration_allocation)
     """
     try:
@@ -45,9 +56,10 @@ def replace_allocation_with_migration(context, instance, migration):
     schedclient = scheduler_client.SchedulerClient()
     reportclient = schedclient.reportclient
 
-    orig_alloc = reportclient.get_allocations_for_consumer_by_provider(
-        context, source_cn.uuid, instance.uuid)
-    if not orig_alloc:
+    orig_alloc = reportclient.get_allocs_for_consumer(
+        context, instance.uuid)['allocations']
+    root_alloc = orig_alloc.get(source_cn.uuid, {}).get('resources', {})
+    if not root_alloc:
         LOG.debug('Unable to find existing allocations for instance on '
                   'source compute node: %s. This is normal if you are not '
                   'using the FilterScheduler.', source_cn.uuid,
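As a usage illustration of the new retrieval pattern in migrate.py (the
payload below is made up; only its shape, which follows placement's
GET /allocations/{consumer_uuid} response returned by
get_allocs_for_consumer, is assumed):

    # One consumer's allocations keyed by resource provider UUID;
    # the UUIDs are readable placeholders for illustration only.
    payload = {
        'allocations': {
            'root-rp-uuid': {'resources': {'VCPU': 2, 'MEMORY_MB': 2048}},
            'nested-rp-uuid': {'resources': {'CUSTOM_MAGIC': 1}},
        },
    }

    source_cn_uuid = 'root-rp-uuid'
    orig_alloc = payload['allocations']
    # Pick out only the root provider's share, as the new code does.
    root_alloc = orig_alloc.get(source_cn_uuid, {}).get('resources', {})
    assert root_alloc == {'VCPU': 2, 'MEMORY_MB': 2048}
    # Any remaining providers are nested (or sharing) ones; their
    # presence is what makes the blind copy for forced moves unsafe.
    assert set(orig_alloc) - {source_cn_uuid} == {'nested-rp-uuid'}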