author     Zuul <zuul@review.opendev.org>  2022-08-20 06:35:50 +0000
committer  Gerrit Code Review <review@openstack.org>  2022-08-20 06:35:50 +0000
commit     783c855ede2021923d23c1d43763d38776d682f3 (patch)
tree       8cb36930bb0723525f0604aa77e7b53c6a7460e2
parent     fa540fabce020cade91adb52800197f688c843d9 (diff)
parent     75f9b288f8edfd24affe5ecbc1f3efb6a63726e4 (diff)
download   nova-783c855ede2021923d23c1d43763d38776d682f3.tar.gz
Merge "Don't unset Instance.old_flavor, new_flavor until necessary" into stable/train
-rw-r--r--  nova/compute/manager.py                             54
-rw-r--r--  nova/tests/functional/libvirt/test_numa_servers.py  13
-rw-r--r--  nova/tests/unit/compute/test_compute_mgr.py         11
3 files changed, 45 insertions(+), 33 deletions(-)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index ce74fec4c6..d29b58534b 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -4296,14 +4296,17 @@ class ComputeManager(manager.Manager):
instance=instance)
finally:
# Whether an error occurred or not, at this point the
- # instance is on the dest host so to avoid leaking
- # allocations in placement, delete them here.
+ # instance is on the dest host. Avoid leaking allocations
+ # in placement by deleting them here...
self._delete_allocation_after_move(
context, instance, migration)
- # Also as the instance is not any more on this host, update
- # the scheduler about the move
+ # ...inform the scheduler about the move...
self._delete_scheduler_instance_info(
context, instance.uuid)
+ # ...and unset the cached flavor information (this is done
+ # last since the resource tracker relies on it for its
+ # periodic tasks)
+ self._delete_stashed_flavor_info(instance)
do_confirm_resize(context, instance, migration.id)
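
For readers unfamiliar with the placement side of _delete_allocation_after_move: during a resize the source-node usage is held by a consumer keyed on the migration UUID, and confirming the resize deletes that consumer's allocations. A hedged sketch in raw REST terms (the real code goes through nova's SchedulerReportClient, not requests):

    import requests

    # Illustrative only: placement exposes DELETE /allocations/{consumer}.
    def delete_move_allocations(placement_url, migration_uuid, token):
        resp = requests.delete(
            '%s/allocations/%s' % (placement_url, migration_uuid),
            headers={'x-auth-token': token})
        resp.raise_for_status()  # 204 on success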
@@ -4342,13 +4345,6 @@ class ComputeManager(manager.Manager):
self.host, action=fields.NotificationAction.RESIZE_CONFIRM,
phase=fields.NotificationPhase.START)
- # NOTE(danms): delete stashed migration information
- old_instance_type = instance.old_flavor
- instance.old_flavor = None
- instance.new_flavor = None
- instance.system_metadata.pop('old_vm_state', None)
- instance.save()
-
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
migration.source_compute, teardown=True)
@@ -4373,8 +4369,9 @@ class ComputeManager(manager.Manager):
# instance.migration_context so make sure to not call
# instance.drop_migration_context() until after drop_move_claim
# is called.
- self.rt.drop_move_claim(context, instance, migration.source_node,
- old_instance_type, prefix='old_')
+ self.rt.drop_move_claim(
+ context, instance, migration.source_node, instance.old_flavor,
+ prefix='old_')
instance.drop_migration_context()
# NOTE(mriedem): The old_vm_state could be STOPPED but the user
@@ -4425,6 +4422,13 @@ class ComputeManager(manager.Manager):
'migration_uuid': migration.uuid})
raise
+ def _delete_stashed_flavor_info(self, instance):
+ """Remove information about the flavor change after a resize."""
+ instance.old_flavor = None
+ instance.new_flavor = None
+ instance.system_metadata.pop('old_vm_state', None)
+ instance.save()
+
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
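
A hypothetical micro-test for the new helper, assuming an importable nova tree (the test name and fake instance are editorial, not part of this change):

    import unittest
    from unittest import mock

    from nova.compute import manager

    class TestDeleteStashedFlavorInfo(unittest.TestCase):
        def test_clears_flavors_and_old_vm_state(self):
            instance = mock.Mock()
            instance.system_metadata = {'old_vm_state': 'stopped'}
            # The helper only touches its instance argument, so a mock
            # self is enough.
            manager.ComputeManager._delete_stashed_flavor_info(
                mock.Mock(), instance)
            self.assertIsNone(instance.old_flavor)
            self.assertIsNone(instance.new_flavor)
            self.assertNotIn('old_vm_state', instance.system_metadata)
            instance.save.assert_called_once_with()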
@@ -4542,6 +4546,16 @@ class ComputeManager(manager.Manager):
revert the resized attributes in the database.
"""
+ try:
+ self._finish_revert_resize(
+ context, instance, migration, request_spec)
+ finally:
+ self._delete_stashed_flavor_info(instance)
+
+ def _finish_revert_resize(
+ self, context, instance, migration, request_spec=None,
+ ):
+ """Inner version of finish_revert_resize."""
with self._error_out_instance_on_exception(context, instance):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
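
The revert path gets the same late cleanup via a wrapper rather than an inline finally: finish_revert_resize now delegates to _finish_revert_resize and clears the stashed flavor info afterwards, whether or not the inner call raised. A standalone reminder of the guarantee the wrapper leans on (generic Python, nothing nova-specific):

    # try/finally runs the cleanup whether the body returns or raises,
    # then re-raises the original exception if there was one.
    def run_with_cleanup(work, cleanup):
        try:
            return work()
        finally:
            cleanup()  # executes even if work() raised

    run_with_cleanup(lambda: 1 / 0, lambda: print('cleanup ran'))
    # prints 'cleanup ran', then the ZeroDivisionError propagates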
@@ -4551,18 +4565,16 @@ class ComputeManager(manager.Manager):
self.host, action=fields.NotificationAction.RESIZE_REVERT,
phase=fields.NotificationPhase.START, bdms=bdms)
- # NOTE(mriedem): delete stashed old_vm_state information; we
- # default to ACTIVE for backwards compatibility if old_vm_state
- # is not set
- old_vm_state = instance.system_metadata.pop('old_vm_state',
- vm_states.ACTIVE)
+ # Get stashed old_vm_state information to determine if guest should
+ # be powered on after spawn; we default to ACTIVE for backwards
+ # compatibility if old_vm_state is not set
+ old_vm_state = instance.system_metadata.get(
+ 'old_vm_state', vm_states.ACTIVE)
self._set_instance_info(instance, instance.old_flavor)
- instance.old_flavor = None
- instance.new_flavor = None
instance.host = migration.source_compute
instance.node = migration.source_node
- instance.save()
+ instance.save(expected_task_state=[task_states.RESIZE_REVERTING])
try:
source_allocations = self._revert_allocation(
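
Two smaller changes ride along in this last hunk: old_vm_state is now read with get() instead of pop(), since the pop happens later inside _delete_stashed_flavor_info, and the save gains expected_task_state, turning it into a guarded update. Roughly its semantics, sketched outside nova (the real check runs in the DB layer and raises UnexpectedTaskStateError):

    # Illustrative compare-and-swap: refuse to persist if another actor
    # changed task_state since we last read the instance.
    def guarded_save(instance, expected_task_states):
        if instance.task_state not in expected_task_states:
            raise RuntimeError(
                'unexpected task_state %r, expected one of %r'
                % (instance.task_state, expected_task_states))
        instance.save()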
diff --git a/nova/tests/functional/libvirt/test_numa_servers.py b/nova/tests/functional/libvirt/test_numa_servers.py
index d73bfc4503..97b81e82c4 100644
--- a/nova/tests/functional/libvirt/test_numa_servers.py
+++ b/nova/tests/functional/libvirt/test_numa_servers.py
@@ -679,8 +679,7 @@ class NUMAServersTest(NUMAServersTestBase):
self.ctxt, dst_host,
).numa_topology,
)
- # FIXME(stephenfin): There should still be two pinned cores here
- self.assertEqual(0, len(src_numa_topology.cells[0].pinned_cpus))
+ self.assertEqual(2, len(src_numa_topology.cells[0].pinned_cpus))
self.assertEqual(2, len(dst_numa_topology.cells[0].pinned_cpus))
# before continuing with the actual confirm process
@@ -724,14 +723,10 @@ class NUMAServersTest(NUMAServersTestBase):
# Now confirm the resize
- # FIXME(stephenfin): This should be successful, but it's failing with a
- # HTTP 500 due to bug #1879878
post = {'confirmResize': None}
- exc = self.assertRaises(
- client.OpenStackApiException,
- self.api.post_server_action, server['id'], post)
- self.assertEqual(500, exc.response.status_code)
- self.assertIn('CPUUnpinningInvalid', str(exc))
+ self.api.post_server_action(server['id'], post)
+
+ server = self._wait_for_state_change(server, 'ACTIVE')
class NUMAServerTestWithCountingQuotaFromPlacement(NUMAServersTest):
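
With the flavor info kept around, the functional test can drop both FIXMEs: the source NUMA topology keeps its two pinned cores until confirm, and the confirm action itself now succeeds instead of failing with CPUUnpinningInvalid (bug #1879878). The _wait_for_state_change helper comes from nova's functional test base classes; its rough shape, for readers outside the tree (illustrative, not the real implementation):

    import time

    def wait_for_state_change(api, server, expected_status, timeout=60):
        # Poll the server until it reaches the expected status or time out.
        deadline = time.time() + timeout
        while time.time() < deadline:
            server = api.get_server(server['id'])
            if server['status'] == expected_status:
                return server
            time.sleep(1)
        raise AssertionError('server never reached %s' % expected_status)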
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 526302de8a..17fcb0e99b 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -8315,9 +8315,14 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.migration)
mock_delete.assert_called_once_with(self.context, self.instance,
self.migration)
- mock_save.assert_called_with(expected_task_state=
- [None, task_states.DELETING,
- task_states.SOFT_DELETING])
+ mock_save.assert_has_calls([
+ mock.call(
+ expected_task_state=[
+ None, task_states.DELETING, task_states.SOFT_DELETING,
+ ],
+ ),
+ mock.call(),
+ ])
mock_delete_scheduler_info.assert_called_once_with(
self.context, self.instance.uuid)
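
The unit test changes follow from the new call ordering: instance.save() now runs twice during confirm, first with expected_task_state and then bare from _delete_stashed_flavor_info, so assert_called_with (which only checks the most recent call) gives way to assert_has_calls. A minimal demonstration with plain mock:

    from unittest import mock

    m = mock.Mock()
    m(expected_task_state=[None])
    m()
    m.assert_called_with()  # only inspects the last call
    # assert_has_calls checks the given calls occurred in order:
    m.assert_has_calls([mock.call(expected_task_state=[None]), mock.call()])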