-rw-r--r--  bindep.txt                                                                            3
-rw-r--r--  doc/source/admin/evacuate.rst                                                        14
-rw-r--r--  nova/compute/api.py                                                                   4
-rw-r--r--  nova/compute/manager.py                                                              18
-rw-r--r--  nova/compute/resource_tracker.py                                                     18
-rw-r--r--  nova/tests/functional/integrated_helpers.py                                          43
-rw-r--r--  nova/tests/functional/regressions/test_bug_1896463.py                               234
-rw-r--r--  nova/tests/functional/regressions/test_bug_1978983.py                                80
-rw-r--r--  nova/tests/unit/compute/test_compute_mgr.py                                          30
-rw-r--r--  nova/tests/unit/console/test_websocketproxy.py                                       59
-rw-r--r--  releasenotes/notes/ignore-instance-task-state-for-evacuation-e000f141d0153638.yaml   11
11 files changed, 451 insertions, 63 deletions
diff --git a/bindep.txt b/bindep.txt
index f0ca0afc12..9834820f05 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -34,7 +34,8 @@ postgresql-client [platform:dpkg]
postgresql-devel [platform:rpm test]
postgresql-server [platform:rpm]
python-dev [platform:dpkg test]
-python-devel [platform:rpm test]
+python-devel [platform:rpm !platform:base-py3 test]
+python3-devel [platform:rpm !platform:base-py2 test]
python3-all [platform:dpkg]
python3-all-dev [platform:dpkg]
python3-devel [platform:fedora]
diff --git a/doc/source/admin/evacuate.rst b/doc/source/admin/evacuate.rst
index ef9eccd931..18796d9c23 100644
--- a/doc/source/admin/evacuate.rst
+++ b/doc/source/admin/evacuate.rst
@@ -97,3 +97,17 @@ instances up and running.
using a pattern you might want to use the ``--strict`` flag which got introduced
in version 10.2.0 to make sure nova matches the ``FAILED_HOST``
exactly.
+
+.. note::
+ .. code-block:: bash
+
+ +------+--------+--------------+
+ | Name | Status | Task State   |
+ +------+--------+--------------+
+ | vm_1 | ACTIVE | powering-off |
+ +------+--------+--------------+
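+
+ A server in this state can still be evacuated, for example with the
+ nova client:
+
+ .. code-block:: bash
+
+    nova evacuate vm_1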
+
+ Evacuation is possible even if the instance task state is not None.
+ However, depending on the operation that was in progress, cleanup may be
+ required in other services that the instance was using, such as neutron,
+ cinder, glance, or the storage backend.
\ No newline at end of file
diff --git a/nova/compute/api.py b/nova/compute/api.py
index bc23cbeaa7..d2342e4965 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -4770,7 +4770,7 @@ class API(base.Base):
instance, migration.id)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
- vm_states.ERROR])
+ vm_states.ERROR], task_state=None)
def evacuate(self, context, instance, host, on_shared_storage,
admin_password=None, force=None):
"""Running evacuate to target host.
@@ -4797,7 +4797,7 @@ class API(base.Base):
context, instance.uuid)
instance.task_state = task_states.REBUILDING
- instance.save(expected_task_state=[None])
+ instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.EVACUATE)
# NOTE(danms): Create this as a tombstone for the source compute
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index fc0bdcb04e..0e013a3cc5 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -3479,19 +3479,11 @@ class ComputeManager(manager.Manager):
self._notify_instance_rebuild_error(context, instance, e, bdms)
raise
else:
- instance.apply_migration_context()
- # NOTE (ndipanov): This save will now update the host and node
- # attributes making sure that next RT pass is consistent since
- # it will be based on the instance and not the migration DB
- # entry.
- instance.host = self.host
- instance.node = scheduled_node
- instance.save()
- instance.drop_migration_context()
-
- # NOTE (ndipanov): Mark the migration as done only after we
- # mark the instance as belonging to this host.
- self._set_migration_status(migration, 'done')
+ # NOTE(gibi): Let the resource tracker set the instance
+ # host and drop the migration context, as we need to hold the
+ # COMPUTE_RESOURCE_SEMAPHORE to avoid the race with
+ # _update_available_resource. See bug 1896463.
+ self.rt.finish_evacuation(instance, scheduled_node, migration)
def _do_rebuild_instance_with_claim(self, claim_context, *args, **kwargs):
"""Helper to avoid deep nesting in the top-level method."""
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index b2ba0145cf..c34f705eaa 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -1801,3 +1801,21 @@ class ResourceTracker(object):
"""
self.pci_tracker.free_instance_claims(context, instance)
self.pci_tracker.save(context)
+
+ @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
+ def finish_evacuation(self, instance, node, migration):
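+ """Apply the migration context and update the instance host/node.
+
+ Runs under COMPUTE_RESOURCE_SEMAPHORE so that the host/node update and
+ the migration context drop cannot interleave with an overlapping
+ _update_available_resource periodic run (see bug 1896463).
+ """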
+ instance.apply_migration_context()
+ # NOTE (ndipanov): This save will now update the host and node
+ # attributes making sure that next RT pass is consistent since
+ # it will be based on the instance and not the migration DB
+ # entry.
+ instance.host = self.host
+ instance.node = node
+ instance.save()
+ instance.drop_migration_context()
+
+ # NOTE (ndipanov): Mark the migration as done only after we
+ # mark the instance as belonging to this host.
+ if migration:
+ migration.status = 'done'
+ migration.save()
diff --git a/nova/tests/functional/integrated_helpers.py b/nova/tests/functional/integrated_helpers.py
index 0ecafbe81d..44d12fdbf6 100644
--- a/nova/tests/functional/integrated_helpers.py
+++ b/nova/tests/functional/integrated_helpers.py
@@ -73,6 +73,12 @@ def generate_new_element(items, prefix, numeric=False):
LOG.debug("Random collision on %s", candidate)
+# Sentinel used as a default parameter value to distinguish between a caller
+# explicitly passing None and the parameter not being specified at all.
+NOT_SPECIFIED = object()
+
+
class _IntegratedTestBase(test.TestCase):
REQUIRES_LOCKING = True
ADMIN_API = False
@@ -385,6 +391,43 @@ class InstanceHelperMixin(object):
self._wait_for_state_change(self.api, server, server_expected_state)
self._wait_for_migration_status(server, [migration_expected_state])
+ def _evacuate_server(
+ self, server, extra_post_args=None, expected_host=None,
+ expected_state='ACTIVE', expected_task_state=NOT_SPECIFIED,
+ expected_migration_status='done'):
+ """Evacuate a server."""
+ api = getattr(self, 'admin_api', self.api)
+
+ post = {'evacuate': {}}
+ if extra_post_args:
+ post['evacuate'].update(extra_post_args)
+
+ expected_result = {'status': expected_state}
+ if expected_host:
+ expected_result['OS-EXT-SRV-ATTR:host'] = expected_host
+ if expected_task_state is not NOT_SPECIFIED:
+ expected_result['OS-EXT-STS:task_state'] = expected_task_state
+
+ api.post_server_action(server['id'], post)
+
+ # NOTE(gibi): The order of waiting for the migration and returning
+ # a fresh server from _wait_for_server_parameter is important, as
+ # the compute manager sets the status of the instance before it sets
+ # the host, and finally sets the migration status. So waiting for the
+ # migration first makes the returned server object more consistent.
+ self._wait_for_migration_status(server, [expected_migration_status])
+ return self._wait_for_server_parameter(api, server, expected_result)
+
+ def _start_server(self, server):
+ self.api.post_server_action(server['id'], {'os-start': None})
+ return self._wait_for_state_change(self.api, server, 'ACTIVE')
+
+ def _stop_server(self, server, wait_for_stop=True):
+ self.api.post_server_action(server['id'], {'os-stop': None})
+ if wait_for_stop:
+ return self._wait_for_state_change(self.api, server, 'SHUTOFF')
+ return server
+
class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
"""Base test class for functional tests that check provider usage
diff --git a/nova/tests/functional/regressions/test_bug_1896463.py b/nova/tests/functional/regressions/test_bug_1896463.py
new file mode 100644
index 0000000000..a6d38f4835
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1896463.py
@@ -0,0 +1,234 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import fixtures
+import time
+
+from oslo_config import cfg
+
+from nova import context
+from nova import objects
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional import fixtures as func_fixtures
+from nova.tests.functional import integrated_helpers
+from nova.tests.unit.image import fake as fake_image
+from nova import utils
+from nova.virt import fake
+
+
+CONF = cfg.CONF
+
+
+class TestEvacuateResourceTrackerRace(
+ test.TestCase, integrated_helpers.InstanceHelperMixin,
+):
+ """Demonstrate bug #1896463.
+
+ Trigger a race condition between an almost finished evacuation that is
+ dropping the migration context, and the _update_available_resource()
+ periodic task that already loaded the instance list but haven't loaded the
+ migration list yet. The result is that the PCI allocation made by the
+ evacuation is deleted by the overlapping periodic task run and the instance
+ will not have PCI allocation after the evacuation.
+ """
+
+ def setUp(self):
+ super(TestEvacuateResourceTrackerRace, self).setUp()
+ self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
+ fake_image.stub_out_image_service(self)
+ self.addCleanup(fake_image.FakeImageService_reset)
+ self.placement = self.useFixture(func_fixtures.PlacementFixture()).api
+
+ self.api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ api_version='v2.1'))
+
+ self.admin_api = self.api_fixture.admin_api
+ self.admin_api.microversion = 'latest'
+ self.api = self.admin_api
+
+ self.start_service('conductor')
+ self.start_service('scheduler')
+
+ self.flags(compute_driver='fake.FakeDriverWithPciResources')
+ self.useFixture(
+ fake.FakeDriverWithPciResources.
+ FakeDriverWithPciResourcesConfigFixture())
+
+ self.compute1 = self.start_service('compute', host='host1')
+
+ self.compute1_id = self._get_compute_node_id_by_host('host1')
+ self.compute1_service_id = self.admin_api.get_services(
+ host='host1', binary='nova-compute')[0]['id']
+
+ self.compute2 = self.start_service('compute', host='host2')
+ self.compute2_id = self._get_compute_node_id_by_host('host2')
+ self.compute2_service_id = self.admin_api.get_services(
+ host='host2', binary='nova-compute')[0]['id']
+
+ # Add an extra port and the related network to the neutron fixture
+ # specifically for these tests. They cannot be added globally in the
+ # fixture init, as adding a second network makes auto-allocation based
+ # tests fail due to ambiguous networks.
+ self.neutron._ports[self.neutron.sriov_port['id']] = \
+ copy.deepcopy(self.neutron.sriov_port)
+ self.neutron._networks[
+ self.neutron.network_2['id']] = self.neutron.network_2
+ self.neutron._subnets[
+ self.neutron.subnet_2['id']] = self.neutron.subnet_2
+
+ self.ctxt = context.get_admin_context()
+
+ def _get_compute_node_id_by_host(self, host):
+ # We specifically need the integer id of the node, not the UUID, so we
+ # need to use the old microversion.
+ with utils.temporary_mutation(self.admin_api, microversion='2.52'):
+ hypers = self.admin_api.api_get(
+ 'os-hypervisors').body['hypervisors']
+ for hyper in hypers:
+ if hyper['hypervisor_hostname'] == host:
+ return hyper['id']
+
+ self.fail('Hypervisor with hostname=%s not found' % host)
+
+ def _assert_pci_device_allocated(
+ self, instance_uuid, compute_node_id, num=1):
+ """Assert that a given number of PCI devices are allocated to the
+ instance on the given host.
+ """
+
+ devices = objects.PciDeviceList.get_by_instance_uuid(
+ self.ctxt, instance_uuid)
+ devices_on_host = [dev for dev in devices
+ if dev.compute_node_id == compute_node_id]
+ self.assertEqual(num, len(devices_on_host))
+
+ def test_evacuate_races_with_update_available_resource(self):
+ # Create a server with a direct port to have PCI allocation
+ server_req = self._build_minimal_create_server_request(
+ self.api, 'test-server-for-bug-1896463',
+ image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ networks=[{'port': self.neutron.sriov_port['id']}])
+ server_req['availability_zone'] = 'nova:host1'
+ created_server = self.api.post_server({'server': server_req})
+ server = self._wait_for_state_change(
+ self.admin_api, created_server, 'ACTIVE')
+
+ self._assert_pci_device_allocated(server['id'], self.compute1_id)
+ self._assert_pci_device_allocated(
+ server['id'], self.compute2_id, num=0)
+
+ # Stop and force down the compute service the instance is on, to allow
+ # evacuation.
+ self.compute1.stop()
+ self.admin_api.put_service(
+ self.compute1_service_id, {'forced_down': 'true'})
+
+ # Inject some sleeps both in the Instance.drop_migration_context and
+ # the MigrationList.get_in_progress_by_host_and_node code to make them
+ # overlap.
+ # We want to create the following execution scenario:
+ # 1) The evacuation makes a move claim on the dest including the PCI
+ # claim. This means there is a migration context. But the evacuation
+ # is not complete yet so the instance.host does not point to the
+ # dest host.
+ # 2) The dest resource tracker starts an _update_available_resource()
+ # periodic task and this task loads the list of instances on its
+ # host from the DB. Our instance is not in this list due to #1.
+ # 3) The evacuation finishes, the instance.host is set to the dest host
+ # and the migration context is deleted.
+ # 4) The periodic task now loads the list of in-progress migrations from
+ # the DB to check for incoming or outgoing migrations. However, due
+ # to #3, our instance is not in this list either.
+ # 5) The periodic task cleans up every lingering PCI claim that is not
+ # connected to any instance collected above from the instance list
+ # and from the migration list. As our instance is not in either of
+ # the lists, the resource tracker cleans up the PCI allocation for
+ # the already finished evacuation of our instance.
+ #
+ # Unfortunately we cannot reproduce the above situation without sleeps.
+ # We need the evac to start first, then the periodic to start but not
+ # finish, then the evac to finish, then the periodic to finish. If the
+ # whole periodic is triggered and run in a wrapper around
+ # drop_migration_context, the situation described in #4) cannot be
+ # reproduced. In general the timeline is not
+ #
+ # evac
+ # |
+ # |
+ # | periodic
+ # | |
+ # | |
+ # | x
+ # |
+ # |
+ # x
+ #
+ # but
+ #
+ # evac
+ # |
+ # |
+ # | periodic
+ # | |
+ # | |
+ # | |
+ # x |
+ # |
+ # x
+ #
+ # which is what we need.
+ #
+ # Starting the periodic from the test in a separate thread at
+ # drop_migration_context() might work, but that adds extra complexity to
+ # the test code. It would also likely still need a sleep to make the
+ # reproduction stable, though only one sleep instead of two.
+ orig_drop = objects.Instance.drop_migration_context
+
+ def slow_drop(*args, **kwargs):
+ time.sleep(1)
+ return orig_drop(*args, **kwargs)
+
+ self.useFixture(
+ fixtures.MockPatch(
+ 'nova.objects.instance.Instance.drop_migration_context',
+ new=slow_drop))
+
+ orig_get_mig = objects.MigrationList.get_in_progress_by_host_and_node
+
+ def slow_get_mig(*args, **kwargs):
+ time.sleep(2)
+ return orig_get_mig(*args, **kwargs)
+
+ self.useFixture(
+ fixtures.MockPatch(
+ 'nova.objects.migration.MigrationList.'
+ 'get_in_progress_by_host_and_node',
+ side_effect=slow_get_mig))
+
+ self.admin_api.post_server_action(server['id'], {'evacuate': {}})
+ # Trigger the _update_available_resource periodic task to overlap with
+ # the already started evacuation.
+ ctx = context.get_admin_context()
+ self.compute1.manager.update_available_resource(ctx)
+ self.compute2.manager.update_available_resource(ctx)
+
+ self._wait_for_server_parameter(
+ self.admin_api,
+ server,
+ {'OS-EXT-SRV-ATTR:host': 'host2', 'status': 'ACTIVE'}
+ )
+
+ self._assert_pci_device_allocated(server['id'], self.compute1_id)
+ self._assert_pci_device_allocated(server['id'], self.compute2_id)
diff --git a/nova/tests/functional/regressions/test_bug_1978983.py b/nova/tests/functional/regressions/test_bug_1978983.py
new file mode 100644
index 0000000000..49430041ce
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1978983.py
@@ -0,0 +1,80 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional import fixtures as func_fixtures
+from nova.tests.functional import integrated_helpers
+from nova.tests.unit.image import fake
+
+
+class EvacuateServerWithTaskState(
+ test.TestCase, integrated_helpers.InstanceHelperMixin,
+):
+ """Regression test for bug 1978983
+ If instance task state is powering-off or not None
+ instance should be allowed to evacuate.
+ """
+
+ def setUp(self):
+ super(EvacuateServerWithTaskState, self).setUp()
+ # Stub out external dependencies.
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ fake.stub_out_image_service(self)
+ self.useFixture(func_fixtures.PlacementFixture())
+ self.useFixture(nova_fixtures.HostNameWeigherFixture())
+
+ # Start nova controller services.
+ self.start_service('conductor')
+ self.start_service('scheduler')
+
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ api_version='v2.1'))
+ self.admin_api = api_fixture.admin_api
+ self.api = api_fixture.api
+
+ self.image_id = self.api.get_images()[0]['id']
+
+ self.admin_api.microversion = 'latest'
+ self.api.microversion = 'latest'
+
+ self.src = self._start_compute(host='host1')
+ self.dest = self._start_compute(host='host2')
+
+ def test_evacuate_instance(self):
+ """Evacuating a server
+ """
+ server = self.admin_api.post_server(
+ dict(server=self._build_minimal_create_server_request(
+ self.api, 'test_evacuate_instance', self.image_id,
+ networks='none')))
+
+ server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+ self.assertEqual(self.src.host, server['OS-EXT-SRV-ATTR:host'])
+
+ # stop host1 compute service
+ self.src.stop()
+ self.api.put_service_force_down(self.src.service_ref.uuid, True)
+
+ # poweroff instance
+ self._stop_server(server, wait_for_stop=False)
+ server = self._wait_for_server_parameter(self.admin_api,
+ server, {'OS-EXT-STS:task_state': 'powering-off'})
+
+ # evacuate instance
+ server = self._evacuate_server(
+ server, expected_host=self.dest.host
+ )
+ self.assertEqual(self.dest.host, server['OS-EXT-SRV-ATTR:host'])
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index b41577586e..c22aa01ffd 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -5161,18 +5161,21 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
node = uuidutils.generate_uuid() # ironic node uuid
instance = fake_instance.fake_instance_obj(self.context, node=node)
instance.migration_context = None
+ migration = objects.Migration(status='accepted')
with test.nested(
mock.patch.object(self.compute, '_get_compute_info'),
mock.patch.object(self.compute, '_do_rebuild_instance_with_claim'),
mock.patch.object(objects.Instance, 'save'),
- mock.patch.object(self.compute, '_set_migration_status'),
- ) as (mock_get, mock_rebuild, mock_save, mock_set):
- self.compute.rebuild_instance(self.context, instance, None, None,
- None, None, None, None, False,
- False, False, None, None, {}, None)
+ mock.patch.object(migration, 'save'),
+ ) as (mock_get, mock_rebuild, mock_save, mock_migration_save):
+ self.compute.rebuild_instance(
+ self.context, instance, None, None,
+ None, None, None, None, False,
+ False, False, migration, None, {}, None)
self.assertFalse(mock_get.called)
self.assertEqual(node, instance.node)
- mock_set.assert_called_once_with(None, 'done')
+ self.assertEqual('done', migration.status)
+ mock_migration_save.assert_called_once_with()
def test_rebuild_node_updated_if_recreate(self):
dead_node = uuidutils.generate_uuid()
@@ -5185,16 +5188,15 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
with test.nested(
mock.patch.object(self.compute, '_get_compute_info'),
mock.patch.object(self.compute, '_do_rebuild_instance_with_claim'),
- mock.patch.object(objects.Instance, 'save'),
- mock.patch.object(self.compute, '_set_migration_status'),
- ) as (mock_get, mock_rebuild, mock_save, mock_set):
+ ) as (mock_get, mock_rebuild):
mock_get.return_value.hypervisor_hostname = 'new-node'
- self.compute.rebuild_instance(self.context, instance, None, None,
- None, None, None, None, True,
- False, False, None, None, {}, None)
+ self.compute.rebuild_instance(
+ self.context, instance, None, None, None, None, None,
+ None, True, False, False, mock.sentinel.migration, None, {},
+ None)
mock_get.assert_called_once_with(mock.ANY, self.compute.host)
- self.assertEqual('new-node', instance.node)
- mock_set.assert_called_once_with(None, 'done')
+ mock_rt.finish_evacuation.assert_called_once_with(
+ instance, 'new-node', mock.sentinel.migration)
# Make sure the rebuild_claim was called with the proper image_meta
# from the instance.
mock_rt.rebuild_claim.assert_called_once()
diff --git a/nova/tests/unit/console/test_websocketproxy.py b/nova/tests/unit/console/test_websocketproxy.py
index 27677bbf78..46a51dbf7c 100644
--- a/nova/tests/unit/console/test_websocketproxy.py
+++ b/nova/tests/unit/console/test_websocketproxy.py
@@ -627,12 +627,12 @@ class NovaProxyRequestHandlerBaseTestCase(test.NoDBTestCase):
self.wh.server.top_new_client(conn, address)
self.assertIsNone(self.wh._compute_rpcapi)
- def test_reject_open_redirect(self):
+ def test_reject_open_redirect(self, url='//example.com/%2F..'):
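+ # The target URL is parameterized so that variants (such as the
+ # three-slash case below) can reuse this test body.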
# This will test the behavior when an attempt is made to cause an open
# redirect. It should be rejected.
mock_req = mock.MagicMock()
mock_req.makefile().readline.side_effect = [
- b'GET //example.com/%2F.. HTTP/1.1\r\n',
+ ('GET %s HTTP/1.1\r\n' % url).encode('utf-8'),
b''
]
@@ -657,39 +657,32 @@ class NovaProxyRequestHandlerBaseTestCase(test.NoDBTestCase):
result = output.readlines()
# Verify no redirect happens and instead a 400 Bad Request is returned.
- self.assertIn('400 URI must not start with //', result[0].decode())
+ # NOTE: As of python 3.10.6 there is a fix for this vulnerability,
+ # which instead causes a 301 Moved Permanently response that redirects
+ # to a sanitized version of the URL with the extra leading '/'
+ # characters removed.
+ # See https://github.com/python/cpython/issues/87389 for details.
+ # We will consider either response to be valid for this test. This will
+ # also help if and when the above fix gets backported to older versions
+ # of python.
+ errmsg = result[0].decode()
+ expected_nova = '400 URI must not start with //'
+ expected_cpython = '301 Moved Permanently'
+
+ self.assertTrue(expected_nova in errmsg or expected_cpython in errmsg)
+
+ # If we detect the cpython fix, verify that the redirect location is
+ # now the same url but with extra leading '/' characters removed.
+ if expected_cpython in errmsg:
+ location = result[3].decode()
+ location = location.removeprefix('Location: ').rstrip('\r\n')
+ self.assertTrue(
+ location.startswith('/example.com/%2F..'),
+ msg='Redirect location is not the expected sanitized URL',
+ )
def test_reject_open_redirect_3_slashes(self):
- # This will test the behavior when an attempt is made to cause an open
- # redirect. It should be rejected.
- mock_req = mock.MagicMock()
- mock_req.makefile().readline.side_effect = [
- b'GET ///example.com/%2F.. HTTP/1.1\r\n',
- b''
- ]
-
- client_addr = ('8.8.8.8', 54321)
- mock_server = mock.MagicMock()
- # This specifies that the server will be able to handle requests other
- # than only websockets.
- mock_server.only_upgrade = False
-
- # Constructing a handler will process the mock_req request passed in.
- handler = websocketproxy.NovaProxyRequestHandler(
- mock_req, client_addr, mock_server)
-
- # Collect the response data to verify at the end. The
- # SimpleHTTPRequestHandler writes the response data to a 'wfile'
- # attribute.
- output = io.BytesIO()
- handler.wfile = output
- # Process the mock_req again to do the capture.
- handler.do_GET()
- output.seek(0)
- result = output.readlines()
-
- # Verify no redirect happens and instead a 400 Bad Request is returned.
- self.assertIn('400 URI must not start with //', result[0].decode())
+ self.test_reject_open_redirect(url='///example.com/%2F..')
class NovaWebsocketSecurityProxyTestCase(test.NoDBTestCase):
diff --git a/releasenotes/notes/ignore-instance-task-state-for-evacuation-e000f141d0153638.yaml b/releasenotes/notes/ignore-instance-task-state-for-evacuation-e000f141d0153638.yaml
new file mode 100644
index 0000000000..46ebf0bd2d
--- /dev/null
+++ b/releasenotes/notes/ignore-instance-task-state-for-evacuation-e000f141d0153638.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+ If the compute service is down on the source node and the user tries to
+ stop an instance, the instance gets stuck at powering-off, and evacuation
+ then fails with: Cannot 'evacuate' instance <instance-id> while it is in
+ task_state powering-off.
+ Evacuation now ignores the instance task state.
+ For more details see: `bug 1978983`_
+
+ .. _`bug 1978983`: https://bugs.launchpad.net/nova/+bug/1978983
\ No newline at end of file