diff options
author | Lee Yarwood <lyarwood@redhat.com> | 2017-01-31 15:26:38 +0000 |
---|---|---|
committer | Lee Yarwood <lyarwood@redhat.com> | 2017-02-03 09:51:52 +0000 |
commit | 9f8d15dcc1b4db9b1116d19b6af0f6cac15ff9fe (patch) | |
tree | 515a553816def1e98abb61ad5d2b2d4334d57ad4 | |
parent | 99f8a3c4e9d903d48e5c7e245bcb2d3299b7904d (diff) | |
download | nova-9f8d15dcc1b4db9b1116d19b6af0f6cac15ff9fe.tar.gz |
libvirt: Limit destroying disks during cleanup to spawn
Iab5afdf1b5b now ensures that cleanup is always called when VIF plugging
errors are encountered by _create_domain_and_network. At present cleanup
is always called with destroy_disks=True leading to any local instance
files being removed from the host.
_create_domain_and_network itself has various callers such as resume and
hard_reboot that assume these files will persist across any such failures.
As a result the removal of these files will leave instances in an
unbootable state.
In order to correct this an additional destroy_disks_on_failure kwarg
is now provided to _create_domain_and_network and passed down into
cleanup. This kwarg defaults to False and is only enabled when
_create_domain_and_network is used to spawn a new instance.
Closes-bug: #1660647
Change-Id: I38c969690fedb71c5b5ec4418c1b0dd53df733ec
(cherry picked from commit 67aa277b4ef623c9877b97bfd7952f0bb1d80a81)
-rw-r--r-- | nova/tests/unit/virt/libvirt/test_driver.py | 34 | ||||
-rw-r--r-- | nova/virt/libvirt/driver.py | 20 |
2 files changed, 41 insertions, 13 deletions
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py index fff91a4cdc..37e62db1f0 100644 --- a/nova/tests/unit/virt/libvirt/test_driver.py +++ b/nova/tests/unit/virt/libvirt/test_driver.py @@ -10185,7 +10185,8 @@ class LibvirtConnTestCase(test.NoDBTestCase): def fake_create_domain_and_network( context, xml, instance, network_info, disk_info, block_device_info=None, power_on=True, reboot=False, - vifs_already_plugged=False, post_xml_callback=None): + vifs_already_plugged=False, post_xml_callback=None, + destroy_disks_on_failure=False): # The config disk should be created by this callback, so we need # to execute it. post_xml_callback() @@ -14214,14 +14215,22 @@ class LibvirtConnTestCase(test.NoDBTestCase): drvr._create_domain_and_network, self.context, 'xml', instance, [], None) mock_cleanup.assert_called_once_with(self.context, instance, - [], None, None) + [], None, None, False) + # destroy_disks_on_failure=True, used only by spawn() + mock_cleanup.reset_mock() + self.assertRaises(test.TestingException, + drvr._create_domain_and_network, + self.context, 'xml', instance, [], None, + destroy_disks_on_failure=True) + mock_cleanup.assert_called_once_with(self.context, instance, + [], None, None, True) the_test() def test_cleanup_failed_start_no_guest(self): drvr = libvirt_driver.LibvirtDriver(mock.MagicMock(), False) with mock.patch.object(drvr, 'cleanup') as mock_cleanup: - drvr._cleanup_failed_start(None, None, None, None, None) + drvr._cleanup_failed_start(None, None, None, None, None, False) self.assertTrue(mock_cleanup.called) def test_cleanup_failed_start_inactive_guest(self): @@ -14229,7 +14238,7 @@ class LibvirtConnTestCase(test.NoDBTestCase): guest = mock.MagicMock() guest.is_active.return_value = False with mock.patch.object(drvr, 'cleanup') as mock_cleanup: - drvr._cleanup_failed_start(None, None, None, None, guest) + drvr._cleanup_failed_start(None, None, None, None, guest, False) 
self.assertTrue(mock_cleanup.called) self.assertFalse(guest.poweroff.called) @@ -14238,7 +14247,7 @@ class LibvirtConnTestCase(test.NoDBTestCase): guest = mock.MagicMock() guest.is_active.return_value = True with mock.patch.object(drvr, 'cleanup') as mock_cleanup: - drvr._cleanup_failed_start(None, None, None, None, guest) + drvr._cleanup_failed_start(None, None, None, None, guest, False) self.assertTrue(mock_cleanup.called) self.assertTrue(guest.poweroff.called) @@ -14250,10 +14259,23 @@ class LibvirtConnTestCase(test.NoDBTestCase): with mock.patch.object(drvr, 'cleanup') as mock_cleanup: self.assertRaises(test.TestingException, drvr._cleanup_failed_start, - None, None, None, None, guest) + None, None, None, None, guest, False) self.assertTrue(mock_cleanup.called) self.assertTrue(guest.poweroff.called) + def test_cleanup_failed_start_failed_poweroff_destroy_disks(self): + drvr = libvirt_driver.LibvirtDriver(mock.MagicMock(), False) + guest = mock.MagicMock() + guest.is_active.return_value = True + guest.poweroff.side_effect = test.TestingException + with mock.patch.object(drvr, 'cleanup') as mock_cleanup: + self.assertRaises(test.TestingException, + drvr._cleanup_failed_start, + None, None, None, None, guest, True) + mock_cleanup.called_once_with(None, None, network_info=None, + block_device_info=None, destroy_disks=True) + self.assertTrue(guest.poweroff.called) + @mock.patch('nova.volume.encryptors.get_encryption_metadata') @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm') def test_create_with_bdm(self, get_info_from_bdm, get_encryption_metadata): diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index ee0ab5bf66..a56bf16e1b 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -2612,7 +2612,8 @@ class LibvirtDriver(driver.ComputeDriver): self._create_domain_and_network( context, xml, instance, network_info, disk_info, block_device_info=block_device_info, - post_xml_callback=gen_confdrive) + 
post_xml_callback=gen_confdrive, + destroy_disks_on_failure=True) LOG.debug("Instance is running", instance=instance) def _wait_for_boot(): @@ -4802,19 +4803,21 @@ class LibvirtDriver(driver.ComputeDriver): for vif in network_info if vif.get('active', True) is False] def _cleanup_failed_start(self, context, instance, network_info, - block_device_info, guest): + block_device_info, guest, destroy_disks): try: if guest and guest.is_active(): guest.poweroff() finally: self.cleanup(context, instance, network_info=network_info, - block_device_info=block_device_info) + block_device_info=block_device_info, + destroy_disks=destroy_disks) def _create_domain_and_network(self, context, xml, instance, network_info, disk_info, block_device_info=None, power_on=True, reboot=False, vifs_already_plugged=False, - post_xml_callback=None): + post_xml_callback=None, + destroy_disks_on_failure=False): """Do required network setup and create domain.""" block_device_mapping = driver.block_device_info_get_mapping( @@ -4866,7 +4869,8 @@ class LibvirtDriver(driver.ComputeDriver): # bail here with excutils.save_and_reraise_exception(): self._cleanup_failed_start(context, instance, network_info, - block_device_info, guest) + block_device_info, guest, + destroy_disks_on_failure) except eventlet.timeout.Timeout: # We never heard from Neutron LOG.warning(_LW('Timeout waiting for vif plugging callback for ' @@ -4874,7 +4878,8 @@ class LibvirtDriver(driver.ComputeDriver): instance=instance) if CONF.vif_plugging_is_fatal: self._cleanup_failed_start(context, instance, network_info, - block_device_info, guest) + block_device_info, guest, + destroy_disks_on_failure) raise exception.VirtualInterfaceCreateException() except Exception: # Any other error, be sure to clean up @@ -4882,7 +4887,8 @@ class LibvirtDriver(driver.ComputeDriver): instance=instance) with excutils.save_and_reraise_exception(): self._cleanup_failed_start(context, instance, network_info, - block_device_info, guest) + block_device_info, 
guest, + destroy_disks_on_failure) # Resume only if domain has been paused if pause: |