| author | Zuul <zuul@review.openstack.org> | 2018-02-16 00:27:32 +0000 |
|---|---|---|
| committer | Gerrit Code Review <review@openstack.org> | 2018-02-16 00:27:32 +0000 |
| commit | 28937e61665c929ea24fe75b5610e12e1335f455 (patch) | |
| tree | b6fad889146c55556d6967eee5d9b7ab97472b44 | |
| parent | 01b756f960ed19ab801994d08d749dd94d729a22 (diff) | |
| parent | 5a10047f9dd4383f6ec9046d392f26c93f8420d4 (diff) | |
Merge "Refine waiting for vif plug events during _hard_reboot" into stable/queens
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | nova/tests/unit/virt/libvirt/test_driver.py | 37 |
| -rw-r--r-- | nova/virt/libvirt/driver.py | 63 |

2 files changed, 80 insertions, 20 deletions
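In brief: before this change, `_hard_reboot` passed `vifs_already_plugged=True` to `_create_domain_and_network`, which meant "do not wait for neutron plug events" at all. The merged change passes `reboot=True` instead, and `_get_neutron_events` gains a `reboot` flag: during a reboot the stale `active` status in the network info cache is ignored, and a `network-vif-plugged` event is expected for every vif except the bridge type. A minimal standalone sketch of that selection rule (plain dicts stand in for `nova.network.model.VIF` objects; this is not Nova's actual code):

```python
# Standalone sketch of the reboot-aware event selection rule; plain dicts
# stand in for nova.network.model.VIF objects.
VIF_TYPE_BRIDGE = 'bridge'


def expected_plug_events(network_info, reboot=False):
    def eventable(vif):
        if reboot:
            # The network info cache is stale during a hard reboot, so the
            # 'active' flag is ignored; bridge vifs are skipped because the
            # neutron agent may not notice the os-vif unplug/replug.
            return vif.get('type') != VIF_TYPE_BRIDGE
        # Outside a reboot, only wait for vifs known to be down.
        return vif.get('active', True) is False

    return [('network-vif-plugged', vif['id'])
            for vif in network_info if eventable(vif)]


# Same shape as the new test_get_neutron_events_reboot test in the diff
# below: vifs 1, 2 and 4 produce events during a reboot; the bridge vif
# (3) does not.
vifs = [{'id': '1'},
        {'id': '2', 'active': True},
        {'id': '3', 'type': VIF_TYPE_BRIDGE},
        {'id': '4', 'type': 'ovs'}]
assert expected_plug_events(vifs, reboot=True) == [
    ('network-vif-plugged', '1'),
    ('network-vif-plugged', '2'),
    ('network-vif-plugged', '4')]
assert expected_plug_events(vifs) == []  # nothing is down, so no waiting
```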
```diff
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 1ec8f446fa..77cf199552 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -12661,6 +12661,28 @@ class LibvirtConnTestCase(test.NoDBTestCase,
         mock_hard_reboot.assert_called_once_with(self.context, instance, [],
                                                  None)
 
+    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_neutron_events')
+    @mock.patch('nova.virt.libvirt.LibvirtDriver.plug_vifs')
+    @mock.patch('nova.virt.libvirt.LibvirtDriver._lxc_disk_handler')
+    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain')
+    def test_create_domain_and_network_reboot(self, mock_create, mock_handler,
+                                              mock_plug, mock_events):
+        # Verify that we call get_neutron_events with reboot=True if
+        # create_domain_and_network was called with reboot=True
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        instance = objects.Instance(**self.test_instance)
+        network_info = _fake_network_info(self, 1)
+
+        @mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering')
+        @mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter')
+        @mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')
+        def _do_create(mock_apply, mock_prepare, mock_setup):
+            drvr._create_domain_and_network(self.context, mock.sentinel.xml,
+                                            instance, network_info,
+                                            reboot=True)
+        _do_create()
+        mock_events.assert_called_once_with(network_info, reboot=True)
+
     @mock.patch('nova.virt.libvirt.LibvirtDriver.get_info')
     @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
     @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_xml')
@@ -12730,7 +12752,7 @@
             block_device_info=block_device_info, mdevs=[uuids.mdev1])
         mock_create_domain_and_network.assert_called_once_with(self.context,
             dummyxml, instance, network_info,
-            block_device_info=block_device_info, vifs_already_plugged=True)
+            block_device_info=block_device_info, reboot=True)
 
     @mock.patch('oslo_utils.fileutils.ensure_tree')
     @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
@@ -15633,6 +15655,19 @@
         events = drvr._get_neutron_events(network_info)
         self.assertEqual([('network-vif-plugged', '1')], events)
 
+    def test_get_neutron_events_reboot(self):
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        bridge = network_model.VIF_TYPE_BRIDGE
+        ovs = network_model.VIF_TYPE_OVS
+        network_info = [network_model.VIF(id='1'),
+                        network_model.VIF(id='2', active=True),
+                        network_model.VIF(id='3', type=bridge),
+                        network_model.VIF(id='4', type=ovs)]
+        events = drvr._get_neutron_events(network_info, reboot=True)
+        self.assertEqual([('network-vif-plugged', '1'),
+                          ('network-vif-plugged', '2'),
+                          ('network-vif-plugged', '4')], events)
+
     def test_unplug_vifs_ignores_errors(self):
         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
         with mock.patch.object(drvr, 'vif_driver') as vif_driver:
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 8463c24853..e8b4bf0aaf 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -2708,14 +2708,9 @@ class LibvirtDriver(driver.ComputeDriver):
 
         # Initialize all the necessary networking, block devices and
         # start the instance.
-        # NOTE(melwitt): Pass vifs_already_plugged=True here even though we've
-        # unplugged vifs earlier. The behavior of neutron plug events depends
-        # on which vif type we're using and we are working with a stale network
-        # info cache here, so won't rely on waiting for neutron plug events.
-        # vifs_already_plugged=True means "do not wait for neutron plug events"
-        self._create_domain_and_network(context, xml, instance, network_info,
-                                        block_device_info=block_device_info,
-                                        vifs_already_plugged=True)
+        self._create_domain_and_network(
+            context, xml, instance, network_info,
+            block_device_info=block_device_info, reboot=True)
 
         self._prepare_pci_devices_for_use(
             pci_manager.get_instance_pci_devs(instance, 'all'))
@@ -5385,14 +5380,25 @@
             if CONF.vif_plugging_is_fatal:
                 raise exception.VirtualInterfaceCreateException()
 
-    def _get_neutron_events(self, network_info):
-        # NOTE(danms): We need to collect any VIFs that are currently
-        # down that we expect a down->up event for. Anything that is
-        # already up will not undergo that transition, and for
-        # anything that might be stale (cache-wise) assume it's
-        # already up so we don't block on it.
+    def _get_neutron_events(self, network_info, reboot=False):
+        def eventable(vif):
+            if reboot:
+                # NOTE(melwitt): We won't expect events for the bridge vif
+                # type during a reboot because the neutron agent might not
+                # detect that we have unplugged and plugged vifs with os-vif.
+                # We also disregard the 'active' status of the vif during a
+                # reboot because the stale network_info we get from the compute
+                # manager won't show active=False for the vifs we've unplugged.
+                return vif.get('type') != network_model.VIF_TYPE_BRIDGE
+            # NOTE(danms): We need to collect any VIFs that are currently
+            # down that we expect a down->up event for. Anything that is
+            # already up will not undergo that transition, and for
+            # anything that might be stale (cache-wise) assume it's
+            # already up so we don't block on it.
+            return vif.get('active', True) is False
+
         return [('network-vif-plugged', vif['id'])
-                for vif in network_info if vif.get('active', True) is False]
+                for vif in network_info if eventable(vif)]
 
     def _cleanup_failed_start(self, context, instance, network_info,
                               block_device_info, guest, destroy_disks):
@@ -5408,14 +5414,33 @@
                                    block_device_info=None, power_on=True,
                                    vifs_already_plugged=False,
                                    post_xml_callback=None,
-                                   destroy_disks_on_failure=False):
-
-        """Do required network setup and create domain."""
+                                   destroy_disks_on_failure=False,
+                                   reboot=False):
+
+        """Do required network setup and create domain.
+
+        :param context: nova.context.RequestContext for volume API calls
+        :param xml: Guest domain XML
+        :param instance: nova.objects.Instance object
+        :param network_info: nova.network.model.NetworkInfo for the instance
+        :param block_device_info: Legacy block device info dict
+        :param power_on: Whether to power on the guest after creating the XML
+        :param vifs_already_plugged: False means "wait for neutron plug
+                                     events" if using neutron, qemu/kvm,
+                                     power_on=True, and
+                                     CONF.vif_plugging_timeout configured
+        :param post_xml_callback: Optional callback to call after creating the
+                                  guest domain XML
+        :param destroy_disks_on_failure: Whether to destroy the disks if we
+                                         fail during guest domain creation
+        :param reboot: Whether or not this is being called during a reboot. If
+                       we are rebooting, we will need to handle waiting for
+                       neutron plug events differently
+        """
         timeout = CONF.vif_plugging_timeout
         if (self._conn_supports_start_paused and
                 utils.is_neutron() and
                 not vifs_already_plugged and power_on and timeout):
-            events = self._get_neutron_events(network_info)
+            events = self._get_neutron_events(network_info, reboot=reboot)
         else:
             events = []
```
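For context, here is a hedged, self-contained sketch of how the `reboot` flag changes what the hard-reboot path waits on, combining the event gate from `_create_domain_and_network` with the selection rule above. `events_to_wait_for` and its parameters are illustrative names, not Nova's API:

```python
# Illustrative, self-contained sketch (not Nova's API) contrasting the old
# and new hard-reboot calls. Before this change, _hard_reboot passed
# vifs_already_plugged=True, which suppressed event collection entirely;
# now it passes reboot=True and waits on every non-bridge vif.
VIF_TYPE_BRIDGE = 'bridge'


def events_to_wait_for(network_info, vifs_already_plugged=False,
                       reboot=False, is_neutron=True, power_on=True,
                       timeout=300):
    # Mirrors the gate in _create_domain_and_network: no waiting unless
    # neutron is in use, the guest will be powered on, a plug timeout is
    # configured, and the caller did not claim the vifs are already plugged.
    if not (is_neutron and not vifs_already_plugged and power_on and timeout):
        return []
    if reboot:
        # Stale cache: ignore 'active', skip only bridge-type vifs.
        vifs = [v for v in network_info if v.get('type') != VIF_TYPE_BRIDGE]
    else:
        vifs = [v for v in network_info if v.get('active', True) is False]
    return [('network-vif-plugged', v['id']) for v in vifs]


vifs = [{'id': '1', 'type': 'ovs'}, {'id': '2', 'type': VIF_TYPE_BRIDGE}]
# Old hard-reboot call: vifs_already_plugged=True meant an empty event list.
assert events_to_wait_for(vifs, vifs_already_plugged=True) == []
# New hard-reboot call: reboot=True waits on the ovs vif, skips the bridge.
assert events_to_wait_for(vifs, reboot=True) == [('network-vif-plugged', '1')]
```

The design trade-off the comments in the diff describe: waiting on bridge-type vifs would risk a timeout, since the neutron agent may never emit an event for an os-vif unplug/replug it did not observe, while skipping the `active` check avoids trusting a stale network info cache.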