-rw-r--r-- | nova/cells/messaging.py                   |  2
-rw-r--r-- | nova/compute/api.py                       |  6
-rw-r--r-- | nova/compute/manager.py                   | 28
-rw-r--r-- | nova/network/neutronv2/api.py             |  5
-rw-r--r-- | nova/tests/compute/test_compute.py        | 11
-rw-r--r-- | nova/tests/compute/test_compute_mgr.py    | 63
-rw-r--r-- | nova/tests/network/test_neutronv2.py      | 49
-rw-r--r-- | nova/tests/virt/hyperv/test_vmutils.py    |  4
-rw-r--r-- | nova/tests/virt/hyperv/test_vmutilsv2.py  |  4
-rw-r--r-- | nova/tests/virt/libvirt/test_driver.py    | 70
-rw-r--r-- | nova/tests/virt/libvirt/test_rbd.py       | 30
-rw-r--r-- | nova/virt/hyperv/vmutils.py               |  5
-rw-r--r-- | nova/virt/hyperv/vmutilsv2.py             |  4
-rw-r--r-- | nova/virt/ironic/driver.py                |  2
-rw-r--r-- | nova/virt/libvirt/driver.py               | 21
-rw-r--r-- | nova/virt/libvirt/rbd_utils.py            | 31
16 files changed, 286 insertions, 49 deletions
diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py
index 87f691e34e..37ede57fb3 100644
--- a/nova/cells/messaging.py
+++ b/nova/cells/messaging.py
@@ -750,7 +750,7 @@ class _TargetedMessageMethods(_BaseMessageMethods):
 
     def service_delete(self, message, service_id):
         """Deletes the specified service."""
-        self.host_api.service_delete(message.ctxt, service_id)
+        self.host_api._service_delete(message.ctxt, service_id)
 
     def proxy_rpc_to_manager(self, message, host_name, rpc_message,
                              topic, timeout):
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 3d816f4e5c..7da5deb21a 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -3391,9 +3391,13 @@ class HostAPI(base.Base):
         service.save()
         return service
 
+    def _service_delete(self, context, service_id):
+        """Performs the actual Service deletion operation."""
+        objects.Service.get_by_id(context, service_id).destroy()
+
     def service_delete(self, context, service_id):
         """Deletes the specified service."""
-        objects.Service.get_by_id(context, service_id).destroy()
+        self._service_delete(context, service_id)
 
     def instance_get_all_by_host(self, context, host_name):
         """Return all instances on the given host."""
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 88a7ee32c6..89cace08f3 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -860,6 +860,18 @@ class ComputeManager(manager.Manager):
         self.consoleauth_rpcapi.delete_tokens_for_instance(context,
                                                            instance.uuid)
 
+    def _create_reservations(self, context, instance, project_id, user_id):
+        vcpus = instance.vcpus
+        mem_mb = instance.memory_mb
+
+        quotas = objects.Quotas(context=context)
+        quotas.reserve(project_id=project_id,
+                       user_id=user_id,
+                       instances=-1,
+                       cores=-vcpus,
+                       ram=-mem_mb)
+        return quotas
+
     def _init_instance(self, context, instance):
         '''Initialize this instance during service init.'''
 
@@ -953,14 +965,11 @@ class ComputeManager(manager.Manager):
             instance.obj_load_attr('system_metadata')
             bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                     context, instance.uuid)
-            # FIXME(comstud): This needs fixed. We should be creating
-            # reservations and updating quotas, because quotas
-            # wouldn't have been updated for this instance since it is
-            # still in DELETING. See bug 1296414.
-            #
-            # Create a dummy quota object for now.
-            quotas = objects.Quotas.from_reservations(
-                    context, None, instance=instance)
+            project_id, user_id = objects.quotas.ids_from_instance(
+                    context, instance)
+            quotas = self._create_reservations(context, instance,
+                                               project_id, user_id)
+
             self._delete_instance(context, instance, bdms, quotas)
         except Exception:
             # we don't want that an exception blocks the init_host
@@ -2003,7 +2012,10 @@ class ComputeManager(manager.Manager):
         # Get swap out of the list
         swap = driver_block_device.get_swap(swap)
 
+        root_device_name = instance.get('root_device_name')
+
         return {'swap': swap,
+                'root_device_name': root_device_name,
                 'ephemerals': ephemerals,
                 'block_device_mapping': block_device_mapping}
 
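The _create_reservations helper added above resolves the FIXME from bug 1296414: an instance stuck in DELETING never had its quota usage rolled back, so _init_instance now reserves negative deltas that, once committed during _delete_instance, hand the project its instance, cores, and RAM back. A minimal self-contained sketch of that flow (FakeQuotas is illustrative, not Nova's objects.Quotas, which also persists reservations to the database):

    # Illustrative stand-in for objects.Quotas.
    class FakeQuotas(object):
        def __init__(self, context):
            self.context = context
            self.deltas = {}

        def reserve(self, project_id=None, user_id=None, **deltas):
            # Negative deltas mean "give this usage back on commit".
            self.deltas = deltas

        def commit(self):
            print('committing deltas: %s' % self.deltas)


    def create_reservations(context, instance, project_id, user_id):
        quotas = FakeQuotas(context)
        quotas.reserve(project_id=project_id,
                       user_id=user_id,
                       instances=-1,
                       cores=-instance['vcpus'],
                       ram=-instance['memory_mb'])
        return quotas


    instance = {'vcpus': 2, 'memory_mb': 2048}
    quotas = create_reservations(None, instance, 'project', 'user')
    quotas.commit()  # usage shrinks by 1 instance, 2 cores, 2048 MB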
diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py
index 8d4d517e0d..0fb09dbe40 100644
--- a/nova/network/neutronv2/api.py
+++ b/nova/network/neutronv2/api.py
@@ -1366,6 +1366,11 @@ class API(base_api.NetworkAPI):
             current_neutron_port_map[current_neutron_port['id']] = (
                 current_neutron_port)
 
+        # In that case we should repopulate ports from the state of
+        # Neutron.
+        if not port_ids:
+            port_ids = current_neutron_port_map.keys()
+
         for port_id in port_ids:
             current_neutron_port = current_neutron_port_map.get(port_id)
             if current_neutron_port:
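The guard added to _build_network_info_model covers instances whose info cache holds no VIFs (for example when the cache was lost or never populated): _gather_port_ids_and_networks then yields no port ids, and the model would come back empty even though Neutron still knows the device's ports. Falling back to every port Neutron returned repopulates the cache from the authoritative source. A rough standalone illustration of the fallback (function and variable names are illustrative):

    def select_ports(cached_port_ids, ports_from_neutron):
        """Prefer the cached ordering; fall back to Neutron's view."""
        port_map = dict((p['id'], p) for p in ports_from_neutron)
        port_ids = list(cached_port_ids)
        if not port_ids:
            # Empty cache: repopulate from what Neutron reports.
            port_ids = list(port_map.keys())
        return [port_map[pid] for pid in port_ids if pid in port_map]


    ports = [{'id': 'port1'}, {'id': 'port2'}]
    assert [p['id'] for p in select_ports([], ports)] == ['port1', 'port2']
    assert [p['id'] for p in select_ports(['port2'], ports)] == ['port2']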
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 4b263a38fe..d9f97ecc1a 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -2793,6 +2793,7 @@ class ComputeTestCase(BaseTestCase):
         expected = {
             'swap': None,
             'ephemerals': [],
+            'root_device_name': None,
             'block_device_mapping': [{
                 'connection_info': {
                     'driver_volume_type': 'rbd'
@@ -2824,6 +2825,7 @@ class ComputeTestCase(BaseTestCase):
         expected = {
             'swap': None,
             'ephemerals': [],
+            'root_device_name': None,
             'block_device_mapping': [{
                 'connection_info': {
                     'driver_volume_type': 'rbd'
@@ -2888,7 +2890,8 @@ class ComputeTestCase(BaseTestCase):
                             'virtual_name': 'ephemeral0'},
                            {'device_name': '/dev/vdc', 'num': 1, 'size': 2,
                             'virtual_name': 'ephemeral1'}],
-            'block_device_mapping': []
+            'block_device_mapping': [],
+            'root_device_name': None
         }
 
         block_device_info = (
@@ -5423,6 +5426,7 @@ class ComputeTestCase(BaseTestCase):
         self.mox.StubOutWithMock(self.compute.driver, 'pre_live_migration')
         self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance),
                                                {'swap': None, 'ephemerals': [],
+                                                'root_device_name': None,
                                                 'block_device_mapping': []},
                                                mox.IgnoreArg(),
                                                mox.IgnoreArg(),
@@ -5492,7 +5496,8 @@ class ComputeTestCase(BaseTestCase):
                                  'rollback_live_migration_at_destination')
 
         block_device_info = {
-                'swap': None, 'ephemerals': [], 'block_device_mapping': []}
+                'swap': None, 'ephemerals': [], 'block_device_mapping': [],
+                'root_device_name': None}
         self.compute.driver.get_instance_disk_info(
                 instance.name,
                 block_device_info=block_device_info).AndReturn('fake_disk')
@@ -5668,6 +5673,7 @@ class ComputeTestCase(BaseTestCase):
 
         post_live_migration.assert_has_calls([
             mock.call(c, instance, {'swap': None, 'ephemerals': [],
+                                    'root_device_name': None,
                                     'block_device_mapping': []}, None)])
         unfilter_instance.assert_has_calls([mock.call(instance, [])])
         migration = {'source_compute': srchost,
@@ -5826,6 +5832,7 @@ class ComputeTestCase(BaseTestCase):
                                  'rollback_live_migration_at_destination')
         self.compute.driver.rollback_live_migration_at_destination(c,
                 instance, [], {'swap': None, 'ephemerals': [],
+                               'root_device_name': None,
                                'block_device_mapping': []},
                 destroy_disks=True, migrate_data=None)
diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py
index 3e3cfd6f56..ce16e97b32 100644
--- a/nova/tests/compute/test_compute_mgr.py
+++ b/nova/tests/compute/test_compute_mgr.py
@@ -430,7 +430,10 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
     def test_init_instance_stuck_in_deleting(self):
         instance = fake_instance.fake_instance_obj(
                 self.context,
+                project_id='fake',
                 uuid='fake-uuid',
+                vcpus=1,
+                memory_mb=64,
                 power_state=power_state.RUNNING,
                 vm_state=vm_states.ACTIVE,
                 host=self.compute.host,
@@ -440,18 +443,64 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                                  'get_by_instance_uuid')
         self.mox.StubOutWithMock(self.compute, '_delete_instance')
         self.mox.StubOutWithMock(instance, 'obj_load_attr')
+        self.mox.StubOutWithMock(self.compute, '_create_reservations')
 
         bdms = []
+        quotas = objects.quotas.Quotas(self.context)
         instance.obj_load_attr('metadata')
         instance.obj_load_attr('system_metadata')
         objects.BlockDeviceMappingList.get_by_instance_uuid(
             self.context, instance.uuid).AndReturn(bdms)
+        self.compute._create_reservations(self.context, instance,
+                                          instance.project_id,
+                                          instance.user_id).AndReturn(quotas)
         self.compute._delete_instance(self.context, instance, bdms,
                                       mox.IgnoreArg())
 
         self.mox.ReplayAll()
         self.compute._init_instance(self.context, instance)
 
+    @mock.patch.object(objects.Instance, 'get_by_uuid')
+    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+    def test_init_instance_stuck_in_deleting_raises_exception(
+            self, mock_get_by_instance_uuid, mock_get_by_uuid):
+
+        instance = fake_instance.fake_instance_obj(
+                self.context,
+                project_id='fake',
+                uuid='fake-uuid',
+                vcpus=1,
+                memory_mb=64,
+                metadata={},
+                system_metadata={},
+                host=self.compute.host,
+                vm_state=vm_states.ACTIVE,
+                task_state=task_states.DELETING,
+                expected_attrs=['metadata', 'system_metadata'])
+
+        bdms = []
+        reservations = ['fake-resv']
+
+        def _create_patch(name, attr):
+            patcher = mock.patch.object(name, attr)
+            mocked_obj = patcher.start()
+            self.addCleanup(patcher.stop)
+            return mocked_obj
+
+        mock_delete_instance = _create_patch(self.compute, '_delete_instance')
+        mock_set_instance_error_state = _create_patch(
+            self.compute, '_set_instance_error_state')
+        mock_create_reservations = _create_patch(self.compute,
+                                                 '_create_reservations')
+
+        mock_create_reservations.return_value = reservations
+        mock_get_by_instance_uuid.return_value = bdms
+        mock_get_by_uuid.return_value = instance
+        mock_delete_instance.side_effect = test.TestingException('test')
+        self.compute._init_instance(self.context, instance)
+        mock_set_instance_error_state.assert_called_once_with(
+            self.context, instance)
+
     def _test_init_instance_reverts_crashed_migrations(self,
                                                        old_vm_state=None):
         power_on = True if (not old_vm_state or
@@ -648,20 +697,32 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
     def test_init_instance_deletes_error_deleting_instance(self):
         instance = fake_instance.fake_instance_obj(
                 self.context,
-                uuid='fake',
+                project_id='fake',
+                uuid='fake-uuid',
+                vcpus=1,
+                memory_mb=64,
                 vm_state=vm_states.ERROR,
                 host=self.compute.host,
                 task_state=task_states.DELETING)
+
         self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
                                  'get_by_instance_uuid')
        self.mox.StubOutWithMock(self.compute, '_delete_instance')
         self.mox.StubOutWithMock(instance, 'obj_load_attr')
+        self.mox.StubOutWithMock(objects.quotas, 'ids_from_instance')
+        self.mox.StubOutWithMock(self.compute, '_create_reservations')
 
         bdms = []
+        quotas = objects.quotas.Quotas(self.context)
         instance.obj_load_attr('metadata')
         instance.obj_load_attr('system_metadata')
         objects.BlockDeviceMappingList.get_by_instance_uuid(
             self.context, instance.uuid).AndReturn(bdms)
+        objects.quotas.ids_from_instance(self.context, instance).AndReturn(
+            (instance.project_id, instance.user_id))
+        self.compute._create_reservations(self.context, instance,
+                                          instance.project_id,
+                                          instance.user_id).AndReturn(quotas)
         self.compute._delete_instance(self.context, instance, bdms,
                                       mox.IgnoreArg())
 
         self.mox.ReplayAll()
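The _create_patch helper in the new test is a small idiom worth keeping: starting a patcher by hand and registering patcher.stop with addCleanup guarantees the patch is undone even when the test fails midway, which a bare patcher.start() does not. A generic sketch of the same pattern (the Thing class is illustrative):

    import unittest
    from unittest import mock  # the external mock package on Python 2


    class Thing(object):
        def dangerous(self):
            raise RuntimeError('must never run in tests')


    class ThingTestCase(unittest.TestCase):
        def _create_patch(self, target, attr):
            patcher = mock.patch.object(target, attr)
            mocked = patcher.start()
            # Undo the patch when the test ends, pass or fail.
            self.addCleanup(patcher.stop)
            return mocked

        def test_dangerous_is_stubbed(self):
            thing = Thing()
            mock_dangerous = self._create_patch(thing, 'dangerous')
            mock_dangerous.side_effect = ValueError('boom')
            self.assertRaises(ValueError, thing.dangerous)


    if __name__ == '__main__':
        unittest.main()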
diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py
index 59d5e1ba6a..5740c16680 100644
--- a/nova/tests/network/test_neutronv2.py
+++ b/nova/tests/network/test_neutronv2.py
@@ -2528,6 +2528,55 @@ class TestNeutronv2(TestNeutronv2Base):
         self.assertEqual(nw_infos[4]['id'], 'port4')
         self.assertEqual(nw_infos[5]['id'], 'port5')
 
+    @mock.patch('nova.network.neutronv2.api.API._nw_info_get_subnets')
+    @mock.patch('nova.network.neutronv2.api.API._nw_info_get_ips')
+    @mock.patch('nova.network.neutronv2.api.API._nw_info_build_network')
+    @mock.patch('nova.network.neutronv2.api.API._gather_port_ids_and_networks')
+    def test_build_network_info_model_empty(
+            self, mock_gather_port_ids_and_networks,
+            mock_nw_info_build_network,
+            mock_nw_info_get_ips,
+            mock_nw_info_get_subnets):
+        api = neutronapi.API()
+
+        fake_inst = objects.Instance()
+        fake_inst.project_id = 'fake'
+        fake_inst.uuid = 'uuid'
+        fake_inst.info_cache = objects.InstanceInfoCache()
+        fake_inst.info_cache.network_info = model.NetworkInfo()
+        fake_ports = [
+            # admin_state_up=True and status='ACTIVE' thus vif.active=True
+            {'id': 'port1',
+             'network_id': 'net-id',
+             'admin_state_up': True,
+             'status': 'ACTIVE',
+             'fixed_ips': [{'ip_address': '1.1.1.1'}],
+             'mac_address': 'de:ad:be:ef:00:01',
+             'binding:vif_type': model.VIF_TYPE_BRIDGE,
+             'binding:vnic_type': model.VNIC_TYPE_NORMAL,
+             'binding:vif_details': {},
+             },
+            ]
+        fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
+
+        neutronv2.get_client(mox.IgnoreArg(), admin=True).MultipleTimes(
+            ).AndReturn(self.moxed_client)
+        self.moxed_client.list_ports(
+            tenant_id='fake', device_id='uuid').AndReturn(
+                {'ports': fake_ports})
+
+        mock_gather_port_ids_and_networks.return_value = (None, None)
+        mock_nw_info_build_network.return_value = (None, None)
+        mock_nw_info_get_ips.return_value = []
+        mock_nw_info_get_subnets.return_value = fake_subnets
+
+        self.mox.ReplayAll()
+        neutronv2.get_client('fake')
+
+        nw_infos = api._build_network_info_model(
+            self.context, fake_inst)
+        self.assertEqual(len(nw_infos), 1)
+
     def test_get_subnets_from_port(self):
         api = neutronapi.API()
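test_build_network_info_model_empty stacks four mock.patch decorators; note the argument order: decorators apply bottom-up, so the decorator closest to the function supplies the first mock argument and the topmost one supplies the last, which is why mock_gather_port_ids_and_networks comes first in the signature. A tiny sketch of the rule (the API class here is illustrative):

    from unittest import mock  # the external mock package on Python 2


    class API(object):
        def build(self):
            return 'real build'

        def gather(self):
            return 'real gather'


    # Bottom decorator -> first argument; top decorator -> last argument.
    @mock.patch.object(API, 'build')
    @mock.patch.object(API, 'gather')
    def check(mock_gather, mock_build):
        api = API()
        api.gather()
        api.build()
        mock_gather.assert_called_once_with()
        mock_build.assert_called_once_with()


    check()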
migrate_data={"block_migration": False}) target_res = {'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'}} @@ -6191,6 +6193,32 @@ class LibvirtConnTestCase(test.TestCase): self.assertTrue(create_image_mock.called) self.assertIsInstance(res, dict) + def test_pre_live_migration_block_migrate_fails(self): + bdms = [{ + 'connection_info': { + 'serial': '12345', + u'data': { + 'device_path': + u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.t-lun-X' + } + }, + 'mount_device': '/dev/sda'}] + + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + instance = db.instance_create(self.context, self.test_instance) + + with contextlib.nested( + mock.patch.object(drvr, '_create_images_and_backing'), + mock.patch.object(drvr, 'ensure_filtering_rules_for_instance'), + mock.patch.object(drvr, 'plug_vifs'), + mock.patch.object(drvr, '_connect_volume'), + mock.patch.object(driver, 'block_device_info_get_mapping', + return_value=bdms)): + self.assertRaises(exception.MigrationError, + drvr.pre_live_migration, + self.context, instance, block_device_info=None, + network_info=[], disk_info={}, migrate_data={}) + def test_get_instance_disk_info_works_correctly(self): # Test data instance_ref = db.instance_create(self.context, self.test_instance) @@ -8978,7 +9006,8 @@ Active: 8381604 kB got = conn._get_instance_capabilities() self.assertEqual(want, got) - def test_event_dispatch(self): + @mock.patch.object(greenthread, 'spawn_after') + def test_event_dispatch(self, mock_spawn_after): # Validate that the libvirt self-pipe for forwarding # events between threads is working sanely conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) @@ -9015,19 +9044,26 @@ Active: 8381604 kB conn._queue_event(event4) conn._dispatch_events() - want_events = [event1, event2, event3, event4] + want_events = [event1, event2, event3] self.assertEqual(want_events, got_events) + # STOPPED is delayed so it's handled separately + mock_spawn_after.assert_called_once_with( + conn._lifecycle_delay, conn.emit_event, event4) + def test_event_lifecycle(self): # Validate that libvirt events are correctly translated # to Nova events conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) got_events = [] - def handler(event): - got_events.append(event) + def spawn_after(seconds, func, *args, **kwargs): + got_events.append(args[0]) + return mock.Mock(spec=greenthread.GreenThread) - conn.register_event_listener(handler) + greenthread.spawn_after = mock.Mock(side_effect=spawn_after) + + conn.register_event_listener(lambda e: None) conn._init_events_pipe() fake_dom_xml = """ <domain type='kvm'> @@ -9055,22 +9091,18 @@ Active: 8381604 kB self.assertEqual(got_events[0].transition, virtevent.EVENT_LIFECYCLE_STOPPED) - @mock.patch.object(libvirt_driver.LibvirtDriver, 'emit_event') - def test_event_emit_delayed_call_now(self, emit_event_mock): - self.flags(virt_type="kvm", group="libvirt") - conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) - conn._event_emit_delayed(None) - emit_event_mock.assert_called_once_with(None) - - @mock.patch.object(greenthread, 'spawn_after') - def test_event_emit_delayed_call_delayed(self, spawn_after_mock): - CONF.set_override("virt_type", "xen", group="libvirt") - conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + def test_event_emit_delayed_call_delayed(self): event = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", virtevent.EVENT_LIFECYCLE_STOPPED) - conn._event_emit_delayed(event) - spawn_after_mock.assert_called_once_with(15, 
diff --git a/nova/tests/virt/libvirt/test_rbd.py b/nova/tests/virt/libvirt/test_rbd.py
index bcbdc25f59..acc1d38345 100644
--- a/nova/tests/virt/libvirt/test_rbd.py
+++ b/nova/tests/virt/libvirt/test_rbd.py
@@ -14,6 +14,7 @@
 import mock
 
 from nova import exception
+from nova import objects
 from nova.openstack.common import log as logging
 from nova import test
 from nova import utils
@@ -281,3 +282,32 @@ class RbdTestCase(test.NoDBTestCase):
         rbd.remove.assert_called_once_with(client.ioctx, '12345_test')
         client.__enter__.assert_called_once_with()
         client.__exit__.assert_called_once_with(None, None, None)
+
+    @mock.patch.object(rbd_utils, 'rbd')
+    @mock.patch.object(rbd_utils, 'rados')
+    @mock.patch.object(rbd_utils, 'RADOSClient')
+    def _test_cleanup_exception(self, exception_name,
+                                mock_client, mock_rados, mock_rbd):
+        instance = objects.Instance(id=1, uuid='12345')
+
+        setattr(mock_rbd, exception_name, test.TestingException)
+        rbd = mock_rbd.RBD.return_value
+        rbd.remove.side_effect = test.TestingException
+        rbd.list.return_value = ['12345_test', '111_test']
+
+        client = mock_client.return_value
+        with mock.patch('eventlet.greenthread.sleep'):
+            self.driver.cleanup_volumes(instance)
+        rbd.remove.assert_any_call(client.ioctx, '12345_test')
+        # NOTE(danms): 10 retries + 1 final attempt to propagate = 11
+        self.assertEqual(11, len(rbd.remove.call_args_list))
+
+    def test_cleanup_volumes_fail_not_found(self):
+        self._test_cleanup_exception('ImageBusy')
+
+    def test_cleanup_volumes_fail_snapshots(self):
+        self._test_cleanup_exception('ImageHasSnapshots')
+
+    def test_cleanup_volumes_fail_other(self):
+        self.assertRaises(test.TestingException,
+                          self._test_cleanup_exception, 'DoesNotExist')
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index ebe33b83ce..82a941b4e6 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -108,8 +108,9 @@ class VMUtils(object):
         for vs in self._conn.Msvm_VirtualSystemSettingData(
                 ['ElementName', 'Notes'],
                 SettingType=self._VIRTUAL_SYSTEM_CURRENT_SETTINGS):
-            instance_notes.append((vs.ElementName,
-                                   [v for v in vs.Notes.split('\n') if v]))
+            if vs.Notes is not None:
+                instance_notes.append(
+                    (vs.ElementName, [v for v in vs.Notes.split('\n') if v]))
 
         return instance_notes
 
diff --git a/nova/virt/hyperv/vmutilsv2.py b/nova/virt/hyperv/vmutilsv2.py
index 4156f622b6..1c196731ad 100644
--- a/nova/virt/hyperv/vmutilsv2.py
+++ b/nova/virt/hyperv/vmutilsv2.py
@@ -78,7 +78,9 @@ class VMUtilsV2(vmutils.VMUtils):
         for vs in self._conn.Msvm_VirtualSystemSettingData(
                 ['ElementName', 'Notes'],
                 VirtualSystemType=self._VIRTUAL_SYSTEM_TYPE_REALIZED):
-            instance_notes.append((vs.ElementName, [v for v in vs.Notes if v]))
+            if vs.Notes is not None:
+                instance_notes.append(
+                    (vs.ElementName, [v for v in vs.Notes if v]))
 
         return instance_notes
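The vmutils change those Hyper-V tests exercise is a plain None guard: WMI can return Msvm_VirtualSystemSettingData rows whose Notes field is NULL, and calling .split() on that None crashed list_instance_notes(). A standalone sketch of the guarded parsing (FakeSettingData stands in for the WMI result objects):

    class FakeSettingData(object):
        def __init__(self, element_name, notes):
            self.ElementName = element_name
            self.Notes = notes


    def list_instance_notes(setting_data_rows):
        instance_notes = []
        for vs in setting_data_rows:
            # Rows with NULL Notes used to raise AttributeError here.
            if vs.Notes is not None:
                instance_notes.append(
                    (vs.ElementName,
                     [v for v in vs.Notes.split('\n') if v]))
        return instance_notes


    rows = [FakeSettingData('vm1', 'uuid-1\n'), FakeSettingData('vm2', None)]
    assert list_instance_notes(rows) == [('vm1', ['uuid-1'])]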
diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py
index ae09257483..b1ec2b7401 100644
--- a/nova/virt/ironic/driver.py
+++ b/nova/virt/ironic/driver.py
@@ -62,8 +62,10 @@ opts = [
     cfg.StrOpt('admin_username',
                help='Ironic keystone admin name'),
     cfg.StrOpt('admin_password',
+               secret=True,
                help='Ironic keystone admin password.'),
     cfg.StrOpt('admin_auth_token',
+               secret=True,
                help='Ironic keystone auth token.'),
     cfg.StrOpt('admin_url',
                help='Keystone public API endpoint.'),
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index c26546b264..b7f465230a 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -436,14 +436,11 @@ class LibvirtDriver(driver.ComputeDriver):
         self._volume_api = volume.API()
         self._image_api = image.API()
         self._events_delayed = {}
-        # Note(toabctl): During a reboot of a Xen domain, STOPPED and
+        # Note(toabctl): During a reboot of a domain, STOPPED and
         #                STARTED events are sent. To prevent shutting
         #                down the domain during a reboot, delay the
         #                STOPPED lifecycle event some seconds.
-        if CONF.libvirt.virt_type == "xen":
-            self._lifecycle_delay = 15
-        else:
-            self._lifecycle_delay = 0
+        self._lifecycle_delay = 15
 
         sysinfo_serial_funcs = {
             'none': lambda: None,
@@ -655,7 +652,7 @@ class LibvirtDriver(driver.ComputeDriver):
 
         if event.transition == virtevent.EVENT_LIFECYCLE_STOPPED:
             # Delay STOPPED event, as they may be followed by a STARTED
-            # event in case the instance is rebooting, when runned with Xen
+            # event in case the instance is rebooting
             id_ = greenthread.spawn_after(self._lifecycle_delay,
                                           self.emit_event, event)
             self._events_delayed[event.uuid] = id_
@@ -5559,6 +5556,18 @@ class LibvirtDriver(driver.ComputeDriver):
                                                   CONF.libvirt.virt_type,
                                                   vol)
             self._connect_volume(connection_info, disk_info)
+        if is_block_migration and len(block_device_mapping):
+            # NOTE(stpierre): if this instance has mapped volumes,
+            # we can't do a block migration, since that will
+            # result in volumes being copied from themselves to
+            # themselves, which is a recipe for disaster.
+            LOG.error(
+                _LE('Cannot block migrate instance %s with mapped volumes') %
+                instance.uuid)
+            raise exception.MigrationError(
+                _('Cannot block migrate instance %s with mapped volumes') %
+                instance.uuid)
+
         # We call plug_vifs before the compute manager calls
         # ensure_filtering_rules_for_instance, to ensure bridge is set up
         # Retry operation is necessary because continuously request comes,
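The new pre_live_migration check encodes the NOTE(stpierre) reasoning: block migration copies the instance's disks to the destination, but an attached volume (iSCSI, RBD, and so on) is already visible from both hosts, so the copy would write the volume onto itself. A reduced sketch of the guard (MigrationError here is a simplified stand-in for nova.exception.MigrationError):

    class MigrationError(Exception):
        """Simplified stand-in for nova.exception.MigrationError."""


    def check_block_migration(instance_uuid, is_block_migration,
                              block_device_mapping):
        # Shared volumes are reachable from both hosts; block-copying
        # them would copy each volume onto itself and corrupt it.
        if is_block_migration and len(block_device_mapping):
            raise MigrationError(
                'Cannot block migrate instance %s with mapped volumes'
                % instance_uuid)


    bdm = [{'mount_device': '/dev/sda'}]
    try:
        check_block_migration('fake-uuid', True, bdm)
    except MigrationError as e:
        print(e)
    check_block_migration('fake-uuid', False, bdm)  # plain live migration is fine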
diff --git a/nova/virt/libvirt/rbd_utils.py b/nova/virt/libvirt/rbd_utils.py
index e638cf97c8..9712349204 100644
--- a/nova/virt/libvirt/rbd_utils.py
+++ b/nova/virt/libvirt/rbd_utils.py
@@ -30,6 +30,7 @@
 from nova.i18n import _LW
 from nova.openstack.common import excutils
 from nova.openstack.common import jsonutils
 from nova.openstack.common import log as logging
+from nova.openstack.common import loopingcall
 from nova.openstack.common import units
 from nova import utils
@@ -254,6 +255,18 @@ class RBDDriver(object):
         utils.execute('rbd', 'import', *args)
 
     def cleanup_volumes(self, instance):
+        def _cleanup_vol(ioctx, volume, retryctx):
+            try:
+                rbd.RBD().remove(client.ioctx, volume)
+                raise loopingcall.LoopingCallDone(retvalue=False)
+            except (rbd.ImageBusy, rbd.ImageHasSnapshots):
+                LOG.warn(_LW('rbd remove %(volume)s in pool %(pool)s '
+                             'failed'),
+                         {'volume': volume, 'pool': self.pool})
+                retryctx['retries'] -= 1
+                if retryctx['retries'] <= 0:
+                    raise loopingcall.LoopingCallDone()
+
         with RADOSClient(self, self.pool) as client:
 
             def belongs_to_instance(disk):
@@ -262,12 +275,18 @@ class RBDDriver(object):
             # pylint: disable=E1101
             volumes = rbd.RBD().list(client.ioctx)
             for volume in filter(belongs_to_instance, volumes):
-                try:
-                    rbd.RBD().remove(client.ioctx, volume)
-                except (rbd.ImageNotFound, rbd.ImageHasSnapshots):
-                    LOG.warn(_LW('rbd remove %(volume)s in pool %(pool)s '
-                                 'failed'),
-                             {'volume': volume, 'pool': self.pool})
+                # NOTE(danms): We let it go for ten seconds
+                retryctx = {'retries': 10}
+                timer = loopingcall.FixedIntervalLoopingCall(
+                    _cleanup_vol, client.ioctx, volume, retryctx)
+                timed_out = timer.start(interval=1).wait()
+                if timed_out:
+                    # NOTE(danms): Run this again to propagate the error, but
+                    # if it succeeds, don't raise the loopingcall exception
+                    try:
+                        _cleanup_vol(client.ioctx, volume, retryctx)
+                    except loopingcall.LoopingCallDone:
+                        pass
 
     def get_pool_info(self):
         with RADOSClient(self) as client:
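The retry in cleanup_volumes leans on loopingcall.FixedIntervalLoopingCall: the wrapped function runs once per interval and signals completion by raising LoopingCallDone, whose retvalue becomes the wait() result. A successful remove() stops the loop with retvalue=False (not timed out); exhausting the ten retries stops it with the default True, after which the caller makes one final call so a persistent error propagates. A self-contained sketch of the pattern with a local stand-in for the looping call (the real one lives in nova.openstack.common.loopingcall):

    import time


    class LoopingCallDone(Exception):
        """Stand-in for loopingcall.LoopingCallDone."""
        def __init__(self, retvalue=True):
            self.retvalue = retvalue


    def fixed_interval_call(func, interval, *args):
        """Minimal FixedIntervalLoopingCall: run func until it raises."""
        while True:
            try:
                func(*args)
            except LoopingCallDone as done:
                return done.retvalue
            time.sleep(interval)


    def make_remover(busy_failures):
        state = {'left': busy_failures}

        def _cleanup_vol(volume, retryctx):
            if state['left'] > 0:  # transient ImageBusy-style failure
                state['left'] -= 1
                retryctx['retries'] -= 1
                if retryctx['retries'] <= 0:
                    raise LoopingCallDone()  # gave up: timed out
                return
            raise LoopingCallDone(retvalue=False)  # removed: no timeout

        return _cleanup_vol


    retryctx = {'retries': 10}
    timed_out = fixed_interval_call(make_remover(3), 0, 'vol1', retryctx)
    assert timed_out is False  # succeeded after three busy retries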