diff options
Diffstat (limited to 'nova/tests')
71 files changed, 3180 insertions, 1127 deletions
diff --git a/nova/tests/fixtures/libvirt.py b/nova/tests/fixtures/libvirt.py index 891e957200..5ccf01e40f 100644 --- a/nova/tests/fixtures/libvirt.py +++ b/nova/tests/fixtures/libvirt.py @@ -309,7 +309,7 @@ class FakePCIDevice(object): self, dev_type, bus, slot, function, iommu_group, numa_node, *, vf_ratio=None, multiple_gpu_types=False, generic_types=False, parent=None, vend_id=None, vend_name=None, prod_id=None, - prod_name=None, driver_name=None, vpd_fields=None + prod_name=None, driver_name=None, vpd_fields=None, mac_address=None, ): """Populate pci devices @@ -331,6 +331,8 @@ class FakePCIDevice(object): :param prod_id: (str) The product ID. :param prod_name: (str) The product name. :param driver_name: (str) The driver name. + :param mac_address: (str) The MAC of the device. + Used in case of SRIOV PFs """ self.dev_type = dev_type @@ -349,6 +351,7 @@ class FakePCIDevice(object): self.prod_id = prod_id self.prod_name = prod_name self.driver_name = driver_name + self.mac_address = mac_address self.vpd_fields = vpd_fields @@ -364,7 +367,9 @@ class FakePCIDevice(object): assert not self.vf_ratio, 'vf_ratio does not apply for PCI devices' if self.dev_type in ('PF', 'VF'): - assert self.vf_ratio, 'require vf_ratio for PFs and VFs' + assert ( + self.vf_ratio is not None + ), 'require vf_ratio for PFs and VFs' if self.dev_type == 'VF': assert self.parent, 'require parent for VFs' @@ -497,6 +502,10 @@ class FakePCIDevice(object): def XMLDesc(self, flags): return self.pci_device + @property + def address(self): + return "0000:%02x:%02x.%1x" % (self.bus, self.slot, self.function) + # TODO(stephenfin): Remove all of these HostFooDevicesInfo objects in favour of # a unified devices object @@ -609,7 +618,7 @@ class HostPCIDevicesInfo(object): self, dev_type, bus, slot, function, iommu_group, numa_node, vf_ratio=None, multiple_gpu_types=False, generic_types=False, parent=None, vend_id=None, vend_name=None, prod_id=None, - prod_name=None, driver_name=None, vpd_fields=None, + 
prod_name=None, driver_name=None, vpd_fields=None, mac_address=None, ): pci_dev_name = _get_libvirt_nodedev_name(bus, slot, function) @@ -632,6 +641,7 @@ class HostPCIDevicesInfo(object): prod_name=prod_name, driver_name=driver_name, vpd_fields=vpd_fields, + mac_address=mac_address, ) self.devices[pci_dev_name] = dev return dev @@ -651,6 +661,13 @@ class HostPCIDevicesInfo(object): return [dev for dev in self.devices if self.devices[dev].is_capable_of_mdevs] + def get_pci_address_mac_mapping(self): + return { + device.address: device.mac_address + for dev_addr, device in self.devices.items() + if device.mac_address + } + class FakeMdevDevice(object): template = """ @@ -2182,6 +2199,15 @@ class LibvirtFixture(fixtures.Fixture): def __init__(self, stub_os_vif=True): self.stub_os_vif = stub_os_vif + self.pci_address_to_mac_map = collections.defaultdict( + lambda: '52:54:00:1e:59:c6') + + def update_sriov_mac_address_mapping(self, pci_address_to_mac_map): + self.pci_address_to_mac_map.update(pci_address_to_mac_map) + + def fake_get_mac_by_pci_address(self, pci_addr, pf_interface=False): + res = self.pci_address_to_mac_map[pci_addr] + return res def setUp(self): super().setUp() @@ -2194,31 +2220,39 @@ class LibvirtFixture(fixtures.Fixture): self.useFixture( fixtures.MockPatch('nova.virt.libvirt.utils.get_fs_info')) - self.useFixture( - fixtures.MockPatch('nova.compute.utils.get_machine_ips')) + self.mock_get_machine_ips = self.useFixture( + fixtures.MockPatch('nova.compute.utils.get_machine_ips')).mock # libvirt driver needs to call out to the filesystem to get the # parent_ifname for the SRIOV VFs. 
- self.useFixture(fixtures.MockPatch( - 'nova.pci.utils.get_ifname_by_pci_address', - return_value='fake_pf_interface_name')) + self.mock_get_ifname_by_pci_address = self.useFixture( + fixtures.MockPatch( + "nova.pci.utils.get_ifname_by_pci_address", + return_value="fake_pf_interface_name", + ) + ).mock self.useFixture(fixtures.MockPatch( 'nova.pci.utils.get_mac_by_pci_address', - return_value='52:54:00:1e:59:c6')) + side_effect=self.fake_get_mac_by_pci_address)) # libvirt calls out to sysfs to get the vfs ID during macvtap plug - self.useFixture(fixtures.MockPatch( - 'nova.pci.utils.get_vf_num_by_pci_address', return_value=1)) + self.mock_get_vf_num_by_pci_address = self.useFixture( + fixtures.MockPatch( + 'nova.pci.utils.get_vf_num_by_pci_address', return_value=1 + ) + ).mock # libvirt calls out to privsep to set the mac and vlan of a macvtap - self.useFixture(fixtures.MockPatch( - 'nova.privsep.linux_net.set_device_macaddr_and_vlan')) + self.mock_set_device_macaddr_and_vlan = self.useFixture( + fixtures.MockPatch( + 'nova.privsep.linux_net.set_device_macaddr_and_vlan')).mock # libvirt calls out to privsep to set the port state during macvtap # plug - self.useFixture(fixtures.MockPatch( - 'nova.privsep.linux_net.set_device_macaddr')) + self.mock_set_device_macaddr = self.useFixture( + fixtures.MockPatch( + 'nova.privsep.linux_net.set_device_macaddr')).mock # Don't assume that the system running tests has a valid machine-id self.useFixture(fixtures.MockPatch( @@ -2233,8 +2267,8 @@ class LibvirtFixture(fixtures.Fixture): # Ensure tests perform the same on all host architectures fake_uname = os_uname( 'Linux', '', '5.4.0-0-generic', '', obj_fields.Architecture.X86_64) - self.useFixture( - fixtures.MockPatch('os.uname', return_value=fake_uname)) + self.mock_uname = self.useFixture( + fixtures.MockPatch('os.uname', return_value=fake_uname)).mock # ...and on all machine types fake_loaders = [ diff --git a/nova/tests/fixtures/nova.py b/nova/tests/fixtures/nova.py index 
810c6f62dd..f9e011dd67 100644 --- a/nova/tests/fixtures/nova.py +++ b/nova/tests/fixtures/nova.py @@ -904,6 +904,16 @@ class WarningsFixture(fixtures.Fixture): message='Implicit coercion of SELECT and textual SELECT .*', category=sqla_exc.SADeprecationWarning) + # Enable general SQLAlchemy warnings also to ensure we're not doing + # silly stuff. It's possible that we'll need to filter things out here + # with future SQLAlchemy versions, but that's a good thing + + warnings.filterwarnings( + 'error', + module='nova', + category=sqla_exc.SAWarning, + ) + self.addCleanup(self._reset_warning_filters) def _reset_warning_filters(self): @@ -1032,9 +1042,15 @@ class OSAPIFixture(fixtures.Fixture): self.api = client.TestOpenStackClient( 'fake', base_url, project_id=self.project_id, roles=['reader', 'member']) + self.alternative_api = client.TestOpenStackClient( + 'fake', base_url, project_id=self.project_id, + roles=['reader', 'member']) self.admin_api = client.TestOpenStackClient( 'admin', base_url, project_id=self.project_id, roles=['reader', 'member', 'admin']) + self.alternative_admin_api = client.TestOpenStackClient( + 'admin', base_url, project_id=self.project_id, + roles=['reader', 'member', 'admin']) self.reader_api = client.TestOpenStackClient( 'reader', base_url, project_id=self.project_id, roles=['reader']) @@ -1130,9 +1146,9 @@ class PoisonFunctions(fixtures.Fixture): # Don't poison the function if it's already mocked import nova.virt.libvirt.host if not isinstance(nova.virt.libvirt.host.Host._init_events, mock.Mock): - self.useFixture(fixtures.MockPatch( + self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.host.Host._init_events', - side_effect=evloop)) + evloop)) class IndirectionAPIFixture(fixtures.Fixture): diff --git a/nova/tests/functional/api_sample_tests/test_remote_consoles.py b/nova/tests/functional/api_sample_tests/test_remote_consoles.py index 986826bfee..e304402ee9 100644 --- a/nova/tests/functional/api_sample_tests/test_remote_consoles.py +++ 
b/nova/tests/functional/api_sample_tests/test_remote_consoles.py @@ -13,6 +13,10 @@ # License for the specific language governing permissions and limitations # under the License. +from unittest import mock + +from nova.compute import api as compute +from nova import exception from nova.tests.functional.api_sample_tests import test_servers HTTP_RE = r'(https?://)([\w\d:#@%/;$()~_?\+-=\\.&](#!)?)*' @@ -38,6 +42,22 @@ class ConsolesSampleJsonTests(test_servers.ServersSampleBase): self._verify_response('get-vnc-console-post-resp', {'url': HTTP_RE}, response, 200) + @mock.patch.object(compute.API, 'get_vnc_console') + def test_get_vnc_console_instance_invalid_state(self, + mock_get_vnc_console): + uuid = self._post_server() + + def fake_get_vnc_console(*args, **kwargs): + raise exception.InstanceInvalidState( + attr='fake_attr', state='fake_state', method='fake_method', + instance_uuid=uuid) + + mock_get_vnc_console.side_effect = fake_get_vnc_console + response = self._do_post('servers/%s/action' % uuid, + 'get-vnc-console-post-req', + {'action': 'os-getVNCConsole'}) + self.assertEqual(409, response.status_code) + def test_get_spice_console(self): uuid = self._post_server() response = self._do_post('servers/%s/action' % uuid, diff --git a/nova/tests/functional/compute/test_resource_tracker.py b/nova/tests/functional/compute/test_resource_tracker.py index 81b7dfb68c..758c15f371 100644 --- a/nova/tests/functional/compute/test_resource_tracker.py +++ b/nova/tests/functional/compute/test_resource_tracker.py @@ -29,7 +29,6 @@ from nova import conf from nova import context from nova import objects from nova import test -from nova.tests import fixtures as nova_fixtures from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.virt import driver as virt_driver @@ -694,15 +693,6 @@ class TestProviderConfig(integrated_helpers.ProviderUsageBaseTestCase): feature a vm cannot be spawning using a custom trait and then 
start a compute service that provides that trait. """ - - self.useFixture(nova_fixtures.NeutronFixture(self)) - self.useFixture(nova_fixtures.GlanceFixture(self)) - - # Start nova services. - self.api = self.useFixture(nova_fixtures.OSAPIFixture( - api_version='v2.1')).admin_api - self.api.microversion = 'latest' - self.start_service('conductor') # start nova-compute that will not have the additional trait. self._start_compute("fake-host-1") diff --git a/nova/tests/functional/integrated_helpers.py b/nova/tests/functional/integrated_helpers.py index 028ef53d7e..bd6244546c 100644 --- a/nova/tests/functional/integrated_helpers.py +++ b/nova/tests/functional/integrated_helpers.py @@ -540,8 +540,9 @@ class InstanceHelperMixin: self.api.post_server_action( server['id'], {'os-migrateLive': {'host': None, 'block_migration': 'auto'}}) - self._wait_for_state_change(server, server_expected_state) + server = self._wait_for_state_change(server, server_expected_state) self._wait_for_migration_status(server, [migration_expected_state]) + return server _live_migrate_server = _live_migrate diff --git a/nova/tests/functional/libvirt/base.py b/nova/tests/functional/libvirt/base.py index 3d8aec8106..68c6e294c1 100644 --- a/nova/tests/functional/libvirt/base.py +++ b/nova/tests/functional/libvirt/base.py @@ -42,7 +42,7 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase): super(ServersTestBase, self).setUp() self.useFixture(nova_fixtures.LibvirtImageBackendFixture()) - self.useFixture(nova_fixtures.LibvirtFixture()) + self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture()) self.useFixture(nova_fixtures.OSBrickFixture()) self.useFixture(fixtures.MockPatch( @@ -51,12 +51,12 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase): self.useFixture(fixtures.MockPatch( 'nova.virt.libvirt.LibvirtDriver._get_local_gb_info', return_value={'total': 128, 'used': 44, 'free': 84})) - self.useFixture(fixtures.MockPatch( + self.mock_is_valid_hostname = 
self.useFixture(fixtures.MockPatch( 'nova.virt.libvirt.driver.libvirt_utils.is_valid_hostname', - return_value=True)) - self.useFixture(fixtures.MockPatch( + return_value=True)).mock + self.mock_file_open = self.useFixture(fixtures.MockPatch( 'nova.virt.libvirt.driver.libvirt_utils.file_open', - side_effect=lambda *a, **k: io.BytesIO(b''))) + side_effect=lambda *a, **k: io.BytesIO(b''))).mock self.useFixture(fixtures.MockPatch( 'nova.privsep.utils.supports_direct_io', return_value=True)) @@ -114,7 +114,7 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase): def start_compute( self, hostname='compute1', host_info=None, pci_info=None, mdev_info=None, vdpa_info=None, libvirt_version=None, - qemu_version=None, + qemu_version=None, cell_name=None, connection=None ): """Start a compute service. @@ -124,27 +124,53 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase): :param host_info: A fakelibvirt.HostInfo object for the host. Defaults to a HostInfo with 2 NUMA nodes, 2 cores per node, 2 threads per core, and 16GB of RAM. + :param connection: A fake libvirt connection. You should not provide it + directly. However it is used by restart_compute_service to + implement restart without loosing the hypervisor state. :returns: The hostname of the created service, which can be used to lookup the created service and UUID of the assocaited resource provider. 
""" + if connection and ( + host_info or + pci_info or + mdev_info or + vdpa_info or + libvirt_version or + qemu_version + ): + raise ValueError( + "Either an existing connection instance can be provided or a " + "list of parameters for a new connection" + ) def _start_compute(hostname, host_info): - fake_connection = self._get_connection( - host_info, pci_info, mdev_info, vdpa_info, libvirt_version, - qemu_version, hostname, - ) + if connection: + fake_connection = connection + else: + fake_connection = self._get_connection( + host_info, pci_info, mdev_info, vdpa_info, libvirt_version, + qemu_version, hostname, + ) + + # If the compute is configured with PCI devices then we need to + # make sure that the stubs around sysfs has the MAC address + # information for the PCI PF devices + if pci_info: + self.libvirt.update_sriov_mac_address_mapping( + pci_info.get_pci_address_mac_mapping()) # This is fun. Firstly we need to do a global'ish mock so we can # actually start the service. - with mock.patch('nova.virt.libvirt.host.Host.get_connection', - return_value=fake_connection): - compute = self.start_service('compute', host=hostname) - # Once that's done, we need to tweak the compute "service" to - # make sure it returns unique objects. We do this inside the - # mock context to avoid a small window between the end of the - # context and the tweaking where get_connection would revert to - # being an autospec mock. - compute.driver._host.get_connection = lambda: fake_connection + orig_con = self.mock_conn.return_value + self.mock_conn.return_value = fake_connection + compute = self.start_service( + 'compute', host=hostname, cell_name=cell_name) + # Once that's done, we need to tweak the compute "service" to + # make sure it returns unique objects. 
+ compute.driver._host.get_connection = lambda: fake_connection + # Then we revert the local mock tweaking so the next compute can + # get its own + self.mock_conn.return_value = orig_con return compute # ensure we haven't already registered services with these hostnames @@ -159,6 +185,74 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase): return hostname + def restart_compute_service( + self, + hostname, + host_info=None, + pci_info=None, + mdev_info=None, + vdpa_info=None, + libvirt_version=None, + qemu_version=None, + keep_hypervisor_state=True, + ): + """Stops the service and starts a new one to have realistic restart + + :param hostname: the hostname of the nova-compute service to be + restarted + :param keep_hypervisor_state: If True then we reuse the fake connection + from the existing driver. If False a new connection will be created + based on the other parameters provided + """ + # We are intentionally not calling super() here. Nova's base test class + # defines starting and restarting compute service with a very + # different signatures and also those calls are cannot be made aware of + # the intricacies of the libvirt fixture. So we simply hide that + # implementation. + + if keep_hypervisor_state and ( + host_info or + pci_info or + mdev_info or + vdpa_info or + libvirt_version or + qemu_version + ): + raise ValueError( + "Either keep_hypervisor_state=True or a list of libvirt " + "parameters can be provided but not both" + ) + + compute = self.computes.pop(hostname) + self.compute_rp_uuids.pop(hostname) + + # NOTE(gibi): The service interface cannot be used to simulate a real + # service restart as the manager object will not be recreated after a + # service.stop() and service.start() therefore the manager state will + # survive. For example the resource tracker will not be recreated after + # a stop start. 
The service.kill() call cannot help as it deletes + # the service from the DB which is unrealistic and causes that some + # operation that refers to the killed host (e.g. evacuate) fails. + # So this helper method will stop the original service and then starts + # a brand new compute service for the same host and node. This way + # a new ComputeManager instance will be created and initialized during + # the service startup. + compute.stop() + + # this service was running previously, so we have to make sure that + # we restart it in the same cell + cell_name = self.host_mappings[compute.host].cell_mapping.name + + old_connection = compute.manager.driver._get_connection() + + self.start_compute( + hostname, host_info, pci_info, mdev_info, vdpa_info, + libvirt_version, qemu_version, cell_name, + old_connection if keep_hypervisor_state else None + ) + + return self.computes[hostname] + class LibvirtMigrationMixin(object): """A simple mixin to facilliate successful libvirt live migrations @@ -392,6 +486,22 @@ class LibvirtNeutronFixture(nova_fixtures.NeutronFixture): 'binding:vnic_type': 'remote-managed', } + network_4_port_pf = { + 'id': 'c6f51315-9202-416f-9e2f-eb78b3ac36d9', + 'network_id': network_4['id'], + 'status': 'ACTIVE', + 'mac_address': 'b5:bc:2e:e7:51:01', + 'fixed_ips': [ + { + 'ip_address': '192.168.4.8', + 'subnet_id': subnet_4['id'] + } + ], + 'binding:vif_details': {'vlan': 42}, + 'binding:vif_type': 'hostdev_physical', + 'binding:vnic_type': 'direct-physical', + } + def __init__(self, test): super(LibvirtNeutronFixture, self).__init__(test) self._networks = { diff --git a/nova/tests/functional/libvirt/test_device_bus_migration.py b/nova/tests/functional/libvirt/test_device_bus_migration.py index 82a0d4556e..3852e31c68 100644 --- a/nova/tests/functional/libvirt/test_device_bus_migration.py +++ b/nova/tests/functional/libvirt/test_device_bus_migration.py @@ -51,7 +51,7 @@ class LibvirtDeviceBusMigration(base.ServersTestBase): def 
_assert_stashed_image_properties_persist(self, server, properties): # Assert the stashed properties persist across a host reboot - self.restart_compute_service(self.compute) + self.restart_compute_service(self.compute_hostname) self._assert_stashed_image_properties(server['id'], properties) # Assert the stashed properties persist across a guest reboot @@ -173,7 +173,7 @@ class LibvirtDeviceBusMigration(base.ServersTestBase): self.flags(pointer_model='ps2mouse') # Restart compute to pick up ps2 setting, which means the guest will # not get a prescribed pointer device - self.restart_compute_service(self.compute) + self.restart_compute_service(self.compute_hostname) # Create a server with default image properties default_image_properties1 = { @@ -187,7 +187,7 @@ class LibvirtDeviceBusMigration(base.ServersTestBase): # Assert the defaults persist across a host flag change self.flags(pointer_model='usbtablet') # Restart compute to pick up usb setting - self.restart_compute_service(self.compute) + self.restart_compute_service(self.compute_hostname) self._assert_stashed_image_properties( server1['id'], default_image_properties1) @@ -216,7 +216,7 @@ class LibvirtDeviceBusMigration(base.ServersTestBase): # https://bugs.launchpad.net/nova/+bug/1866106 self.flags(pointer_model=None) # Restart compute to pick up None setting - self.restart_compute_service(self.compute) + self.restart_compute_service(self.compute_hostname) self._assert_stashed_image_properties( server1['id'], default_image_properties1) self._assert_stashed_image_properties( diff --git a/nova/tests/functional/libvirt/test_numa_live_migration.py b/nova/tests/functional/libvirt/test_numa_live_migration.py index 2f3897d6b2..0e504d2df2 100644 --- a/nova/tests/functional/libvirt/test_numa_live_migration.py +++ b/nova/tests/functional/libvirt/test_numa_live_migration.py @@ -206,10 +206,8 @@ class NUMALiveMigrationPositiveTests(NUMALiveMigrationPositiveBase): # Increase cpu_dedicated_set to 0-3, expecting the live 
migrated server # to end up on 2,3. self.flags(cpu_dedicated_set='0-3', group='compute') - self.computes['host_a'] = self.restart_compute_service( - self.computes['host_a']) - self.computes['host_b'] = self.restart_compute_service( - self.computes['host_b']) + self.restart_compute_service('host_a') + self.restart_compute_service('host_b') # Live migrate, RPC-pinning the destination host if asked if pin_dest: @@ -333,10 +331,8 @@ class NUMALiveMigrationRollbackTests(NUMALiveMigrationPositiveBase): # Increase cpu_dedicated_set to 0-3, expecting the live migrated server # to end up on 2,3. self.flags(cpu_dedicated_set='0-3', group='compute') - self.computes['host_a'] = self.restart_compute_service( - self.computes['host_a']) - self.computes['host_b'] = self.restart_compute_service( - self.computes['host_b']) + self.restart_compute_service('host_a') + self.restart_compute_service('host_b') # Live migrate, RPC-pinning the destination host if asked. This is a # rollback test, so server_a is expected to remain on host_a. 
diff --git a/nova/tests/functional/libvirt/test_numa_servers.py b/nova/tests/functional/libvirt/test_numa_servers.py index fd09a11e20..8fd9729404 100644 --- a/nova/tests/functional/libvirt/test_numa_servers.py +++ b/nova/tests/functional/libvirt/test_numa_servers.py @@ -1187,10 +1187,8 @@ class ReshapeForPCPUsTest(NUMAServersTestBase): self.flags(cpu_dedicated_set='0-7', group='compute') self.flags(vcpu_pin_set=None) - computes = {} - for host, compute in self.computes.items(): - computes[host] = self.restart_compute_service(compute) - self.computes = computes + for host in list(self.computes.keys()): + self.restart_compute_service(host) # verify that the inventory, usages and allocation are correct after # the reshape diff --git a/nova/tests/functional/libvirt/test_pci_sriov_servers.py b/nova/tests/functional/libvirt/test_pci_sriov_servers.py index c98a7534d1..6e5165134b 100644 --- a/nova/tests/functional/libvirt/test_pci_sriov_servers.py +++ b/nova/tests/functional/libvirt/test_pci_sriov_servers.py @@ -28,6 +28,7 @@ from oslo_utils import units import nova from nova import context +from nova import exception from nova.network import constants from nova import objects from nova.objects import fields @@ -366,31 +367,66 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase): expect_fail=False): # The purpose here is to force an observable PCI slot update when # moving from source to dest. This is accomplished by having a single - # PCI device on the source, 2 PCI devices on the test, and relying on - # the fact that our fake HostPCIDevicesInfo creates predictable PCI - # addresses. The PCI device on source and the first PCI device on dest - # will have identical PCI addresses. By sticking a "placeholder" - # instance on that first PCI device on the dest, the incoming instance - # from source will be forced to consume the second dest PCI device, - # with a different PCI address. 
+ # PCI VF device on the source, 2 PCI VF devices on the dest, and + # relying on the fact that our fake HostPCIDevicesInfo creates + # predictable PCI addresses. The PCI VF device on source and the first + # PCI VF device on dest will have identical PCI addresses. By sticking + # a "placeholder" instance on that first PCI VF device on the dest, the + # incoming instance from source will be forced to consume the second + # dest PCI VF device, with a different PCI address. + # We want to test server operations with SRIOV VFs and SRIOV PFs so + # the config of the compute hosts also have one extra PCI PF devices + # without any VF children. But the two compute has different PCI PF + # addresses and MAC so that the test can observe the slot update as + # well as the MAC updated during migration and after revert. + source_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1) + # add an extra PF without VF to be used by direct-physical ports + source_pci_info.add_device( + dev_type='PF', + bus=0x82, # the HostPCIDevicesInfo use the 0x81 by default + slot=0x0, + function=0, + iommu_group=42, + numa_node=0, + vf_ratio=0, + mac_address='b4:96:91:34:f4:aa', + ) self.start_compute( hostname='source', - pci_info=fakelibvirt.HostPCIDevicesInfo( - num_pfs=1, num_vfs=1)) + pci_info=source_pci_info) + + dest_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=2) + # add an extra PF without VF to be used by direct-physical ports + dest_pci_info.add_device( + dev_type='PF', + bus=0x82, # the HostPCIDevicesInfo use the 0x81 by default + slot=0x6, # make it different from the source host + function=0, + iommu_group=42, + numa_node=0, + vf_ratio=0, + mac_address='b4:96:91:34:f4:bb', + ) self.start_compute( hostname='dest', - pci_info=fakelibvirt.HostPCIDevicesInfo( - num_pfs=1, num_vfs=2)) + pci_info=dest_pci_info) source_port = self.neutron.create_port( {'port': self.neutron.network_4_port_1}) + source_pf_port = self.neutron.create_port( + {'port': 
self.neutron.network_4_port_pf}) dest_port1 = self.neutron.create_port( {'port': self.neutron.network_4_port_2}) dest_port2 = self.neutron.create_port( {'port': self.neutron.network_4_port_3}) source_server = self._create_server( - networks=[{'port': source_port['port']['id']}], host='source') + networks=[ + {'port': source_port['port']['id']}, + {'port': source_pf_port['port']['id']} + ], + host='source', + ) dest_server1 = self._create_server( networks=[{'port': dest_port1['port']['id']}], host='dest') dest_server2 = self._create_server( @@ -398,6 +434,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase): # Refresh the ports. source_port = self.neutron.show_port(source_port['port']['id']) + source_pf_port = self.neutron.show_port(source_pf_port['port']['id']) dest_port1 = self.neutron.show_port(dest_port1['port']['id']) dest_port2 = self.neutron.show_port(dest_port2['port']['id']) @@ -413,11 +450,24 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase): same_slot_port = dest_port2 self._delete_server(dest_server1) - # Before moving, explictly assert that the servers on source and dest + # Before moving, explicitly assert that the servers on source and dest # have the same pci_slot in their port's binding profile self.assertEqual(source_port['port']['binding:profile']['pci_slot'], same_slot_port['port']['binding:profile']['pci_slot']) + # Assert that the direct-physical port got the pci_slot information + # according to the source host PF PCI device. + self.assertEqual( + '0000:82:00.0', # which is in sync with the source host pci_info + source_pf_port['port']['binding:profile']['pci_slot'] + ) + # Assert that the direct-physical port is updated with the MAC address + # of the PF device from the source host + self.assertEqual( + 'b4:96:91:34:f4:aa', + source_pf_port['port']['binding:profile']['device_mac_address'] + ) + # Before moving, assert that the servers on source and dest have the # same PCI source address in their XML for their SRIOV nic. 
source_conn = self.computes['source'].driver._host.get_connection() @@ -434,14 +484,28 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase): move_operation(source_server) # Refresh the ports again, keeping in mind the source_port is now bound - # on the dest after unshelving. + # on the dest after the move. source_port = self.neutron.show_port(source_port['port']['id']) same_slot_port = self.neutron.show_port(same_slot_port['port']['id']) + source_pf_port = self.neutron.show_port(source_pf_port['port']['id']) self.assertNotEqual( source_port['port']['binding:profile']['pci_slot'], same_slot_port['port']['binding:profile']['pci_slot']) + # Assert that the direct-physical port got the pci_slot information + # according to the dest host PF PCI device. + self.assertEqual( + '0000:82:06.0', # which is in sync with the dest host pci_info + source_pf_port['port']['binding:profile']['pci_slot'] + ) + # Assert that the direct-physical port is updated with the MAC address + # of the PF device from the dest host + self.assertEqual( + 'b4:96:91:34:f4:bb', + source_pf_port['port']['binding:profile']['device_mac_address'] + ) + conn = self.computes['dest'].driver._host.get_connection() vms = [vm._def for vm in conn._vms.values()] self.assertEqual(2, len(vms)) @@ -469,6 +533,169 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase): self._confirm_resize(source_server) self._test_move_operation_with_neutron(move_operation) + def test_cold_migrate_and_rever_server_with_neutron(self): + # The purpose here is to force an observable PCI slot update when + # moving from source to dest and the from dest to source after the + # revert. This is accomplished by having a single + # PCI VF device on the source, 2 PCI VF devices on the dest, and + # relying on the fact that our fake HostPCIDevicesInfo creates + # predictable PCI addresses. The PCI VF device on source and the first + # PCI VF device on dest will have identical PCI addresses. 
By sticking + # a "placeholder" instance on that first PCI VF device on the dest, the + # incoming instance from source will be forced to consume the second + # dest PCI VF device, with a different PCI address. + # We want to test server operations with SRIOV VFs and SRIOV PFs so + # the config of the compute hosts also have one extra PCI PF devices + # without any VF children. But the two compute has different PCI PF + # addresses and MAC so that the test can observe the slot update as + # well as the MAC updated during migration and after revert. + source_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1) + # add an extra PF without VF to be used by direct-physical ports + source_pci_info.add_device( + dev_type='PF', + bus=0x82, # the HostPCIDevicesInfo use the 0x81 by default + slot=0x0, + function=0, + iommu_group=42, + numa_node=0, + vf_ratio=0, + mac_address='b4:96:91:34:f4:aa', + ) + self.start_compute( + hostname='source', + pci_info=source_pci_info) + dest_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=2) + # add an extra PF without VF to be used by direct-physical ports + dest_pci_info.add_device( + dev_type='PF', + bus=0x82, # the HostPCIDevicesInfo use the 0x81 by default + slot=0x6, # make it different from the source host + function=0, + iommu_group=42, + numa_node=0, + vf_ratio=0, + mac_address='b4:96:91:34:f4:bb', + ) + self.start_compute( + hostname='dest', + pci_info=dest_pci_info) + source_port = self.neutron.create_port( + {'port': self.neutron.network_4_port_1}) + source_pf_port = self.neutron.create_port( + {'port': self.neutron.network_4_port_pf}) + dest_port1 = self.neutron.create_port( + {'port': self.neutron.network_4_port_2}) + dest_port2 = self.neutron.create_port( + {'port': self.neutron.network_4_port_3}) + source_server = self._create_server( + networks=[ + {'port': source_port['port']['id']}, + {'port': source_pf_port['port']['id']} + ], + host='source', + ) + dest_server1 = self._create_server( + 
networks=[{'port': dest_port1['port']['id']}], host='dest') + dest_server2 = self._create_server( + networks=[{'port': dest_port2['port']['id']}], host='dest') + # Refresh the ports. + source_port = self.neutron.show_port(source_port['port']['id']) + source_pf_port = self.neutron.show_port(source_pf_port['port']['id']) + dest_port1 = self.neutron.show_port(dest_port1['port']['id']) + dest_port2 = self.neutron.show_port(dest_port2['port']['id']) + # Find the server on the dest compute that's using the same pci_slot as + # the server on the source compute, and delete the other one to make + # room for the incoming server from the source. + source_pci_slot = source_port['port']['binding:profile']['pci_slot'] + dest_pci_slot1 = dest_port1['port']['binding:profile']['pci_slot'] + if dest_pci_slot1 == source_pci_slot: + same_slot_port = dest_port1 + self._delete_server(dest_server2) + else: + same_slot_port = dest_port2 + self._delete_server(dest_server1) + # Before moving, explicitly assert that the servers on source and dest + # have the same pci_slot in their port's binding profile + self.assertEqual(source_port['port']['binding:profile']['pci_slot'], + same_slot_port['port']['binding:profile']['pci_slot']) + # Assert that the direct-physical port got the pci_slot information + # according to the source host PF PCI device. + self.assertEqual( + '0000:82:00.0', # which is in sync with the source host pci_info + source_pf_port['port']['binding:profile']['pci_slot'] + ) + # Assert that the direct-physical port is updated with the MAC address + # of the PF device from the source host + self.assertEqual( + 'b4:96:91:34:f4:aa', + source_pf_port['port']['binding:profile']['device_mac_address'] + ) + # Before moving, assert that the servers on source and dest have the + # same PCI source address in their XML for their SRIOV nic. 
+ source_conn = self.computes['source'].driver._host.get_connection() + dest_conn = self.computes['source'].driver._host.get_connection() + source_vms = [vm._def for vm in source_conn._vms.values()] + dest_vms = [vm._def for vm in dest_conn._vms.values()] + self.assertEqual(1, len(source_vms)) + self.assertEqual(1, len(dest_vms)) + self.assertEqual(1, len(source_vms[0]['devices']['nics'])) + self.assertEqual(1, len(dest_vms[0]['devices']['nics'])) + self.assertEqual(source_vms[0]['devices']['nics'][0]['source'], + dest_vms[0]['devices']['nics'][0]['source']) + + # TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should + # probably be less...dumb + with mock.patch('nova.virt.libvirt.driver.LibvirtDriver' + '.migrate_disk_and_power_off', return_value='{}'): + self._migrate_server(source_server) + + # Refresh the ports again, keeping in mind the ports are now bound + # on the dest after migrating. + source_port = self.neutron.show_port(source_port['port']['id']) + same_slot_port = self.neutron.show_port(same_slot_port['port']['id']) + source_pf_port = self.neutron.show_port(source_pf_port['port']['id']) + self.assertNotEqual( + source_port['port']['binding:profile']['pci_slot'], + same_slot_port['port']['binding:profile']['pci_slot']) + # Assert that the direct-physical port got the pci_slot information + # according to the dest host PF PCI device. 
+ self.assertEqual( + '0000:82:06.0', # which is in sync with the dest host pci_info + source_pf_port['port']['binding:profile']['pci_slot'] + ) + # Assert that the direct-physical port is updated with the MAC address + # of the PF device from the dest host + self.assertEqual( + 'b4:96:91:34:f4:bb', + source_pf_port['port']['binding:profile']['device_mac_address'] + ) + conn = self.computes['dest'].driver._host.get_connection() + vms = [vm._def for vm in conn._vms.values()] + self.assertEqual(2, len(vms)) + for vm in vms: + self.assertEqual(1, len(vm['devices']['nics'])) + self.assertNotEqual(vms[0]['devices']['nics'][0]['source'], + vms[1]['devices']['nics'][0]['source']) + + self._revert_resize(source_server) + + # Refresh the ports again, keeping in mind the ports are now bound + # on the source as the migration is reverted + source_pf_port = self.neutron.show_port(source_pf_port['port']['id']) + + # Assert that the direct-physical port got the pci_slot information + # according to the source host PF PCI device. 
+ self.assertEqual( + '0000:82:00.0', # which is in sync with the source host pci_info + source_pf_port['port']['binding:profile']['pci_slot'] + ) + # Assert that the direct-physical port is updated with the MAC address + # of the PF device from the source host + self.assertEqual( + 'b4:96:91:34:f4:aa', + source_pf_port['port']['binding:profile']['device_mac_address'] + ) + def test_evacuate_server_with_neutron(self): def move_operation(source_server): # Down the source compute to enable the evacuation @@ -486,17 +713,44 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase): """ # start two compute services with differing PCI device inventory - self.start_compute( - hostname='test_compute0', - pci_info=fakelibvirt.HostPCIDevicesInfo( - num_pfs=2, num_vfs=8, numa_node=0)) - self.start_compute( - hostname='test_compute1', - pci_info=fakelibvirt.HostPCIDevicesInfo( - num_pfs=1, num_vfs=2, numa_node=1)) + source_pci_info = fakelibvirt.HostPCIDevicesInfo( + num_pfs=2, num_vfs=8, numa_node=0) + # add an extra PF without VF to be used by direct-physical ports + source_pci_info.add_device( + dev_type='PF', + bus=0x82, # the HostPCIDevicesInfo use the 0x81 by default + slot=0x0, + function=0, + iommu_group=42, + numa_node=0, + vf_ratio=0, + mac_address='b4:96:91:34:f4:aa', + ) + self.start_compute(hostname='test_compute0', pci_info=source_pci_info) - # create the port - self.neutron.create_port({'port': self.neutron.network_4_port_1}) + dest_pci_info = fakelibvirt.HostPCIDevicesInfo( + num_pfs=1, num_vfs=2, numa_node=1) + # add an extra PF without VF to be used by direct-physical ports + dest_pci_info.add_device( + dev_type='PF', + bus=0x82, # the HostPCIDevicesInfo use the 0x81 by default + slot=0x6, # make it different from the source host + function=0, + iommu_group=42, + # numa node needs to be aligned with the other pci devices in this + # host as the instance needs to fit into a single host numa node + numa_node=1, + vf_ratio=0, + 
mac_address='b4:96:91:34:f4:bb', + ) + + self.start_compute(hostname='test_compute1', pci_info=dest_pci_info) + + # create the ports + port = self.neutron.create_port( + {'port': self.neutron.network_4_port_1})['port'] + pf_port = self.neutron.create_port( + {'port': self.neutron.network_4_port_pf})['port'] # create a server using the VF via neutron extra_spec = {'hw:cpu_policy': 'dedicated'} @@ -504,7 +758,8 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase): server = self._create_server( flavor_id=flavor_id, networks=[ - {'port': base.LibvirtNeutronFixture.network_4_port_1['id']}, + {'port': port['id']}, + {'port': pf_port['id']}, ], host='test_compute0', ) @@ -512,8 +767,8 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase): # our source host should have marked two PCI devices as used, the VF # and the parent PF, while the future destination is currently unused self.assertEqual('test_compute0', server['OS-EXT-SRV-ATTR:host']) - self.assertPCIDeviceCounts('test_compute0', total=10, free=8) - self.assertPCIDeviceCounts('test_compute1', total=3, free=3) + self.assertPCIDeviceCounts('test_compute0', total=11, free=8) + self.assertPCIDeviceCounts('test_compute1', total=4, free=4) # the instance should be on host NUMA node 0, since that's where our # PCI devices are @@ -544,13 +799,26 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase): port['binding:profile'], ) + # ensure the binding details sent to "neutron" are correct + pf_port = self.neutron.show_port(pf_port['id'],)['port'] + self.assertIn('binding:profile', pf_port) + self.assertEqual( + { + 'pci_vendor_info': '8086:1528', + 'pci_slot': '0000:82:00.0', + 'physical_network': 'physnet4', + 'device_mac_address': 'b4:96:91:34:f4:aa', + }, + pf_port['binding:profile'], + ) + # now live migrate that server self._live_migrate(server, 'completed') # we should now have transitioned our usage to the destination, freeing # up the source in the process - self.assertPCIDeviceCounts('test_compute0', 
total=10, free=10) - self.assertPCIDeviceCounts('test_compute1', total=3, free=1) + self.assertPCIDeviceCounts('test_compute0', total=11, free=11) + self.assertPCIDeviceCounts('test_compute1', total=4, free=1) # the instance should now be on host NUMA node 1, since that's where # our PCI devices are for this second host @@ -577,6 +845,18 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase): }, port['binding:profile'], ) + # ensure the binding details sent to "neutron" are correct + pf_port = self.neutron.show_port(pf_port['id'],)['port'] + self.assertIn('binding:profile', pf_port) + self.assertEqual( + { + 'pci_vendor_info': '8086:1528', + 'pci_slot': '0000:82:06.0', + 'physical_network': 'physnet4', + 'device_mac_address': 'b4:96:91:34:f4:bb', + }, + pf_port['binding:profile'], + ) def test_get_server_diagnostics_server_with_VF(self): """Ensure server disagnostics include info on VF-type PCI devices.""" @@ -635,11 +915,8 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase): # Disable SRIOV capabilties in PF and delete the VFs self._disable_sriov_in_pf(pci_info_no_sriov) - fake_connection = self._get_connection(pci_info=pci_info_no_sriov, - hostname='test_compute0') - self.mock_conn.return_value = fake_connection - - self.compute = self.start_service('compute', host='test_compute0') + self.start_compute('test_compute0', pci_info=pci_info_no_sriov) + self.compute = self.computes['test_compute0'] ctxt = context.get_admin_context() pci_devices = objects.PciDeviceList.get_by_compute_node( @@ -651,13 +928,9 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase): self.assertEqual(1, len(pci_devices)) self.assertEqual('type-PCI', pci_devices[0].dev_type) - # Update connection with original pci info with sriov PFs - fake_connection = self._get_connection(pci_info=pci_info, - hostname='test_compute0') - self.mock_conn.return_value = fake_connection - - # Restart the compute service - self.restart_compute_service(self.compute) + # Restart the compute 
service with sriov PFs + self.restart_compute_service( + self.compute.host, pci_info=pci_info, keep_hypervisor_state=False) # Verify if PCI devices are of type type-PF or type-VF pci_devices = objects.PciDeviceList.get_by_compute_node( @@ -679,6 +952,88 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase): ], ) + def test_change_bound_port_vnic_type_kills_compute_at_restart(self): + """Create a server with a direct port and change the vnic_type of the + bound port to macvtap. Then restart the compute service. + + As the vnic_type is changed on the port but the vif_type is hwveb + instead of macvtap the vif plug logic will try to look up the netdev + of the parent VF. Howvere that VF consumed by the instance so the + netdev does not exists. This causes that the compute service will fail + with an exception during startup + """ + pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=2) + self.start_compute(pci_info=pci_info) + + # create a direct port + port = self.neutron.network_4_port_1 + self.neutron.create_port({'port': port}) + + # create a server using the VF via neutron + server = self._create_server(networks=[{'port': port['id']}]) + + # update the vnic_type of the port in neutron + port = copy.deepcopy(port) + port['binding:vnic_type'] = 'macvtap' + self.neutron.update_port(port['id'], {"port": port}) + + compute = self.computes['compute1'] + + # Force an update on the instance info cache to ensure nova gets the + # information about the updated port + with context.target_cell( + context.get_admin_context(), + self.host_mappings['compute1'].cell_mapping + ) as cctxt: + compute.manager._heal_instance_info_cache(cctxt) + self.assertIn( + 'The vnic_type of the bound port %s has been changed in ' + 'neutron from "direct" to "macvtap". Changing vnic_type of a ' + 'bound port is not supported by Nova. To avoid breaking the ' + 'connectivity of the instance please change the port ' + 'vnic_type back to "direct".' 
% port['id'], + self.stdlog.logger.output, + ) + + def fake_get_ifname_by_pci_address(pci_addr: str, pf_interface=False): + # we want to fail the netdev lookup only if the pci_address is + # already consumed by our instance. So we look into the instance + # definition to see if the device is attached to the instance as VF + conn = compute.manager.driver._host.get_connection() + dom = conn.lookupByUUIDString(server['id']) + dev = dom._def['devices']['nics'][0] + lookup_addr = pci_addr.replace(':', '_').replace('.', '_') + if ( + dev['type'] == 'hostdev' and + dev['source'] == 'pci_' + lookup_addr + ): + # nova tried to look up the netdev of an already consumed VF. + # So we have to fail + raise exception.PciDeviceNotFoundById(id=pci_addr) + + # We need to simulate the actual failure manually as in our functional + # environment all the PCI lookup is mocked. In reality nova tries to + # look up the netdev of the pci device on the host used by the port as + # the parent of the macvtap. However, as the originally direct port is + # bound to the instance, the VF pci device is already consumed by the + # instance and therefore there is no netdev for the VF. + with mock.patch( + 'nova.pci.utils.get_ifname_by_pci_address', + side_effect=fake_get_ifname_by_pci_address, + ): + # Nova cannot prevent the vnic_type change on a bound port. Neutron + # should prevent that instead. But the nova-compute should still + # be able to start up and only log an ERROR for this instance in + # inconsistent state. + self.restart_compute_service('compute1') + + self.assertIn( + 'Virtual interface plugging failed for instance. Probably the ' + 'vnic_type of the bound port has been changed. 
Nova does not ' + 'support such change.', + self.stdlog.logger.output, + ) + class SRIOVAttachDetachTest(_PCIServersTestBase): # no need for aliases as these test will request SRIOV via neutron @@ -742,10 +1097,9 @@ class SRIOVAttachDetachTest(_PCIServersTestBase): host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2) pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1) - fake_connection = self._get_connection(host_info, pci_info) - self.mock_conn.return_value = fake_connection - - self.compute = self.start_service('compute', host='test_compute0') + self.start_compute( + 'test_compute0', host_info=host_info, pci_info=pci_info) + self.compute = self.computes['test_compute0'] # Create server with a port server = self._create_server(networks=[{'port': first_port_id}]) @@ -834,7 +1188,7 @@ class VDPAServersTest(_PCIServersTestBase): # fixture already stubbed. self.neutron = self.useFixture(base.LibvirtNeutronFixture(self)) - def start_compute(self): + def start_vdpa_compute(self, hostname='compute-0'): vf_ratio = self.NUM_VFS // self.NUM_PFS pci_info = fakelibvirt.HostPCIDevicesInfo( @@ -872,7 +1226,7 @@ class VDPAServersTest(_PCIServersTestBase): driver_name='mlx5_core') vdpa_info.add_device(f'vdpa_vdpa{idx}', idx, vf) - return super().start_compute( + return super().start_compute(hostname=hostname, pci_info=pci_info, vdpa_info=vdpa_info, libvirt_version=self.FAKE_LIBVIRT_VERSION, qemu_version=self.FAKE_QEMU_VERSION) @@ -927,7 +1281,7 @@ class VDPAServersTest(_PCIServersTestBase): fake_create, ) - hostname = self.start_compute() + hostname = self.start_vdpa_compute() num_pci = self.NUM_PFS + self.NUM_VFS # both the PF and VF with vDPA capabilities (dev_type=vdpa) should have @@ -960,12 +1314,16 @@ class VDPAServersTest(_PCIServersTestBase): port['binding:profile'], ) - def _test_common(self, op, *args, **kwargs): - self.start_compute() - + def _create_port_and_server(self): # create the port and a server, with the port 
attached to the server vdpa_port = self.create_vdpa_port() server = self._create_server(networks=[{'port': vdpa_port['id']}]) + return vdpa_port, server + + def _test_common(self, op, *args, **kwargs): + self.start_vdpa_compute() + + vdpa_port, server = self._create_port_and_server() # attempt the unsupported action and ensure it fails ex = self.assertRaises( @@ -976,13 +1334,11 @@ class VDPAServersTest(_PCIServersTestBase): ex.response.text) def test_attach_interface(self): - self.start_compute() - + self.start_vdpa_compute() # create the port and a server, but don't attach the port to the server # yet vdpa_port = self.create_vdpa_port() server = self._create_server(networks='none') - # attempt to attach the port to the server ex = self.assertRaises( client.OpenStackApiException, @@ -994,21 +1350,282 @@ class VDPAServersTest(_PCIServersTestBase): def test_detach_interface(self): self._test_common(self._detach_interface, uuids.vdpa_port) - def test_shelve(self): - self._test_common(self._shelve_server) + def test_shelve_offload(self): + hostname = self.start_vdpa_compute() + vdpa_port, server = self._create_port_and_server() + # assert the port is bound to the vm and the compute host + port = self.neutron.show_port(vdpa_port['id'])['port'] + self.assertEqual(server['id'], port['device_id']) + self.assertEqual(hostname, port['binding:host_id']) + num_pci = self.NUM_PFS + self.NUM_VFS + # -2 we claim the vdpa device which make the parent PF unavailable + self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2) + server = self._shelve_server(server) + # now that the vm is shelve offloaded it should not be bound + # to any host but should still be owned by the vm + port = self.neutron.show_port(vdpa_port['id'])['port'] + self.assertEqual(server['id'], port['device_id']) + # FIXME(sean-k-mooney): we should be unbinding the port from + # the host when we shelve offload but we don't today. + # This is unrelated to vdpa port and is a general issue. 
+ self.assertEqual(hostname, port['binding:host_id']) + self.assertIn('binding:profile', port) + self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname']) + self.assertIsNone(server['OS-EXT-SRV-ATTR:host']) + self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci) - def test_suspend(self): - self._test_common(self._suspend_server) + def test_unshelve_to_same_host(self): + hostname = self.start_vdpa_compute() + num_pci = self.NUM_PFS + self.NUM_VFS + self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci) + + vdpa_port, server = self._create_port_and_server() + self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2) + self.assertEqual( + hostname, server['OS-EXT-SRV-ATTR:hypervisor_hostname']) + port = self.neutron.show_port(vdpa_port['id'])['port'] + self.assertEqual(hostname, port['binding:host_id']) + + server = self._shelve_server(server) + self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci) + self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname']) + port = self.neutron.show_port(vdpa_port['id'])['port'] + # FIXME(sean-k-mooney): shelve offload should unbind the port + # self.assertEqual('', port['binding:host_id']) + self.assertEqual(hostname, port['binding:host_id']) + + server = self._unshelve_server(server) + self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2) + self.assertEqual( + hostname, server['OS-EXT-SRV-ATTR:hypervisor_hostname']) + port = self.neutron.show_port(vdpa_port['id'])['port'] + self.assertEqual(hostname, port['binding:host_id']) + + def test_unshelve_to_different_host(self): + source = self.start_vdpa_compute(hostname='source') + dest = self.start_vdpa_compute(hostname='dest') + + num_pci = self.NUM_PFS + self.NUM_VFS + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci) + self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci) + + # ensure we boot the vm on the "source" compute + self.api.put_service( + 
self.computes['dest'].service_ref.uuid, {'status': 'disabled'}) + vdpa_port, server = self._create_port_and_server() + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2) + self.assertEqual( + source, server['OS-EXT-SRV-ATTR:hypervisor_hostname']) + port = self.neutron.show_port(vdpa_port['id'])['port'] + self.assertEqual(source, port['binding:host_id']) + + server = self._shelve_server(server) + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci) + self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname']) + port = self.neutron.show_port(vdpa_port['id'])['port'] + # FIXME(sean-k-mooney): shelve should unbind the port + # self.assertEqual('', port['binding:host_id']) + self.assertEqual(source, port['binding:host_id']) + + # force the unshelve to the other host + self.api.put_service( + self.computes['source'].service_ref.uuid, {'status': 'disabled'}) + self.api.put_service( + self.computes['dest'].service_ref.uuid, {'status': 'enabled'}) + self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci) + server = self._unshelve_server(server) + # the dest devices should be claimed + self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2) + # and the source host devices should still be free + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci) + self.assertEqual( + dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname']) + port = self.neutron.show_port(vdpa_port['id'])['port'] + self.assertEqual(dest, port['binding:host_id']) def test_evacute(self): - self._test_common(self._evacuate_server) + source = self.start_vdpa_compute(hostname='source') + dest = self.start_vdpa_compute(hostname='dest') - def test_resize(self): - flavor_id = self._create_flavor() - self._test_common(self._resize_server, flavor_id) + num_pci = self.NUM_PFS + self.NUM_VFS + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci) + self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci) + + # ensure we boot the vm on the "source" 
compute + self.api.put_service( + self.computes['dest'].service_ref.uuid, {'status': 'disabled'}) + vdpa_port, server = self._create_port_and_server() + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2) + self.assertEqual( + source, server['OS-EXT-SRV-ATTR:hypervisor_hostname']) + port = self.neutron.show_port(vdpa_port['id'])['port'] + self.assertEqual(source, port['binding:host_id']) + + # stop the source compute and enable the dest + self.api.put_service( + self.computes['dest'].service_ref.uuid, {'status': 'enabled'}) + self.computes['source'].stop() + # Down the source compute to enable the evacuation + self.api.put_service( + self.computes['source'].service_ref.uuid, {'forced_down': True}) + + self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci) + server = self._evacuate_server(server) + self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2) + self.assertEqual( + dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname']) + port = self.neutron.show_port(vdpa_port['id'])['port'] + self.assertEqual(dest, port['binding:host_id']) + + # as the source compute is offline the pci claims will not be cleaned + # up on the source compute. + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2) + # but if you fix/restart the source node the allocations for evacuated + # instances should be released. + self.restart_compute_service(source) + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci) + + def test_resize_same_host(self): + self.flags(allow_resize_to_same_host=True) + num_pci = self.NUM_PFS + self.NUM_VFS + source = self.start_vdpa_compute() + vdpa_port, server = self._create_port_and_server() + # before we resize the vm should be using 1 VF but that will mark + # the PF as unavailable so we assert 2 devices are in use. 
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2) + flavor_id = self._create_flavor(name='new-flavor') + self.assertNotEqual(server['flavor']['original_name'], 'new-flavor') + with mock.patch( + 'nova.virt.libvirt.driver.LibvirtDriver' + '.migrate_disk_and_power_off', return_value='{}', + ): + server = self._resize_server(server, flavor_id) + self.assertEqual( + server['flavor']['original_name'], 'new-flavor') + # in resize verify the VF claims should be doubled even + # for same host resize so assert that 3 are in devices in use + # 1 PF and 2 VFs . + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 3) + server = self._confirm_resize(server) + # but once we confrim it should be reduced back to 1 PF and 1 VF + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2) + # assert the hostname has not have changed as part + # of the resize. + self.assertEqual( + source, server['OS-EXT-SRV-ATTR:hypervisor_hostname']) + + def test_resize_different_host(self): + self.flags(allow_resize_to_same_host=False) + source = self.start_vdpa_compute(hostname='source') + dest = self.start_vdpa_compute(hostname='dest') + + num_pci = self.NUM_PFS + self.NUM_VFS + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci) + self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci) + + # ensure we boot the vm on the "source" compute + self.api.put_service( + self.computes['dest'].service_ref.uuid, {'status': 'disabled'}) + vdpa_port, server = self._create_port_and_server() + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2) + flavor_id = self._create_flavor(name='new-flavor') + self.assertNotEqual(server['flavor']['original_name'], 'new-flavor') + # disable the source compute and enable the dest + self.api.put_service( + self.computes['source'].service_ref.uuid, {'status': 'disabled'}) + self.api.put_service( + self.computes['dest'].service_ref.uuid, {'status': 'enabled'}) + with mock.patch( + 
'nova.virt.libvirt.driver.LibvirtDriver' + '.migrate_disk_and_power_off', return_value='{}', + ): + server = self._resize_server(server, flavor_id) + self.assertEqual( + server['flavor']['original_name'], 'new-flavor') + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2) + self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2) + server = self._confirm_resize(server) + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci) + self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2) + self.assertEqual( + dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname']) + + def test_resize_revert(self): + self.flags(allow_resize_to_same_host=False) + source = self.start_vdpa_compute(hostname='source') + dest = self.start_vdpa_compute(hostname='dest') + + num_pci = self.NUM_PFS + self.NUM_VFS + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci) + self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci) + + # ensure we boot the vm on the "source" compute + self.api.put_service( + self.computes['dest'].service_ref.uuid, {'status': 'disabled'}) + vdpa_port, server = self._create_port_and_server() + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2) + flavor_id = self._create_flavor(name='new-flavor') + self.assertNotEqual(server['flavor']['original_name'], 'new-flavor') + # disable the source compute and enable the dest + self.api.put_service( + self.computes['source'].service_ref.uuid, {'status': 'disabled'}) + self.api.put_service( + self.computes['dest'].service_ref.uuid, {'status': 'enabled'}) + with mock.patch( + 'nova.virt.libvirt.driver.LibvirtDriver' + '.migrate_disk_and_power_off', return_value='{}', + ): + server = self._resize_server(server, flavor_id) + self.assertEqual( + server['flavor']['original_name'], 'new-flavor') + # in resize verify both the dest and source pci claims should be + # present. 
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2) + self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2) + server = self._revert_resize(server) + # but once we revert the dest claims should be freed. + self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci) + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2) + self.assertEqual( + source, server['OS-EXT-SRV-ATTR:hypervisor_hostname']) def test_cold_migrate(self): - self._test_common(self._migrate_server) + source = self.start_vdpa_compute(hostname='source') + dest = self.start_vdpa_compute(hostname='dest') + + num_pci = self.NUM_PFS + self.NUM_VFS + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci) + self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci) + + # ensure we boot the vm on the "source" compute + self.api.put_service( + self.computes['dest'].service_ref.uuid, {'status': 'disabled'}) + vdpa_port, server = self._create_port_and_server() + self.assertEqual( + source, server['OS-EXT-SRV-ATTR:hypervisor_hostname']) + + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2) + # enable the dest we do not need to disable the source since cold + # migrate wont happen to the same host in the libvirt driver + self.api.put_service( + self.computes['dest'].service_ref.uuid, {'status': 'enabled'}) + with mock.patch( + 'nova.virt.libvirt.driver.LibvirtDriver' + '.migrate_disk_and_power_off', return_value='{}', + ): + server = self._migrate_server(server) + self.assertEqual( + dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname']) + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2) + self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2) + server = self._confirm_resize(server) + self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci) + self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2) + self.assertEqual( + dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname']) + + def 
test_suspend(self): + self._test_common(self._suspend_server) class PCIServersTest(_PCIServersTestBase): diff --git a/nova/tests/functional/libvirt/test_reshape.py b/nova/tests/functional/libvirt/test_reshape.py index 5c73ffbf5f..d0102f1247 100644 --- a/nova/tests/functional/libvirt/test_reshape.py +++ b/nova/tests/functional/libvirt/test_reshape.py @@ -30,17 +30,7 @@ LOG = logging.getLogger(__name__) class VGPUReshapeTests(base.ServersTestBase): - @mock.patch('nova.virt.libvirt.LibvirtDriver._get_local_gb_info', - return_value={'total': 128, - 'used': 44, - 'free': 84}) - @mock.patch('nova.virt.libvirt.driver.libvirt_utils.is_valid_hostname', - return_value=True) - @mock.patch('nova.virt.libvirt.driver.libvirt_utils.file_open', - side_effect=[io.BytesIO(b''), io.BytesIO(b''), - io.BytesIO(b'')]) - def test_create_servers_with_vgpu( - self, mock_file_open, mock_valid_hostname, mock_get_fs_info): + def test_create_servers_with_vgpu(self): """Verify that vgpu reshape works with libvirt driver 1) create two servers with an old tree where the VGPU resource is on @@ -49,7 +39,8 @@ class VGPUReshapeTests(base.ServersTestBase): 3) check that the allocations of the servers are still valid 4) create another server now against the new tree """ - + self.mock_file_open.side_effect = [ + io.BytesIO(b''), io.BytesIO(b''), io.BytesIO(b'')] # NOTE(gibi): We cannot simply ask the virt driver to create an old # RP tree with vgpu on the root RP as that code path does not exist # any more. So we have to hack a "bit". 
We will create a compute @@ -81,11 +72,11 @@ class VGPUReshapeTests(base.ServersTestBase): # ignore the content of the above HostMdevDeviceInfo self.flags(enabled_mdev_types='', group='devices') - hostname = self.start_compute( + self.hostname = self.start_compute( hostname='compute1', mdev_info=fakelibvirt.HostMdevDevicesInfo(devices=mdevs), ) - self.compute = self.computes[hostname] + self.compute = self.computes[self.hostname] # create the VGPU resource in placement manually compute_rp_uuid = self.placement.get( @@ -167,7 +158,7 @@ class VGPUReshapeTests(base.ServersTestBase): allocations[compute_rp_uuid]['resources']) # restart compute which will trigger a reshape - self.compute = self.restart_compute_service(self.compute) + self.compute = self.restart_compute_service(self.hostname) # verify that the inventory, usages and allocation are correct after # the reshape diff --git a/nova/tests/functional/libvirt/test_vgpu.py b/nova/tests/functional/libvirt/test_vgpu.py index f25ce44221..686582120a 100644 --- a/nova/tests/functional/libvirt/test_vgpu.py +++ b/nova/tests/functional/libvirt/test_vgpu.py @@ -49,11 +49,11 @@ class VGPUTestBase(base.ServersTestBase): def setUp(self): super(VGPUTestBase, self).setUp() - self.useFixture(fixtures.MockPatch( - 'nova.virt.libvirt.LibvirtDriver._get_local_gb_info', - return_value={'total': 128, - 'used': 44, - 'free': 84})) + libvirt_driver.LibvirtDriver._get_local_gb_info.return_value = { + 'total': 128, + 'used': 44, + 'free': 84, + } self.useFixture(fixtures.MockPatch( 'nova.privsep.libvirt.create_mdev', side_effect=self._create_mdev)) @@ -113,8 +113,8 @@ class VGPUTestBase(base.ServersTestBase): parent=libvirt_parent)}) return uuid - def start_compute(self, hostname): - hostname = super().start_compute( + def start_compute_with_vgpu(self, hostname): + hostname = self.start_compute( pci_info=fakelibvirt.HostPCIDevicesInfo( num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2, ), @@ -197,7 +197,7 @@ class VGPUTests(VGPUTestBase): 
enabled_mdev_types=fakelibvirt.NVIDIA_11_VGPU_TYPE, group='devices') - self.compute1 = self.start_compute('host1') + self.compute1 = self.start_compute_with_vgpu('host1') def assert_vgpu_usage_for_compute(self, compute, expected): self.assert_mdev_usage(compute, expected_amount=expected) @@ -211,7 +211,7 @@ class VGPUTests(VGPUTestBase): def test_resize_servers_with_vgpu(self): # Add another compute for the sake of resizing - self.compute2 = self.start_compute('host2') + self.compute2 = self.start_compute_with_vgpu('host2') server = self._create_server( image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', flavor_id=self.flavor, host=self.compute1.host, @@ -337,7 +337,7 @@ class VGPUMultipleTypesTests(VGPUTestBase): # Prepare traits for later on self._create_trait('CUSTOM_NVIDIA_11') self._create_trait('CUSTOM_NVIDIA_12') - self.compute1 = self.start_compute('host1') + self.compute1 = self.start_compute_with_vgpu('host1') def test_create_servers_with_vgpu(self): self._create_server( @@ -369,13 +369,12 @@ class VGPUMultipleTypesTests(VGPUTestBase): def test_create_servers_with_specific_type(self): # Regenerate the PCI addresses so both pGPUs now support nvidia-12 - connection = self.computes[ - self.compute1.host].driver._host.get_connection() - connection.pci_info = fakelibvirt.HostPCIDevicesInfo( + pci_info = fakelibvirt.HostPCIDevicesInfo( num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2, multiple_gpu_types=True) # Make a restart to update the Resource Providers - self.compute1 = self.restart_compute_service(self.compute1) + self.compute1 = self.restart_compute_service( + self.compute1.host, pci_info=pci_info, keep_hypervisor_state=False) pgpu1_rp_uuid = self._get_provider_uuid_by_name( self.compute1.host + '_' + fakelibvirt.MDEVCAP_DEV1_PCI_ADDR) pgpu2_rp_uuid = self._get_provider_uuid_by_name( @@ -451,7 +450,7 @@ class DifferentMdevClassesTests(VGPUTestBase): group='mdev_nvidia-12') self.flags(mdev_class='CUSTOM_NOTVGPU', group='mdev_mlx5_core') - self.compute1 = 
self.start_compute('host1') + self.compute1 = self.start_compute_with_vgpu('host1') # Regenerate the PCI addresses so they can support both mlx5 and # nvidia-12 types connection = self.computes[ @@ -460,7 +459,7 @@ class DifferentMdevClassesTests(VGPUTestBase): num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2, generic_types=True) # Make a restart to update the Resource Providers - self.compute1 = self.restart_compute_service(self.compute1) + self.compute1 = self.restart_compute_service('host1') def test_create_servers_with_different_mdev_classes(self): physdev1_rp_uuid = self._get_provider_uuid_by_name( @@ -498,7 +497,7 @@ class DifferentMdevClassesTests(VGPUTestBase): def test_resize_servers_with_mlx5(self): # Add another compute for the sake of resizing - self.compute2 = self.start_compute('host2') + self.compute2 = self.start_compute_with_vgpu('host2') # Regenerate the PCI addresses so they can support both mlx5 and # nvidia-12 types connection = self.computes[ @@ -507,7 +506,7 @@ class DifferentMdevClassesTests(VGPUTestBase): num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2, generic_types=True) # Make a restart to update the Resource Providers - self.compute2 = self.restart_compute_service(self.compute2) + self.compute2 = self.restart_compute_service('host2') # Use the new flavor for booting server = self._create_server( diff --git a/nova/tests/functional/libvirt/test_vtpm.py b/nova/tests/functional/libvirt/test_vtpm.py index c07c38f02d..4e9c705052 100644 --- a/nova/tests/functional/libvirt/test_vtpm.py +++ b/nova/tests/functional/libvirt/test_vtpm.py @@ -128,7 +128,7 @@ class VTPMServersTest(base.ServersTestBase): # the presence of users on the host, none of which makes sense here _p = mock.patch( 'nova.virt.libvirt.driver.LibvirtDriver._check_vtpm_support') - self.mock_conn = _p.start() + _p.start() self.addCleanup(_p.stop) self.key_mgr = crypto._get_key_manager() diff --git a/nova/tests/functional/regressions/test_bug_1628606.py 
b/nova/tests/functional/regressions/test_bug_1628606.py new file mode 100644 index 0000000000..0fccd78cce --- /dev/null +++ b/nova/tests/functional/regressions/test_bug_1628606.py @@ -0,0 +1,60 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import test +from nova.tests import fixtures as nova_fixtures +from nova.tests.functional.api import client +from nova.tests.functional import fixtures as func_fixtures +from nova.tests.functional import integrated_helpers +from unittest import mock + + +class PostLiveMigrationFail( + test.TestCase, integrated_helpers.InstanceHelperMixin): + """Regression test for bug 1628606 + """ + + def setUp(self): + super().setUp() + self.useFixture(nova_fixtures.NeutronFixture(self)) + self.glance = self.useFixture(nova_fixtures.GlanceFixture(self)) + self.useFixture(func_fixtures.PlacementFixture()) + self.useFixture(nova_fixtures.HostNameWeigherFixture()) + + self.start_service('conductor') + self.start_service('scheduler') + + api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( + api_version='v2.1')) + + self.api = api_fixture.admin_api + self.api.microversion = 'latest' + + self.src = self._start_compute(host='host1') + self.dest = self._start_compute(host='host2') + + @mock.patch( + 'nova.compute.manager.ComputeManager' + '._post_live_migration_remove_source_vol_connections') + def test_post_live_migration(self, mock_migration): + server = self._create_server(networks=[]) + self.assertEqual(self.src.host, 
server['OS-EXT-SRV-ATTR:host']) + + error = client.OpenStackApiException( + "Failed to remove source vol connection post live migration") + mock_migration.side_effect = error + + server = self._live_migrate( + server, migration_expected_state='error', + server_expected_state='ERROR') + + self.assertEqual(self.dest.host, server['OS-EXT-SRV-ATTR:host']) diff --git a/nova/tests/functional/regressions/test_bug_1781286.py b/nova/tests/functional/regressions/test_bug_1781286.py index 7b2d603092..bb47eb0ea8 100644 --- a/nova/tests/functional/regressions/test_bug_1781286.py +++ b/nova/tests/functional/regressions/test_bug_1781286.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. -import fixtures import mock from oslo_db import exception as oslo_db_exc @@ -67,11 +66,11 @@ class RescheduleBuildAvailabilityZoneUpCall( def wrap_bari(*args, **kwargs): # Poison the AZ query to blow up as if the cell conductor does not # have access to the API DB. - self.useFixture( - fixtures.MockPatch( - 'nova.objects.AggregateList.get_by_host', - side_effect=oslo_db_exc.CantStartEngineError)) - return original_bari(*args, **kwargs) + with mock.patch( + 'nova.objects.AggregateList.get_by_host', + side_effect=oslo_db_exc.CantStartEngineError + ): + return original_bari(*args, **kwargs) self.stub_out('nova.compute.manager.ComputeManager.' 'build_and_run_instance', wrap_bari) @@ -81,10 +80,6 @@ class RescheduleBuildAvailabilityZoneUpCall( # compute service we have to wait for the notification that the build # is complete and then stop the mock so we can use the API again. self.notifier.wait_for_versioned_notifications('instance.create.end') - # Note that we use stopall here because we actually called - # build_and_run_instance twice so we have more than one instance of - # the mock that needs to be stopped. 
- mock.patch.stopall() server = self._wait_for_state_change(server, 'ACTIVE') # We should have rescheduled and the instance AZ should be set from the # Selection object. Since neither compute host is in an AZ, the server @@ -128,19 +123,20 @@ class RescheduleMigrateAvailabilityZoneUpCall( self.rescheduled = None def wrap_prep_resize(_self, *args, **kwargs): - # Poison the AZ query to blow up as if the cell conductor does not - # have access to the API DB. - self.agg_mock = self.useFixture( - fixtures.MockPatch( - 'nova.objects.AggregateList.get_by_host', - side_effect=oslo_db_exc.CantStartEngineError)).mock if self.rescheduled is None: # Track the first host that we rescheduled from. self.rescheduled = _self.host # Trigger a reschedule. raise exception.ComputeResourcesUnavailable( reason='test_migrate_reschedule_blocked_az_up_call') - return original_prep_resize(_self, *args, **kwargs) + # Poison the AZ query to blow up as if the cell conductor does not + # have access to the API DB. + with mock.patch( + 'nova.objects.AggregateList.get_by_host', + side_effect=oslo_db_exc.CantStartEngineError, + ) as agg_mock: + self.agg_mock = agg_mock + return original_prep_resize(_self, *args, **kwargs) self.stub_out('nova.compute.manager.ComputeManager._prep_resize', wrap_prep_resize) diff --git a/nova/tests/functional/regressions/test_bug_1888395.py b/nova/tests/functional/regressions/test_bug_1888395.py index e582ad3e85..c50b78e2f6 100644 --- a/nova/tests/functional/regressions/test_bug_1888395.py +++ b/nova/tests/functional/regressions/test_bug_1888395.py @@ -23,14 +23,8 @@ from nova.tests.fixtures import libvirt as fakelibvirt from nova.tests.functional.libvirt import base as libvirt_base -class TestLiveMigrationWithoutMultiplePortBindings( +class TestLiveMigrationWithoutMultiplePortBindingsBase( libvirt_base.ServersTestBase): - """Regression test for bug 1888395. 
- - This regression test asserts that Live migration works when - neutron does not support the binding-extended api extension - and the legacy single port binding workflow is used. - """ ADMIN_API = True microversion = 'latest' @@ -72,6 +66,16 @@ class TestLiveMigrationWithoutMultiplePortBindings( 'nova.tests.fixtures.libvirt.Domain.migrateToURI3', self._migrate_stub)) + +class TestLiveMigrationWithoutMultiplePortBindings( + TestLiveMigrationWithoutMultiplePortBindingsBase): + """Regression test for bug 1888395. + + This regression test asserts that Live migration works when + neutron does not support the binding-extended api extension + and the legacy single port binding workflow is used. + """ + def _migrate_stub(self, domain, destination, params, flags): """Stub out migrateToURI3.""" @@ -124,3 +128,25 @@ class TestLiveMigrationWithoutMultiplePortBindings( server, {'OS-EXT-SRV-ATTR:host': 'end_host', 'status': 'ACTIVE'}) msg = "NotImplementedError: Cannot load 'vif_type' in the base class" self.assertNotIn(msg, self.stdlog.logger.output) + + +class TestLiveMigrationRollbackWithoutMultiplePortBindings( + TestLiveMigrationWithoutMultiplePortBindingsBase): + + def _migrate_stub(self, domain, destination, params, flags): + source = self.computes['start_host'] + conn = source.driver._host.get_connection() + dom = conn.lookupByUUIDString(self.server['id']) + dom.fail_job() + + def test_live_migration_rollback(self): + self.server = self._create_server( + host='start_host', + networks=[{'port': self.neutron.port_1['id']}]) + + self.assertFalse( + self.neutron_api.has_port_binding_extension(self.ctxt)) + # NOTE(artom) The live migration will still fail (we fail it in + # _migrate_stub()), but the server should correctly rollback to ACTIVE. 
+ self._live_migrate(self.server, migration_expected_state='failed', + server_expected_state='ACTIVE') diff --git a/nova/tests/functional/regressions/test_bug_1890244.py b/nova/tests/functional/regressions/test_bug_1890244.py new file mode 100644 index 0000000000..bf969eebe7 --- /dev/null +++ b/nova/tests/functional/regressions/test_bug_1890244.py @@ -0,0 +1,96 @@ +# Copyright 2017 Ericsson +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from nova import context +from nova import objects +from nova import test +from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures +from nova.tests.functional import integrated_helpers + + +class IgnoreDeletedServerGroupsTest( + test.TestCase, integrated_helpers.InstanceHelperMixin, +): + """Regression test for bug 1890244 + + If instances are created as members of server groups it + should be possible to evacuate them if the server groups are + deleted prior to the host failure. + """ + + def setUp(self): + super().setUp() + # Stub out external dependencies. + self.useFixture(nova_fixtures.NeutronFixture(self)) + self.useFixture(nova_fixtures.GlanceFixture(self)) + self.useFixture(func_fixtures.PlacementFixture()) + # Start nova controller services.
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( + api_version='v2.1')) + self.api = api_fixture.admin_api + self.start_service('conductor') + # Use a custom weigher to make sure that we have a predictable + # scheduling sort order. + self.useFixture(nova_fixtures.HostNameWeigherFixture()) + self.start_service('scheduler') + # Start two computes, one where the server will be created and another + # where we'll evacuate it to. + self.src = self._start_compute('host1') + self.dest = self._start_compute('host2') + self.notifier = self.useFixture( + nova_fixtures.NotificationFixture(self) + ) + + def test_evacuate_after_group_delete(self): + # Create an anti-affinity group for the server. + body = { + 'server_group': { + 'name': 'test-group', + 'policies': ['anti-affinity'] + } + } + group_id = self.api.api_post( + '/os-server-groups', body).body['server_group']['id'] + + # Create a server in the group which should land on host1 due to our + # custom weigher. + body = {'server': self._build_server()} + body['os:scheduler_hints'] = {'group': group_id} + server = self.api.post_server(body) + server = self._wait_for_state_change(server, 'ACTIVE') + self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host']) + + # Down the source compute to enable the evacuation + self.api.microversion = '2.11' # Cap for the force-down call. 
+ self.api.force_down_service('host1', 'nova-compute', True) + self.api.microversion = 'latest' + self.src.stop() + + # assert the server currently has a server group + reqspec = objects.RequestSpec.get_by_instance_uuid( + context.get_admin_context(), server['id']) + self.assertIsNotNone(reqspec.instance_group) + self.assertIn('group', reqspec.scheduler_hints) + # then delete it so that we need to clean it up on evac + self.api.api_delete(f'/os-server-groups/{group_id}') + + # Initiate evacuation + server = self._evacuate_server( + server, expected_host='host2', expected_migration_status='done' + ) + reqspec = objects.RequestSpec.get_by_instance_uuid( + context.get_admin_context(), server['id']) + self.assertIsNone(reqspec.instance_group) + self.assertNotIn('group', reqspec.scheduler_hints) diff --git a/nova/tests/functional/regressions/test_bug_1896463.py b/nova/tests/functional/regressions/test_bug_1896463.py index 6663ebe8cd..dc74791e0e 100644 --- a/nova/tests/functional/regressions/test_bug_1896463.py +++ b/nova/tests/functional/regressions/test_bug_1896463.py @@ -51,14 +51,6 @@ class TestEvacuateResourceTrackerRace( self.api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) - self.useFixture(fixtures.MockPatch( - 'nova.pci.utils.get_mac_by_pci_address', - return_value='52:54:00:1e:59:c6')) - - self.useFixture(fixtures.MockPatch( - 'nova.pci.utils.get_vf_num_by_pci_address', - return_value=1)) - self.admin_api = self.api_fixture.admin_api self.admin_api.microversion = 'latest' self.api = self.admin_api diff --git a/nova/tests/functional/regressions/test_bug_1951656.py b/nova/tests/functional/regressions/test_bug_1951656.py new file mode 100644 index 0000000000..d705ff6fe3 --- /dev/null +++ b/nova/tests/functional/regressions/test_bug_1951656.py @@ -0,0 +1,73 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_utils import uuidutils + + +from nova.tests.fixtures import libvirt as fakelibvirt +from nova.tests.functional.libvirt import test_vgpu +from nova.virt.libvirt import utils as libvirt_utils + + +class VGPUTestsLibvirt7_7(test_vgpu.VGPUTestBase): + + def _create_mdev(self, physical_device, mdev_type, uuid=None): + # We need to fake the newly created sysfs object by adding a new + # FakeMdevDevice in the existing persisted Connection object so + # when asking to get the existing mdevs, we would see it. + if not uuid: + uuid = uuidutils.generate_uuid() + mdev_name = libvirt_utils.mdev_uuid2name(uuid) + libvirt_parent = self.pci2libvirt_address(physical_device) + + # Libvirt 7.7 now creates mdevs with a parent_addr suffix. 
+ new_mdev_name = '_'.join([mdev_name, libvirt_parent]) + + # Here, we get the right compute thanks to the self._current_host that + # was modified just before + connection = self.computes[ + self._current_host].driver._host.get_connection() + connection.mdev_info.devices.update( + {mdev_name: fakelibvirt.FakeMdevDevice(dev_name=new_mdev_name, + type_id=mdev_type, + parent=libvirt_parent)}) + return uuid + + def setUp(self): + super(VGPUTestsLibvirt7_7, self).setUp() + extra_spec = {"resources:VGPU": "1"} + self.flavor = self._create_flavor(extra_spec=extra_spec) + + # Start compute1 supporting only nvidia-11 + self.flags( + enabled_mdev_types=fakelibvirt.NVIDIA_11_VGPU_TYPE, + group='devices') + + self.compute1 = self.start_compute_with_vgpu('host1') + + def test_create_servers_with_vgpu(self): + + # Create a single instance against a specific compute node. + self._create_server( + image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', + flavor_id=self.flavor, host=self.compute1.host, + networks='auto', expected_state='ACTIVE') + + self.assert_mdev_usage(self.compute1, expected_amount=1) + + self._create_server( + image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', + flavor_id=self.flavor, host=self.compute1.host, + networks='auto', expected_state='ACTIVE') + + self.assert_mdev_usage(self.compute1, expected_amount=2) diff --git a/nova/tests/functional/test_aggregates.py b/nova/tests/functional/test_aggregates.py index 8dfb345578..1ffa3ada92 100644 --- a/nova/tests/functional/test_aggregates.py +++ b/nova/tests/functional/test_aggregates.py @@ -935,11 +935,11 @@ class TestAggregateMultiTenancyIsolationFilter( # Start nova services.
self.start_service('conductor') - self.admin_api = self.useFixture( - nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api - self.api = self.useFixture( - nova_fixtures.OSAPIFixture(api_version='v2.1', - project_id=uuids.non_admin)).api + api_fixture = self.useFixture( + nova_fixtures.OSAPIFixture(api_version='v2.1')) + self.admin_api = api_fixture.admin_api + self.api = api_fixture.api + self.api.project_id = uuids.non_admin # Add the AggregateMultiTenancyIsolation to the list of enabled # filters since it is not enabled by default. enabled_filters = CONF.filter_scheduler.enabled_filters @@ -1037,15 +1037,15 @@ class AggregateMultiTenancyIsolationColdMigrateTest( self.glance = self.useFixture(nova_fixtures.GlanceFixture(self)) self.useFixture(nova_fixtures.NeutronFixture(self)) self.useFixture(func_fixtures.PlacementFixture()) - # Intentionally keep these separate since we want to create the - # server with the non-admin user in a different project. - admin_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( + # Intentionally define different project id for the two client since + # we want to create the server with the non-admin user in a different + # project. 
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1', project_id=uuids.admin_project)) - self.admin_api = admin_api_fixture.admin_api + self.admin_api = api_fixture.admin_api self.admin_api.microversion = 'latest' - user_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( - api_version='v2.1', project_id=uuids.user_project)) - self.api = user_api_fixture.api + self.api = api_fixture.api + self.api.project_id = uuids.user_project self.api.microversion = 'latest' self.start_service('conductor') diff --git a/nova/tests/functional/test_images.py b/nova/tests/functional/test_images.py index 340e883da9..e7e9f2a6c9 100644 --- a/nova/tests/functional/test_images.py +++ b/nova/tests/functional/test_images.py @@ -12,7 +12,6 @@ from oslo_utils.fixture import uuidsentinel as uuids -from nova.tests import fixtures as nova_fixtures from nova.tests.functional.api import client from nova.tests.functional import integrated_helpers @@ -70,10 +69,9 @@ class ImagesTest(integrated_helpers._IntegratedTestBase): server = self.api.post_server({"server": server}) server = self._wait_for_state_change(server, 'ACTIVE') - # Create an admin API fixture with a unique project ID. - admin_api = self.useFixture( - nova_fixtures.OSAPIFixture( - project_id=uuids.admin_project)).admin_api + # use an admin API with a unique project ID. + admin_api = self.api_fixture.alternative_admin_api + admin_api.project_id = uuids.admin_project # Create a snapshot of the server using the admin project. 
name = 'admin-created-snapshot' diff --git a/nova/tests/functional/test_server_group.py b/nova/tests/functional/test_server_group.py index 08e47b3971..a64a04b2c9 100644 --- a/nova/tests/functional/test_server_group.py +++ b/nova/tests/functional/test_server_group.py @@ -64,12 +64,12 @@ class ServerGroupTestBase(test.TestCase, self.useFixture(nova_fixtures.NeutronFixture(self)) self.useFixture(func_fixtures.PlacementFixture()) - api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( + self.api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) - self.api = api_fixture.api + self.api = self.api_fixture.api self.api.microversion = self.microversion - self.admin_api = api_fixture.admin_api + self.admin_api = self.api_fixture.admin_api self.admin_api.microversion = self.microversion self.start_service('conductor') @@ -174,13 +174,8 @@ class ServerGroupTestV21(ServerGroupTestBase): # Create an API using project 'openstack1'. # This is a non-admin API. - # - # NOTE(sdague): this is actually very much *not* how this - # fixture should be used. This actually spawns a whole - # additional API server. Should be addressed in the future. - api_openstack1 = self.useFixture(nova_fixtures.OSAPIFixture( - api_version=self.api_major_version, - project_id=PROJECT_ID_ALT)).api + api_openstack1 = self.api_fixture.alternative_api + api_openstack1.project_id = PROJECT_ID_ALT api_openstack1.microversion = self.microversion # Create a server group in project 'openstack' diff --git a/nova/tests/functional/test_server_rescue.py b/nova/tests/functional/test_server_rescue.py index fa96c10344..8f5b912943 100644 --- a/nova/tests/functional/test_server_rescue.py +++ b/nova/tests/functional/test_server_rescue.py @@ -10,6 +10,10 @@ # License for the specific language governing permissions and limitations # under the License. 
+import datetime + +from oslo_utils.fixture import uuidsentinel as uuids + from nova.tests import fixtures as nova_fixtures from nova.tests.functional.api import client from nova.tests.functional import integrated_helpers @@ -23,7 +27,37 @@ class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase): self.useFixture(nova_fixtures.CinderFixture(self)) self._start_compute(host='host1') - def _create_bfv_server(self): + def _create_image(self, metadata=None): + image = { + 'id': uuids.stable_rescue_image, + 'name': 'fake-image-rescue-property', + 'created_at': datetime.datetime(2011, 1, 1, 1, 2, 3), + 'updated_at': datetime.datetime(2011, 1, 1, 1, 2, 3), + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': False, + 'container_format': 'raw', + 'disk_format': 'raw', + 'size': '25165824', + 'min_ram': 0, + 'min_disk': 0, + 'protected': False, + 'visibility': 'public', + 'tags': ['tag1', 'tag2'], + 'properties': { + 'kernel_id': 'nokernel', + 'ramdisk_id': 'nokernel', + 'hw_rescue_device': 'disk', + 'hw_rescue_bus': 'scsi', + }, + } + if metadata: + image['properties'].update(metadata) + return self.glance.create(None, image) + + def _create_bfv_server(self, metadata=None): + image = self._create_image(metadata=metadata) server_request = self._build_server(networks=[]) server_request.pop('imageRef') server_request['block_device_mapping_v2'] = [{ @@ -33,7 +67,7 @@ class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase): 'destination_type': 'volume'}] server = self.api.post_server({'server': server_request}) self._wait_for_state_change(server, 'ACTIVE') - return server + return server, image class DisallowBFVRescuev286(BFVRescue): @@ -43,10 +77,10 @@ class DisallowBFVRescuev286(BFVRescue): microversion = '2.86' def test_bfv_rescue_not_supported(self): - server = self._create_bfv_server() + server, image = self._create_bfv_server() ex = self.assertRaises(client.OpenStackApiException, self.api.post_server_action, server['id'], {'rescue': { - 
'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}}) + 'rescue_image_ref': image['id']}}) self.assertEqual(400, ex.response.status_code) self.assertIn('Cannot rescue a volume-backed instance', ex.response.text) @@ -60,10 +94,10 @@ class DisallowBFVRescuev286WithTrait(BFVRescue): microversion = '2.86' def test_bfv_rescue_not_supported(self): - server = self._create_bfv_server() + server, image = self._create_bfv_server() ex = self.assertRaises(client.OpenStackApiException, self.api.post_server_action, server['id'], {'rescue': { - 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}}) + 'rescue_image_ref': image['id']}}) self.assertEqual(400, ex.response.status_code) self.assertIn('Cannot rescue a volume-backed instance', ex.response.text) @@ -77,10 +111,10 @@ class DisallowBFVRescuev287WithoutTrait(BFVRescue): microversion = '2.87' def test_bfv_rescue_not_supported(self): - server = self._create_bfv_server() + server, image = self._create_bfv_server() ex = self.assertRaises(client.OpenStackApiException, self.api.post_server_action, server['id'], {'rescue': { - 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}}) + 'rescue_image_ref': image['id']}}) self.assertEqual(400, ex.response.status_code) self.assertIn('Host unable to rescue a volume-backed instance', ex.response.text) @@ -94,7 +128,41 @@ class AllowBFVRescuev287WithTrait(BFVRescue): microversion = '2.87' def test_bfv_rescue_supported(self): - server = self._create_bfv_server() + server, image = self._create_bfv_server() self.api.post_server_action(server['id'], {'rescue': { + 'rescue_image_ref': image['id']}}) + self._wait_for_state_change(server, 'RESCUE') + + +class DisallowBFVRescuev287WithoutRescueImageProperties(BFVRescue): + """Asserts that BFV rescue requests fail with microversion 2.87 (or later) + when the required hw_rescue_device and hw_rescue_bus image properties + are not set on the image. 
+ """ + compute_driver = 'fake.MediumFakeDriver' + microversion = '2.87' + + def test_bfv_rescue_failed(self): + server, image = self._create_bfv_server() + # try rescue without hw_rescue_device and hw_rescue_bus properties set + ex = self.assertRaises(client.OpenStackApiException, + self.api.post_server_action, server['id'], {'rescue': { 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}}) + self.assertEqual(400, ex.response.status_code) + self.assertIn('Cannot rescue a volume-backed instance', + ex.response.text) + + +class AllowBFVRescuev287WithRescueImageProperties(BFVRescue): + """Asserts that BFV rescue requests pass with microversion 2.87 (or later) + when the required hw_rescue_device and hw_rescue_bus image properties + are set on the image. + """ + compute_driver = 'fake.RescueBFVDriver' + microversion = '2.87' + + def test_bfv_rescue_done(self): + server, image = self._create_bfv_server() + self.api.post_server_action(server['id'], {'rescue': { + 'rescue_image_ref': image['id']}}) self._wait_for_state_change(server, 'RESCUE') diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py index e77d4bf1ea..440195cd19 100644 --- a/nova/tests/functional/test_servers.py +++ b/nova/tests/functional/test_servers.py @@ -1253,9 +1253,7 @@ class ServerTestV269(integrated_helpers._IntegratedTestBase): def test_get_servers_detail_filters(self): # We get the results only from the up cells, this ignoring the down # cells if list_records_by_skipping_down_cells config option is True. 
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( - api_version='v2.1')) - self.admin_api = api_fixture.admin_api + self.admin_api = self.api_fixture.admin_api self.admin_api.microversion = '2.69' servers = self.admin_api.get_servers( search_opts={'hostname': "cell3-inst0"}) @@ -1263,9 +1261,7 @@ class ServerTestV269(integrated_helpers._IntegratedTestBase): self.assertEqual(self.up_cell_insts[2], servers[0]['id']) def test_get_servers_detail_all_tenants_with_down_cells(self): - api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( - api_version='v2.1')) - self.admin_api = api_fixture.admin_api + self.admin_api = self.api_fixture.admin_api self.admin_api.microversion = '2.69' servers = self.admin_api.get_servers(search_opts={'all_tenants': True}) # 4 servers from the up cells and 4 servers from the down cells @@ -1523,10 +1519,8 @@ class ServersTestV280(integrated_helpers._IntegratedTestBase): def setUp(self): super(ServersTestV280, self).setUp() - api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( - api_version='v2.1')) - self.api = api_fixture.api - self.admin_api = api_fixture.admin_api + self.api = self.api_fixture.api + self.admin_api = self.api_fixture.admin_api self.api.microversion = '2.80' self.admin_api.microversion = '2.80' @@ -1585,9 +1579,8 @@ class ServersTestV280(integrated_helpers._IntegratedTestBase): project_id_1 = '4906260553374bf0a5d566543b320516' project_id_2 = 'c850298c1b6b4796a8f197ac310b2469' - new_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( - api_version=self.api_major_version, project_id=project_id_1)) - new_admin_api = new_api_fixture.admin_api + new_admin_api = self.api_fixture.alternative_admin_api + new_admin_api.project_id = project_id_1 new_admin_api.microversion = '2.80' post = { diff --git a/nova/tests/unit/api/openstack/compute/test_create_backup.py b/nova/tests/unit/api/openstack/compute/test_create_backup.py index f7280a5a37..70978d11de 100644 --- 
a/nova/tests/unit/api/openstack/compute/test_create_backup.py +++ b/nova/tests/unit/api/openstack/compute/test_create_backup.py @@ -40,10 +40,6 @@ class CreateBackupTestsV21(admin_only_action_common.CommonMixin, self.controller = getattr(self.create_backup, self.controller_name)() self.compute_api = self.controller.compute_api - patch_get = mock.patch.object(self.compute_api, 'get') - self.mock_get = patch_get.start() - self.addCleanup(patch_get.stop) - @mock.patch.object(common, 'check_img_metadata_properties_quota') @mock.patch.object(api.API, 'backup') def test_create_backup_with_metadata(self, mock_backup, mock_check_image): diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_access.py b/nova/tests/unit/api/openstack/compute/test_flavor_access.py index 8c25a2efc2..1c5c34e758 100644 --- a/nova/tests/unit/api/openstack/compute/test_flavor_access.py +++ b/nova/tests/unit/api/openstack/compute/test_flavor_access.py @@ -353,14 +353,37 @@ class FlavorAccessTestV21(test.NoDBTestCase): mock_verify.assert_called_once_with( req.environ['nova.context'], 'proj2') + @mock.patch('nova.objects.Flavor.remove_access') @mock.patch('nova.api.openstack.identity.verify_project_id', side_effect=exc.HTTPBadRequest( explanation="Project ID proj2 is not a valid project.")) - def test_remove_tenant_access_with_invalid_tenant(self, mock_verify): + def test_remove_tenant_access_with_invalid_tenant(self, + mock_verify, + mock_remove_access): """Tests the case that the tenant does not exist in Keystone.""" req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action', use_admin_context=True) body = {'removeTenantAccess': {'tenant': 'proj2'}} + + self.flavor_action_controller._remove_tenant_access( + req, '2', body=body) + mock_verify.assert_called_once_with( + req.environ['nova.context'], 'proj2') + mock_remove_access.assert_called_once_with('proj2') + + @mock.patch('nova.api.openstack.identity.verify_project_id', + side_effect=exc.HTTPBadRequest( + explanation="Nova was 
unable to find Keystone " + "service endpoint.")) + def test_remove_tenant_access_missing_keystone_endpoint(self, + mock_verify): + """Tests the case that Keystone identity service endpoint + version 3.0 was not found. + """ + req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action', + use_admin_context=True) + body = {'removeTenantAccess': {'tenant': 'proj2'}} + self.assertRaises(exc.HTTPBadRequest, self.flavor_action_controller._remove_tenant_access, req, '2', body=body) diff --git a/nova/tests/unit/api/openstack/compute/test_hypervisors.py b/nova/tests/unit/api/openstack/compute/test_hypervisors.py index facc5389be..6545031a0b 100644 --- a/nova/tests/unit/api/openstack/compute/test_hypervisors.py +++ b/nova/tests/unit/api/openstack/compute/test_hypervisors.py @@ -368,25 +368,23 @@ class HypervisorsTestV21(test.NoDBTestCase): return TEST_SERVICES[0] raise exception.ComputeHostNotFound(host=host) - @mock.patch.object(self.controller.host_api, 'compute_node_get_all', - return_value=compute_nodes) - @mock.patch.object(self.controller.host_api, - 'service_get_by_compute_host', - fake_service_get_by_compute_host) - def _test(self, compute_node_get_all): - req = self._get_request(True) - result = self.controller.index(req) - self.assertEqual(1, len(result['hypervisors'])) - expected = { - 'id': compute_nodes[0].uuid if self.expect_uuid_for_id - else compute_nodes[0].id, - 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname, - 'state': 'up', - 'status': 'enabled', - } - self.assertDictEqual(expected, result['hypervisors'][0]) + m_get = self.controller.host_api.compute_node_get_all + m_get.side_effect = None + m_get.return_value = compute_nodes + self.controller.host_api.service_get_by_compute_host.side_effect = ( + fake_service_get_by_compute_host) - _test(self) + req = self._get_request(True) + result = self.controller.index(req) + self.assertEqual(1, len(result['hypervisors'])) + expected = { + 'id': compute_nodes[0].uuid if self.expect_uuid_for_id + 
else compute_nodes[0].id, + 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname, + 'state': 'up', + 'status': 'enabled', + } + self.assertDictEqual(expected, result['hypervisors'][0]) def test_index_compute_host_not_mapped(self): """Tests that we don't fail index if a host is not mapped.""" @@ -402,25 +400,22 @@ class HypervisorsTestV21(test.NoDBTestCase): return TEST_SERVICES[0] raise exception.HostMappingNotFound(name=host) - @mock.patch.object(self.controller.host_api, 'compute_node_get_all', - return_value=compute_nodes) - @mock.patch.object(self.controller.host_api, - 'service_get_by_compute_host', - fake_service_get_by_compute_host) - def _test(self, compute_node_get_all): - req = self._get_request(True) - result = self.controller.index(req) - self.assertEqual(1, len(result['hypervisors'])) - expected = { - 'id': compute_nodes[0].uuid if self.expect_uuid_for_id - else compute_nodes[0].id, - 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname, - 'state': 'up', - 'status': 'enabled', - } - self.assertDictEqual(expected, result['hypervisors'][0]) + self.controller.host_api.compute_node_get_all.return_value = ( + compute_nodes) + self.controller.host_api.service_get_by_compute_host = ( + fake_service_get_by_compute_host) - _test(self) + req = self._get_request(True) + result = self.controller.index(req) + self.assertEqual(1, len(result['hypervisors'])) + expected = { + 'id': compute_nodes[0].uuid if self.expect_uuid_for_id + else compute_nodes[0].id, + 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname, + 'state': 'up', + 'status': 'enabled', + } + self.assertDictEqual(expected, result['hypervisors'][0]) def test_detail(self): req = self._get_request(True) @@ -444,32 +439,30 @@ class HypervisorsTestV21(test.NoDBTestCase): return TEST_SERVICES[0] raise exception.ComputeHostNotFound(host=host) - @mock.patch.object(self.controller.host_api, 'compute_node_get_all', - return_value=compute_nodes) - @mock.patch.object(self.controller.host_api, - 
'service_get_by_compute_host', - fake_service_get_by_compute_host) - def _test(self, compute_node_get_all): - req = self._get_request(True) - result = self.controller.detail(req) - self.assertEqual(1, len(result['hypervisors'])) - expected = { - 'id': compute_nodes[0].id, - 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname, - 'state': 'up', - 'status': 'enabled', - } - # we don't care about all of the details, just make sure we get - # the subset we care about and there are more keys than what index - # would return - hypervisor = result['hypervisors'][0] - self.assertTrue( - set(expected.keys()).issubset(set(hypervisor.keys()))) - self.assertGreater(len(hypervisor.keys()), len(expected.keys())) - self.assertEqual(compute_nodes[0].hypervisor_hostname, - hypervisor['hypervisor_hostname']) - - _test(self) + m_get = self.controller.host_api.compute_node_get_all + m_get.side_effect = None + m_get.return_value = compute_nodes + self.controller.host_api.service_get_by_compute_host.side_effect = ( + fake_service_get_by_compute_host) + + req = self._get_request(True) + result = self.controller.detail(req) + self.assertEqual(1, len(result['hypervisors'])) + expected = { + 'id': compute_nodes[0].id, + 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname, + 'state': 'up', + 'status': 'enabled', + } + # we don't care about all of the details, just make sure we get + # the subset we care about and there are more keys than what index + # would return + hypervisor = result['hypervisors'][0] + self.assertTrue( + set(expected.keys()).issubset(set(hypervisor.keys()))) + self.assertGreater(len(hypervisor.keys()), len(expected.keys())) + self.assertEqual(compute_nodes[0].hypervisor_hostname, + hypervisor['hypervisor_hostname']) def test_detail_compute_host_not_mapped(self): """Tests that if a service is deleted but the compute node is not we @@ -487,32 +480,28 @@ class HypervisorsTestV21(test.NoDBTestCase): return TEST_SERVICES[0] raise 
exception.HostMappingNotFound(name=host) - @mock.patch.object(self.controller.host_api, 'compute_node_get_all', - return_value=compute_nodes) - @mock.patch.object(self.controller.host_api, - 'service_get_by_compute_host', - fake_service_get_by_compute_host) - def _test(self, compute_node_get_all): - req = self._get_request(True) - result = self.controller.detail(req) - self.assertEqual(1, len(result['hypervisors'])) - expected = { - 'id': compute_nodes[0].id, - 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname, - 'state': 'up', - 'status': 'enabled', - } - # we don't care about all of the details, just make sure we get - # the subset we care about and there are more keys than what index - # would return - hypervisor = result['hypervisors'][0] - self.assertTrue( - set(expected.keys()).issubset(set(hypervisor.keys()))) - self.assertGreater(len(hypervisor.keys()), len(expected.keys())) - self.assertEqual(compute_nodes[0].hypervisor_hostname, - hypervisor['hypervisor_hostname']) - - _test(self) + self.controller.host_api.service_get_by_compute_host.side_effect = ( + fake_service_get_by_compute_host) + self.controller.host_api.compute_node_get_all.return_value = ( + compute_nodes) + req = self._get_request(True) + result = self.controller.detail(req) + self.assertEqual(1, len(result['hypervisors'])) + expected = { + 'id': compute_nodes[0].id, + 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname, + 'state': 'up', + 'status': 'enabled', + } + # we don't care about all of the details, just make sure we get + # the subset we care about and there are more keys than what index + # would return + hypervisor = result['hypervisors'][0] + self.assertTrue( + set(expected.keys()).issubset(set(hypervisor.keys()))) + self.assertGreater(len(hypervisor.keys()), len(expected.keys())) + self.assertEqual(compute_nodes[0].hypervisor_hostname, + hypervisor['hypervisor_hostname']) def test_show(self): req = self._get_request(True) @@ -525,21 +514,16 @@ class 
HypervisorsTestV21(test.NoDBTestCase): """Tests that if a service is deleted but the compute node is not we don't fail when listing hypervisors. """ - - @mock.patch.object(self.controller.host_api, 'compute_node_get', - return_value=self.TEST_HYPERS_OBJ[0]) - @mock.patch.object(self.controller.host_api, - 'service_get_by_compute_host') - def _test(self, mock_service, mock_compute_node_get): - req = self._get_request(True) - mock_service.side_effect = exception.HostMappingNotFound( - name='foo') - hyper_id = self._get_hyper_id() - self.assertRaises(exc.HTTPNotFound, self.controller.show, - req, hyper_id) - self.assertTrue(mock_service.called) - mock_compute_node_get.assert_called_once_with(mock.ANY, hyper_id) - _test(self) + self.controller.host_api.service_get_by_compute_host.side_effect = ( + exception.HostMappingNotFound(name='foo')) + req = self._get_request(True) + hyper_id = self._get_hyper_id() + self.assertRaises( + exc.HTTPNotFound, self.controller.show, req, hyper_id) + self.assertTrue( + self.controller.host_api.service_get_by_compute_host.called) + self.controller.host_api.compute_node_get.assert_called_once_with( + mock.ANY, hyper_id) def test_show_noid(self): req = self._get_request(True) @@ -611,20 +595,15 @@ class HypervisorsTestV21(test.NoDBTestCase): mock.ANY, self.TEST_HYPERS_OBJ[0].host) def test_uptime_hypervisor_not_mapped_service_get(self): - @mock.patch.object(self.controller.host_api, 'compute_node_get') - @mock.patch.object(self.controller.host_api, 'get_host_uptime') - @mock.patch.object(self.controller.host_api, - 'service_get_by_compute_host', - side_effect=exception.HostMappingNotFound( - name='dummy')) - def _test(mock_get, _, __): - req = self._get_request(True) - hyper_id = self._get_hyper_id() - self.assertRaises(exc.HTTPNotFound, - self.controller.uptime, req, hyper_id) - self.assertTrue(mock_get.called) + self.controller.host_api.service_get_by_compute_host.side_effect = ( + exception.HostMappingNotFound(name='dummy')) - _test() + 
req = self._get_request(True) + hyper_id = self._get_hyper_id() + self.assertRaises(exc.HTTPNotFound, + self.controller.uptime, req, hyper_id) + self.assertTrue( + self.controller.host_api.service_get_by_compute_host.called) def test_uptime_hypervisor_not_mapped(self): with mock.patch.object(self.controller.host_api, 'get_host_uptime', @@ -644,30 +623,26 @@ class HypervisorsTestV21(test.NoDBTestCase): self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result) def test_search_non_exist(self): - with mock.patch.object(self.controller.host_api, - 'compute_node_search_by_hypervisor', - return_value=[]) as mock_node_search: - req = self._get_request(True) - self.assertRaises(exc.HTTPNotFound, self.controller.search, - req, 'a') - self.assertEqual(1, mock_node_search.call_count) + m_search = self.controller.host_api.compute_node_search_by_hypervisor + m_search.side_effect = None + m_search.return_value = [] + + req = self._get_request(True) + self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a') + self.assertEqual(1, m_search.call_count) def test_search_unmapped(self): + m_search = self.controller.host_api.compute_node_search_by_hypervisor + m_search.side_effect = None + m_search.return_value = [mock.MagicMock()] - @mock.patch.object(self.controller.host_api, - 'compute_node_search_by_hypervisor') - @mock.patch.object(self.controller.host_api, - 'service_get_by_compute_host') - def _test(mock_service, mock_search): - mock_search.return_value = [mock.MagicMock()] - mock_service.side_effect = exception.HostMappingNotFound( - name='foo') - req = self._get_request(True) - self.assertRaises(exc.HTTPNotFound, self.controller.search, - req, 'a') - self.assertTrue(mock_service.called) + self.controller.host_api.service_get_by_compute_host.side_effect = ( + exception.HostMappingNotFound(name='foo')) - _test() + req = self._get_request(True) + self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a') + self.assertTrue( + 
self.controller.host_api.service_get_by_compute_host.called) @mock.patch.object(objects.InstanceList, 'get_by_host', side_effect=fake_instance_get_all_by_host) @@ -702,15 +677,12 @@ class HypervisorsTestV21(test.NoDBTestCase): def test_servers_compute_host_not_found(self): req = self._get_request(True) - with test.nested( - mock.patch.object( - self.controller.host_api, 'instance_get_all_by_host', - side_effect=fake_instance_get_all_by_host, - ), - mock.patch.object( - self.controller.host_api, 'service_get_by_compute_host', - side_effect=exception.ComputeHostNotFound(host='foo'), - ), + self.controller.host_api.service_get_by_compute_host.side_effect = ( + exception.ComputeHostNotFound(host='foo')) + with mock.patch.object( + self.controller.host_api, + 'instance_get_all_by_host', + side_effect=fake_instance_get_all_by_host, ): # The result should be empty since every attempt to fetch the # service for a hypervisor "failed" @@ -718,24 +690,25 @@ class HypervisorsTestV21(test.NoDBTestCase): self.assertEqual({'hypervisors': []}, result) def test_servers_non_id(self): - with mock.patch.object(self.controller.host_api, - 'compute_node_search_by_hypervisor', - return_value=[]) as mock_node_search: - req = self._get_request(True) - self.assertRaises(exc.HTTPNotFound, - self.controller.servers, - req, '115') - self.assertEqual(1, mock_node_search.call_count) + m_search = self.controller.host_api.compute_node_search_by_hypervisor + m_search.side_effect = None + m_search.return_value = [] + + req = self._get_request(True) + self.assertRaises(exc.HTTPNotFound, + self.controller.servers, + req, '115') + self.assertEqual(1, m_search.call_count) def test_servers_with_non_integer_hypervisor_id(self): - with mock.patch.object(self.controller.host_api, - 'compute_node_search_by_hypervisor', - return_value=[]) as mock_node_search: + m_search = self.controller.host_api.compute_node_search_by_hypervisor + m_search.side_effect = None + m_search.return_value = [] - req = 
self._get_request(True) - self.assertRaises(exc.HTTPNotFound, - self.controller.servers, req, 'abc') - self.assertEqual(1, mock_node_search.call_count) + req = self._get_request(True) + self.assertRaises( + exc.HTTPNotFound, self.controller.servers, req, 'abc') + self.assertEqual(1, m_search.call_count) def test_servers_with_no_servers(self): with mock.patch.object(self.controller.host_api, @@ -1089,15 +1062,13 @@ class HypervisorsTestV253(HypervisorsTestV252): use_admin_context=True, url='/os-hypervisors?with_servers=1') - with test.nested( - mock.patch.object( - self.controller.host_api, 'instance_get_all_by_host', - side_effect=fake_instance_get_all_by_host, - ), - mock.patch.object( - self.controller.host_api, 'service_get_by_compute_host', - side_effect=exception.ComputeHostNotFound(host='foo'), - ), + self.controller.host_api.service_get_by_compute_host.side_effect = ( + exception.ComputeHostNotFound(host='foo')) + + with mock.patch.object( + self.controller.host_api, + "instance_get_all_by_host", + side_effect=fake_instance_get_all_by_host, ): # The result should be empty since every attempt to fetch the # service for a hypervisor "failed" @@ -1157,11 +1128,13 @@ class HypervisorsTestV253(HypervisorsTestV252): use_admin_context=True, url='/os-hypervisors?with_servers=yes&' 'hypervisor_hostname_pattern=shenzhen') - with mock.patch.object(self.controller.host_api, - 'compute_node_search_by_hypervisor', - return_value=objects.ComputeNodeList()) as s: - self.assertRaises(exc.HTTPNotFound, self.controller.index, req) - s.assert_called_once_with(req.environ['nova.context'], 'shenzhen') + m_search = self.controller.host_api.compute_node_search_by_hypervisor + m_search.side_effect = None + m_search.return_value = objects.ComputeNodeList() + + self.assertRaises(exc.HTTPNotFound, self.controller.index, req) + m_search.assert_called_once_with( + req.environ['nova.context'], 'shenzhen') def test_detail_with_hostname_pattern(self): """Test listing hypervisors with 
details and using the @@ -1170,13 +1143,14 @@ class HypervisorsTestV253(HypervisorsTestV252): req = self._get_request( use_admin_context=True, url='/os-hypervisors?hypervisor_hostname_pattern=shenzhen') - with mock.patch.object( - self.controller.host_api, - 'compute_node_search_by_hypervisor', - return_value=objects.ComputeNodeList(objects=[TEST_HYPERS_OBJ[0]]) - ) as s: - result = self.controller.detail(req) - s.assert_called_once_with(req.environ['nova.context'], 'shenzhen') + m_search = self.controller.host_api.compute_node_search_by_hypervisor + m_search.side_effect = None + m_search.return_value = objects.ComputeNodeList( + objects=[TEST_HYPERS_OBJ[0]]) + + result = self.controller.detail(req) + m_search.assert_called_once_with( + req.environ['nova.context'], 'shenzhen') expected = {'hypervisors': [self.DETAIL_HYPERS_DICTS[0]]} @@ -1483,15 +1457,11 @@ class HypervisorsTestV288(HypervisorsTestV275): self.controller.uptime, req) def test_uptime_old_version(self): - with mock.patch.object( - self.controller.host_api, 'get_host_uptime', - return_value='fake uptime', - ): - req = self._get_request(use_admin_context=True, version='2.87') - hyper_id = self._get_hyper_id() + req = self._get_request(use_admin_context=True, version='2.87') + hyper_id = self._get_hyper_id() - # no exception == pass - self.controller.uptime(req, hyper_id) + # no exception == pass + self.controller.uptime(req, hyper_id) def test_uptime_noid(self): # the separate 'uptime' API has been removed, so skip this test @@ -1526,34 +1496,36 @@ class HypervisorsTestV288(HypervisorsTestV275): pass def test_show_with_uptime_notimplemented(self): - with mock.patch.object( - self.controller.host_api, 'get_host_uptime', - side_effect=NotImplementedError, - ) as mock_get_uptime: - req = self._get_request(use_admin_context=True) - hyper_id = self._get_hyper_id() + self.controller.host_api.get_host_uptime.side_effect = ( + NotImplementedError()) - result = self.controller.show(req, hyper_id) + req = 
self._get_request(use_admin_context=True) + hyper_id = self._get_hyper_id() - expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0]) - expected_dict.update({'uptime': None}) - self.assertEqual({'hypervisor': expected_dict}, result) - self.assertEqual(1, mock_get_uptime.call_count) + result = self.controller.show(req, hyper_id) + + expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0]) + expected_dict.update({'uptime': None}) + self.assertEqual({'hypervisor': expected_dict}, result) + self.assertEqual( + 1, self.controller.host_api.get_host_uptime.call_count) def test_show_with_uptime_hypervisor_down(self): - with mock.patch.object( - self.controller.host_api, 'get_host_uptime', - side_effect=exception.ComputeServiceUnavailable(host='dummy') - ) as mock_get_uptime: - req = self._get_request(use_admin_context=True) - hyper_id = self._get_hyper_id() + self.controller.host_api.get_host_uptime.side_effect = ( + exception.ComputeServiceUnavailable(host='dummy')) - result = self.controller.show(req, hyper_id) + req = self._get_request(use_admin_context=True) + hyper_id = self._get_hyper_id() - expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0]) - expected_dict.update({'uptime': None}) - self.assertEqual({'hypervisor': expected_dict}, result) - self.assertEqual(1, mock_get_uptime.call_count) + result = self.controller.show(req, hyper_id) + + expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0]) + expected_dict.update({'uptime': None}) + self.assertEqual({'hypervisor': expected_dict}, result) + self.assertEqual( + 1, + self.controller.host_api.get_host_uptime.call_count + ) def test_show_old_version(self): # ensure things still work as expected here diff --git a/nova/tests/unit/api/openstack/compute/test_limits.py b/nova/tests/unit/api/openstack/compute/test_limits.py index a5ac0bca24..69676e28ac 100644 --- a/nova/tests/unit/api/openstack/compute/test_limits.py +++ b/nova/tests/unit/api/openstack/compute/test_limits.py @@ -34,7 +34,6 @@ from nova.limit 
import local as local_limit from nova.limit import placement as placement_limit from nova import objects from nova.policies import limits as l_policies -from nova import quota from nova import test from nova.tests.unit.api.openstack import fakes from nova.tests.unit import matchers @@ -52,12 +51,12 @@ class BaseLimitTestSuite(test.NoDBTestCase): return {k: dict(limit=v, in_use=v // 2) for k, v in self.absolute_limits.items()} - mock_get_project_quotas = mock.patch.object( + patcher_get_project_quotas = mock.patch.object( nova.quota.QUOTAS, "get_project_quotas", - side_effect = stub_get_project_quotas) - mock_get_project_quotas.start() - self.addCleanup(mock_get_project_quotas.stop) + side_effect=stub_get_project_quotas) + self.mock_get_project_quotas = patcher_get_project_quotas.start() + self.addCleanup(patcher_get_project_quotas.stop) patcher = self.mock_can = mock.patch('nova.context.RequestContext.can') self.mock_can = patcher.start() self.addCleanup(patcher.stop) @@ -154,16 +153,14 @@ class LimitsControllerTestV21(BaseLimitTestSuite): return {k: dict(limit=v, in_use=v // 2) for k, v in self.absolute_limits.items()} - with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \ - get_project_quotas: - get_project_quotas.side_effect = _get_project_quotas + self.mock_get_project_quotas.side_effect = _get_project_quotas - response = request.get_response(self.controller) + response = request.get_response(self.controller) - body = jsonutils.loads(response.body) - self.assertEqual(expected, body) - get_project_quotas.assert_called_once_with(context, tenant_id, - usages=True) + body = jsonutils.loads(response.body) + self.assertEqual(expected, body) + self.mock_get_project_quotas.assert_called_once_with( + context, tenant_id, usages=True) def _do_test_used_limits(self, reserved): request = self._get_index_request(tenant_id=None) @@ -186,8 +183,7 @@ class LimitsControllerTestV21(BaseLimitTestSuite): def stub_get_project_quotas(context, project_id, usages=True): return 
limits - self.stub_out('nova.quota.QUOTAS.get_project_quotas', - stub_get_project_quotas) + self.mock_get_project_quotas.side_effect = stub_get_project_quotas res = request.get_response(self.controller) body = jsonutils.loads(res.body) @@ -211,14 +207,15 @@ class LimitsControllerTestV21(BaseLimitTestSuite): user_id=user_id, project_id=project_id) context = fake_req.environ["nova.context"] - with mock.patch.object(quota.QUOTAS, 'get_project_quotas', - return_value={}) as mock_get_quotas: - fake_req.get_response(self.controller) - self.assertEqual(2, self.mock_can.call_count) - self.mock_can.assert_called_with( - l_policies.OTHER_PROJECT_LIMIT_POLICY_NAME) - mock_get_quotas.assert_called_once_with(context, - tenant_id, usages=True) + self.mock_get_project_quotas.side_effect = None + self.mock_get_project_quotas.return_value = {} + + fake_req.get_response(self.controller) + self.assertEqual(2, self.mock_can.call_count) + self.mock_can.assert_called_with( + l_policies.OTHER_PROJECT_LIMIT_POLICY_NAME) + self.mock_get_project_quotas.assert_called_once_with(context, + tenant_id, usages=True) def _test_admin_can_fetch_used_limits_for_own_project(self, req_get): project_id = "123456" @@ -230,11 +227,12 @@ class LimitsControllerTestV21(BaseLimitTestSuite): project_id=project_id) context = fake_req.environ["nova.context"] - with mock.patch.object(quota.QUOTAS, 'get_project_quotas', - return_value={}) as mock_get_quotas: - fake_req.get_response(self.controller) - mock_get_quotas.assert_called_once_with(context, - project_id, usages=True) + self.mock_get_project_quotas.side_effect = None + self.mock_get_project_quotas.return_value = {} + + fake_req.get_response(self.controller) + self.mock_get_project_quotas.assert_called_once_with( + context, project_id, usages=True) def test_admin_can_fetch_used_limits_for_own_project(self): req_get = {} @@ -262,12 +260,13 @@ class LimitsControllerTestV21(BaseLimitTestSuite): project_id = "123456" fake_req = 
self._get_index_request(project_id=project_id) context = fake_req.environ["nova.context"] - with mock.patch.object(quota.QUOTAS, 'get_project_quotas', - return_value={}) as mock_get_quotas: - fake_req.get_response(self.controller) + self.mock_get_project_quotas.side_effect = None + self.mock_get_project_quotas.return_value = {} - mock_get_quotas.assert_called_once_with(context, - project_id, usages=True) + fake_req.get_response(self.controller) + + self.mock_get_project_quotas.assert_called_once_with( + context, project_id, usages=True) def test_used_ram_added(self): fake_req = self._get_index_request() @@ -275,28 +274,26 @@ class LimitsControllerTestV21(BaseLimitTestSuite): def stub_get_project_quotas(context, project_id, usages=True): return {'ram': {'limit': 512, 'in_use': 256}} - with mock.patch.object(quota.QUOTAS, 'get_project_quotas', - side_effect=stub_get_project_quotas - ) as mock_get_quotas: + self.mock_get_project_quotas.side_effect = stub_get_project_quotas - res = fake_req.get_response(self.controller) - body = jsonutils.loads(res.body) - abs_limits = body['limits']['absolute'] - self.assertIn('totalRAMUsed', abs_limits) - self.assertEqual(256, abs_limits['totalRAMUsed']) - self.assertEqual(1, mock_get_quotas.call_count) + res = fake_req.get_response(self.controller) + body = jsonutils.loads(res.body) + abs_limits = body['limits']['absolute'] + self.assertIn('totalRAMUsed', abs_limits) + self.assertEqual(256, abs_limits['totalRAMUsed']) + self.assertEqual(1, self.mock_get_project_quotas.call_count) def test_no_ram_quota(self): fake_req = self._get_index_request() - with mock.patch.object(quota.QUOTAS, 'get_project_quotas', - return_value={}) as mock_get_quotas: + self.mock_get_project_quotas.side_effect = None + self.mock_get_project_quotas.return_value = {} - res = fake_req.get_response(self.controller) - body = jsonutils.loads(res.body) - abs_limits = body['limits']['absolute'] - self.assertNotIn('totalRAMUsed', abs_limits) - self.assertEqual(1, 
mock_get_quotas.call_count) + res = fake_req.get_response(self.controller) + body = jsonutils.loads(res.body) + abs_limits = body['limits']['absolute'] + self.assertNotIn('totalRAMUsed', abs_limits) + self.assertEqual(1, self.mock_get_project_quotas.call_count) class FakeHttplibSocket(object): @@ -398,25 +395,24 @@ class LimitsControllerTestV236(BaseLimitTestSuite): return {k: dict(limit=v, in_use=v // 2) for k, v in absolute_limits.items()} - with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \ - get_project_quotas: - get_project_quotas.side_effect = _get_project_quotas - response = self.controller.index(self.req) - expected_response = { - "limits": { - "rate": [], - "absolute": { - "maxTotalRAMSize": 512, - "maxTotalInstances": 5, - "maxTotalCores": 21, - "maxTotalKeypairs": 10, - "totalRAMUsed": 256, - "totalCoresUsed": 10, - "totalInstancesUsed": 2, - }, + self.mock_get_project_quotas.side_effect = _get_project_quotas + + response = self.controller.index(self.req) + expected_response = { + "limits": { + "rate": [], + "absolute": { + "maxTotalRAMSize": 512, + "maxTotalInstances": 5, + "maxTotalCores": 21, + "maxTotalKeypairs": 10, + "totalRAMUsed": 256, + "totalCoresUsed": 10, + "totalInstancesUsed": 2, }, - } - self.assertEqual(expected_response, response) + }, + } + self.assertEqual(expected_response, response) class LimitsControllerTestV239(BaseLimitTestSuite): @@ -436,21 +432,20 @@ class LimitsControllerTestV239(BaseLimitTestSuite): return {k: dict(limit=v, in_use=v // 2) for k, v in absolute_limits.items()} - with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \ - get_project_quotas: - get_project_quotas.side_effect = _get_project_quotas - response = self.controller.index(self.req) - # staring from version 2.39 there is no 'maxImageMeta' field - # in response after removing 'image-metadata' proxy API - expected_response = { - "limits": { - "rate": [], - "absolute": { - "maxServerMeta": 1, - }, + self.mock_get_project_quotas.side_effect = 
_get_project_quotas + + response = self.controller.index(self.req) + # starting from version 2.39 there is no 'maxImageMeta' field + # in response after removing 'image-metadata' proxy API + expected_response = { + "limits": { + "rate": [], + "absolute": { + "maxServerMeta": 1, }, - } - self.assertEqual(expected_response, response) + }, + } + self.assertEqual(expected_response, response) class LimitsControllerTestV275(BaseLimitTestSuite): @@ -469,10 +464,9 @@ class LimitsControllerTestV275(BaseLimitTestSuite): return {k: dict(limit=v, in_use=v // 2) for k, v in absolute_limits.items()} - with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \ - get_project_quotas: - get_project_quotas.side_effect = _get_project_quotas - self.controller.index(req) + self.mock_get_project_quotas.side_effect = _get_project_quotas + self.controller.index(req) + self.controller.index(req) def test_index_additional_query_param(self): req = fakes.HTTPRequest.blank("/?unkown=fake", diff --git a/nova/tests/unit/api/openstack/compute/test_migrate_server.py b/nova/tests/unit/api/openstack/compute/test_migrate_server.py index 683759eccc..325b4927b3 100644 --- a/nova/tests/unit/api/openstack/compute/test_migrate_server.py +++ b/nova/tests/unit/api/openstack/compute/test_migrate_server.py @@ -530,9 +530,8 @@ class MigrateServerTestsV256(MigrateServerTestsV234): self.req, fakes.FAKE_UUID, body=body) def _test_migrate_exception(self, exc_info, expected_result): - @mock.patch.object(self.compute_api, 'get') @mock.patch.object(self.compute_api, 'resize', side_effect=exc_info) - def _test(mock_resize, mock_get): + def _test(mock_resize): instance = objects.Instance(uuid=uuids.instance) self.assertRaises(expected_result, self.controller._migrate, diff --git a/nova/tests/unit/api/openstack/compute/test_quotas.py b/nova/tests/unit/api/openstack/compute/test_quotas.py index 6cb8d9c7ad..7e4f9d1374 100644 --- a/nova/tests/unit/api/openstack/compute/test_quotas.py +++ 
b/nova/tests/unit/api/openstack/compute/test_quotas.py @@ -882,7 +882,8 @@ class UnifiedLimitsQuotaSetsTest(NoopQuotaSetsTest): local_limit.KEY_PAIRS: 100, local_limit.SERVER_GROUPS: 12, local_limit.SERVER_GROUP_MEMBERS: 10} - self.useFixture(limit_fixture.LimitFixture(reglimits, {})) + self.limit_fixture = self.useFixture( + limit_fixture.LimitFixture(reglimits, {})) @mock.patch.object(placement_limit, "get_legacy_project_limits") def test_show_v21(self, mock_proj): @@ -1098,7 +1099,7 @@ class UnifiedLimitsQuotaSetsTest(NoopQuotaSetsTest): local_limit.KEY_PAIRS: 1, local_limit.SERVER_GROUPS: 3, local_limit.SERVER_GROUP_MEMBERS: 2} - self.useFixture(limit_fixture.LimitFixture(reglimits, {})) + self.limit_fixture.reglimits = reglimits req = fakes.HTTPRequest.blank("") response = self.controller.defaults(req, uuids.project_id) diff --git a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py index 6427b1abf0..f62093bbb7 100644 --- a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py +++ b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py @@ -103,6 +103,18 @@ class ConsolesExtensionTestV21(test.NoDBTestCase): 'get_vnc_console', exception.InstanceNotFound(instance_id=fakes.FAKE_UUID)) + def test_get_vnc_console_instance_invalid_state(self): + body = {'os-getVNCConsole': {'type': 'novnc'}} + self._check_console_failure( + self.controller.get_vnc_console, + webob.exc.HTTPConflict, + body, + 'get_vnc_console', + exception.InstanceInvalidState( + attr='fake-attr', state='fake-state', method='fake-method', + instance_uuid=fakes.FAKE_UUID) + ) + def test_get_vnc_console_invalid_type(self): body = {'os-getVNCConsole': {'type': 'invalid'}} self._check_console_failure( diff --git a/nova/tests/unit/api/openstack/compute/test_server_actions.py b/nova/tests/unit/api/openstack/compute/test_server_actions.py index d07924abe8..b4daad1286 100644 --- 
a/nova/tests/unit/api/openstack/compute/test_server_actions.py +++ b/nova/tests/unit/api/openstack/compute/test_server_actions.py @@ -66,11 +66,11 @@ class ServerActionsControllerTestV21(test.TestCase): self.controller = self._get_controller() self.compute_api = self.controller.compute_api - # We don't care about anything getting as far as hitting the compute - # RPC API so we just mock it out here. - mock_rpcapi = mock.patch.object(self.compute_api, 'compute_rpcapi') - mock_rpcapi.start() - self.addCleanup(mock_rpcapi.stop) + # In most of the cases we don't care about anything getting as far as + # hitting the compute RPC API so we just mock it out here. + patcher_rpcapi = mock.patch.object(self.compute_api, 'compute_rpcapi') + self.mock_rpcapi = patcher_rpcapi.start() + self.addCleanup(patcher_rpcapi.stop) # The project_id here matches what is used by default in # fake_compute_get which need to match for policy checks. self.req = fakes.HTTPRequest.blank('', @@ -1079,21 +1079,23 @@ class ServerActionsControllerTestV21(test.TestCase): snapshot = dict(id=_fake_id('d')) + self.mock_rpcapi.quiesce_instance.side_effect = ( + exception.InstanceQuiesceNotSupported( + instance_id="fake", reason="test" + ) + ) + with test.nested( mock.patch.object( self.controller.compute_api.volume_api, 'get_absolute_limits', return_value={'totalSnapshotsUsed': 0, 'maxTotalSnapshots': 10}), - mock.patch.object(self.controller.compute_api.compute_rpcapi, - 'quiesce_instance', - side_effect=exception.InstanceQuiesceNotSupported( - instance_id='fake', reason='test')), mock.patch.object(self.controller.compute_api.volume_api, 'get', return_value=volume), mock.patch.object(self.controller.compute_api.volume_api, 'create_snapshot_force', return_value=snapshot), - ) as (mock_get_limits, mock_quiesce, mock_vol_get, mock_vol_create): + ) as (mock_get_limits, mock_vol_get, mock_vol_create): if mock_vol_create_side_effect: mock_vol_create.side_effect = mock_vol_create_side_effect @@ -1125,7 +1127,7 
@@ class ServerActionsControllerTestV21(test.TestCase): for k in extra_properties.keys(): self.assertEqual(properties[k], extra_properties[k]) - mock_quiesce.assert_called_once_with(mock.ANY, mock.ANY) + self.mock_rpcapi.quiesce_instance.assert_called_once() mock_vol_get.assert_called_once_with(mock.ANY, volume['id']) mock_vol_create.assert_called_once_with(mock.ANY, volume['id'], mock.ANY, mock.ANY) @@ -1189,21 +1191,23 @@ class ServerActionsControllerTestV21(test.TestCase): snapshot = dict(id=_fake_id('d')) + self.mock_rpcapi.quiesce_instance.side_effect = ( + exception.InstanceQuiesceNotSupported( + instance_id="fake", reason="test" + ) + ) + with test.nested( mock.patch.object( self.controller.compute_api.volume_api, 'get_absolute_limits', return_value={'totalSnapshotsUsed': 0, 'maxTotalSnapshots': 10}), - mock.patch.object(self.controller.compute_api.compute_rpcapi, - 'quiesce_instance', - side_effect=exception.InstanceQuiesceNotSupported( - instance_id='fake', reason='test')), mock.patch.object(self.controller.compute_api.volume_api, 'get', return_value=volume), mock.patch.object(self.controller.compute_api.volume_api, 'create_snapshot_force', return_value=snapshot), - ) as (mock_get_limits, mock_quiesce, mock_vol_get, mock_vol_create): + ) as (mock_get_limits, mock_vol_get, mock_vol_create): response = self.controller._action_create_image(self.req, FAKE_UUID, body=body) @@ -1218,7 +1222,7 @@ class ServerActionsControllerTestV21(test.TestCase): for key, val in extra_metadata.items(): self.assertEqual(properties[key], val) - mock_quiesce.assert_called_once_with(mock.ANY, mock.ANY) + self.mock_rpcapi.quiesce_instance.assert_called_once() mock_vol_get.assert_called_once_with(mock.ANY, volume['id']) mock_vol_create.assert_called_once_with(mock.ANY, volume['id'], mock.ANY, mock.ANY) diff --git a/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py b/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py index a0404baffc..81d1939e71 100644 
--- a/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py +++ b/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py @@ -209,7 +209,8 @@ class ServerGroupQuotasUnifiedLimitsTestV21(ServerGroupQuotasTestV21): self.flags(driver='nova.quota.UnifiedLimitsDriver', group='quota') self.req = fakes.HTTPRequest.blank('') self.controller = sg_v21.ServerGroupController() - self.useFixture(limit_fixture.LimitFixture({'server_groups': 10}, {})) + self.limit_fixture = self.useFixture( + limit_fixture.LimitFixture({'server_groups': 10}, {})) @mock.patch('nova.limit.local.enforce_db_limit') def test_create_server_group_during_recheck(self, mock_enforce): @@ -236,7 +237,7 @@ class ServerGroupQuotasUnifiedLimitsTestV21(ServerGroupQuotasTestV21): delta=1) def test_create_group_fails_with_zero_quota(self): - self.useFixture(limit_fixture.LimitFixture({'server_groups': 0}, {})) + self.limit_fixture.reglimits = {'server_groups': 0} sgroup = {'name': 'test', 'policies': ['anti-affinity']} exc = self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, @@ -245,7 +246,7 @@ class ServerGroupQuotasUnifiedLimitsTestV21(ServerGroupQuotasTestV21): self.assertIn(msg, str(exc)) def test_create_only_one_group_when_limit_is_one(self): - self.useFixture(limit_fixture.LimitFixture({'server_groups': 1}, {})) + self.limit_fixture.reglimits = {'server_groups': 1} policies = ['anti-affinity'] sgroup = {'name': 'test', 'policies': policies} res_dict = self.controller.create( diff --git a/nova/tests/unit/api/openstack/compute/test_servers.py b/nova/tests/unit/api/openstack/compute/test_servers.py index 31739ed7ab..4e2a694e15 100644 --- a/nova/tests/unit/api/openstack/compute/test_servers.py +++ b/nova/tests/unit/api/openstack/compute/test_servers.py @@ -2087,10 +2087,10 @@ class ServersControllerTestV216(_ServersControllerTest): return server_dict - @mock.patch('nova.compute.api.API.get_instance_host_status') - def _verify_host_status_policy_behavior(self, func, 
mock_get_host_status): + def _verify_host_status_policy_behavior(self, func): # Set policy to disallow both host_status cases and verify we don't # call the get_instance_host_status compute RPC API. + self.mock_get_instance_host_status.reset_mock() rules = { 'os_compute_api:servers:show:host_status': '!', 'os_compute_api:servers:show:host_status:unknown-only': '!', @@ -2098,7 +2098,7 @@ class ServersControllerTestV216(_ServersControllerTest): orig_rules = policy.get_rules() policy.set_rules(oslo_policy.Rules.from_dict(rules), overwrite=False) func() - mock_get_host_status.assert_not_called() + self.mock_get_instance_host_status.assert_not_called() # Restore the original rules. policy.set_rules(orig_rules) @@ -2638,15 +2638,13 @@ class ServersControllerTestV275(ControllerTest): microversion = '2.75' - @mock.patch('nova.compute.api.API.get_all') - def test_get_servers_additional_query_param_old_version(self, mock_get): + def test_get_servers_additional_query_param_old_version(self): req = fakes.HTTPRequest.blank(self.path_with_query % 'unknown=1', use_admin_context=True, version='2.74') self.controller.index(req) - @mock.patch('nova.compute.api.API.get_all') - def test_get_servers_ignore_sort_key_old_version(self, mock_get): + def test_get_servers_ignore_sort_key_old_version(self): req = fakes.HTTPRequest.blank( self.path_with_query % 'sort_key=deleted', use_admin_context=True, version='2.74') @@ -3584,13 +3582,13 @@ class ServersControllerRebuildTestV263(ControllerTest): }, } - @mock.patch('nova.compute.api.API.get') - def _rebuild_server(self, mock_get, certs=None, - conf_enabled=True, conf_certs=None): + def _rebuild_server(self, certs=None, conf_enabled=True, conf_certs=None): ctx = self.req.environ['nova.context'] - mock_get.return_value = fakes.stub_instance_obj(ctx, - vm_state=vm_states.ACTIVE, trusted_certs=certs, - project_id=self.req_project_id, user_id=self.req_user_id) + self.mock_get.side_effect = None + self.mock_get.return_value = 
fakes.stub_instance_obj( + ctx, vm_state=vm_states.ACTIVE, trusted_certs=certs, + project_id=self.req_project_id, user_id=self.req_user_id + ) self.flags(default_trusted_certificate_ids=conf_certs, group='glance') @@ -3743,10 +3741,10 @@ class ServersControllerRebuildTestV271(ControllerTest): } } - @mock.patch('nova.compute.api.API.get') - def _rebuild_server(self, mock_get): + def _rebuild_server(self): ctx = self.req.environ['nova.context'] - mock_get.return_value = fakes.stub_instance_obj(ctx, + self.mock_get.side_effect = None + self.mock_get.return_value = fakes.stub_instance_obj(ctx, vm_state=vm_states.ACTIVE, project_id=self.req_project_id, user_id=self.req_user_id) server = self.controller._action_rebuild( diff --git a/nova/tests/unit/api/openstack/compute/test_volumes.py b/nova/tests/unit/api/openstack/compute/test_volumes.py index a24c104c93..14d27d8546 100644 --- a/nova/tests/unit/api/openstack/compute/test_volumes.py +++ b/nova/tests/unit/api/openstack/compute/test_volumes.py @@ -1889,8 +1889,7 @@ class AssistedSnapshotDeleteTestCaseV21(test.NoDBTestCase): req, '5') def _test_assisted_delete_instance_conflict(self, api_error): - # unset the stub on volume_snapshot_delete from setUp - self.mock_volume_snapshot_delete.stop() + self.mock_volume_snapshot_delete.side_effect = api_error params = { 'delete_info': jsonutils.dumps({'volume_id': '1'}), } @@ -1899,10 +1898,9 @@ class AssistedSnapshotDeleteTestCaseV21(test.NoDBTestCase): urllib.parse.urlencode(params), version=self.microversion) req.method = 'DELETE' - with mock.patch.object(compute_api.API, 'volume_snapshot_delete', - side_effect=api_error): - self.assertRaises( - webob.exc.HTTPBadRequest, self.controller.delete, req, '5') + + self.assertRaises( + webob.exc.HTTPBadRequest, self.controller.delete, req, '5') def test_assisted_delete_instance_invalid_state(self): api_error = exception.InstanceInvalidState( diff --git a/nova/tests/unit/cmd/test_status.py b/nova/tests/unit/cmd/test_status.py index 
ba85590697..2d33c890b7 100644 --- a/nova/tests/unit/cmd/test_status.py +++ b/nova/tests/unit/cmd/test_status.py @@ -502,3 +502,19 @@ class TestCheckMachineTypeUnset(test.NoDBTestCase): upgradecheck.Code.SUCCESS, result.code ) + + +class TestUpgradeCheckServiceUserToken(test.NoDBTestCase): + + def setUp(self): + super().setUp() + self.cmd = status.UpgradeCommands() + + def test_service_user_token_not_configured(self): + result = self.cmd._check_service_user_token() + self.assertEqual(upgradecheck.Code.FAILURE, result.code) + + def test_service_user_token_configured(self): + self.flags(send_service_user_token=True, group='service_user') + result = self.cmd._check_service_user_token() + self.assertEqual(upgradecheck.Code.SUCCESS, result.code) diff --git a/nova/tests/unit/compute/test_api.py b/nova/tests/unit/compute/test_api.py index eb5b0700d4..390dece66d 100644 --- a/nova/tests/unit/compute/test_api.py +++ b/nova/tests/unit/compute/test_api.py @@ -967,6 +967,31 @@ class _ComputeAPIUnitTestMixIn(object): return snapshot_id + def _test_delete(self, delete_type, **attrs): + delete_time = datetime.datetime( + 1955, 11, 5, 9, 30, tzinfo=iso8601.UTC) + timeutils.set_time_override(delete_time) + self.addCleanup(timeutils.clear_time_override) + + with test.nested( + mock.patch.object( + self.compute_api.compute_rpcapi, 'confirm_resize'), + mock.patch.object( + self.compute_api.compute_rpcapi, 'terminate_instance'), + mock.patch.object( + self.compute_api.compute_rpcapi, 'soft_delete_instance'), + ) as ( + mock_confirm, mock_terminate, mock_soft_delete + ): + self._do_delete( + delete_type, + mock_confirm, + mock_terminate, + mock_soft_delete, + delete_time, + **attrs + ) + @mock.patch.object(compute_utils, 'notify_about_instance_action') @mock.patch.object(objects.Migration, 'get_by_instance_and_status') @@ -986,12 +1011,13 @@ class _ComputeAPIUnitTestMixIn(object): @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid', return_value=[]) 
@mock.patch.object(objects.Instance, 'save') - def _test_delete(self, delete_type, mock_save, mock_bdm_get, mock_elevated, - mock_get_cn, mock_up, mock_record, mock_inst_update, - mock_deallocate, mock_inst_meta, mock_inst_destroy, - mock_notify_legacy, mock_get_inst, - mock_save_im, mock_image_delete, mock_mig_get, - mock_notify, **attrs): + def _do_delete( + self, delete_type, mock_confirm, mock_terminate, mock_soft_delete, + delete_time, mock_save, mock_bdm_get, mock_elevated, mock_get_cn, + mock_up, mock_record, mock_inst_update, mock_deallocate, + mock_inst_meta, mock_inst_destroy, mock_notify_legacy, mock_get_inst, + mock_save_im, mock_image_delete, mock_mig_get, mock_notify, **attrs + ): expected_save_calls = [mock.call()] expected_record_calls = [] expected_elevated_calls = [] @@ -1001,17 +1027,11 @@ class _ComputeAPIUnitTestMixIn(object): deltas = {'instances': -1, 'cores': -inst.flavor.vcpus, 'ram': -inst.flavor.memory_mb} - delete_time = datetime.datetime(1955, 11, 5, 9, 30, - tzinfo=iso8601.UTC) - self.useFixture(utils_fixture.TimeFixture(delete_time)) task_state = (delete_type == 'soft_delete' and task_states.SOFT_DELETING or task_states.DELETING) updates = {'progress': 0, 'task_state': task_state} if delete_type == 'soft_delete': updates['deleted_at'] = delete_time - rpcapi = self.compute_api.compute_rpcapi - mock_confirm = self.useFixture( - fixtures.MockPatchObject(rpcapi, 'confirm_resize')).mock def _reset_task_state(context, instance, migration, src_host, cast=False): @@ -1026,11 +1046,6 @@ class _ComputeAPIUnitTestMixIn(object): snapshot_id = self._set_delete_shelved_part(inst, mock_image_delete) - mock_terminate = self.useFixture( - fixtures.MockPatchObject(rpcapi, 'terminate_instance')).mock - mock_soft_delete = self.useFixture( - fixtures.MockPatchObject(rpcapi, 'soft_delete_instance')).mock - if inst.task_state == task_states.RESIZE_FINISH: self._test_delete_resizing_part(inst, deltas) @@ -2637,9 +2652,6 @@ class 
_ComputeAPIUnitTestMixIn(object): rpcapi = self.compute_api.compute_rpcapi - mock_pause = self.useFixture( - fixtures.MockPatchObject(rpcapi, 'pause_instance')).mock - with mock.patch.object(rpcapi, 'pause_instance') as mock_pause: self.compute_api.pause(self.context, instance) @@ -5624,7 +5636,10 @@ class _ComputeAPIUnitTestMixIn(object): destination_type='volume', volume_type=None, snapshot_id=None, volume_id=uuids.volume_id, volume_size=None)]) - rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({}) + rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({ + 'properties': {'hw_rescue_device': 'disk', + 'hw_rescue_bus': 'scsi'} + }) with test.nested( mock.patch.object(self.compute_api.placementclient, @@ -5676,6 +5691,7 @@ class _ComputeAPIUnitTestMixIn(object): # Assert that the instance task state as set in the compute API self.assertEqual(task_states.RESCUING, instance.task_state) + @mock.patch('nova.objects.instance.Instance.image_meta') @mock.patch('nova.objects.compute_node.ComputeNode' '.get_by_host_and_nodename') @mock.patch('nova.compute.utils.is_volume_backed_instance', @@ -5684,7 +5700,8 @@ class _ComputeAPIUnitTestMixIn(object): '.get_by_instance_uuid') def test_rescue_bfv_without_required_trait(self, mock_get_bdms, mock_is_volume_backed, - mock_get_cn): + mock_get_cn, + mock_image_meta): instance = self._create_instance_obj() bdms = objects.BlockDeviceMappingList(objects=[ objects.BlockDeviceMapping( @@ -5692,6 +5709,12 @@ class _ComputeAPIUnitTestMixIn(object): destination_type='volume', volume_type=None, snapshot_id=None, volume_id=uuids.volume_id, volume_size=None)]) + + instance.image_meta = image_meta_obj.ImageMeta.from_dict({ + 'properties': {'hw_rescue_device': 'disk', + 'hw_rescue_bus': 'scsi'} + }) + with test.nested( mock.patch.object(self.compute_api.placementclient, 'get_provider_traits'), @@ -5729,6 +5752,124 @@ class _ComputeAPIUnitTestMixIn(object): mock_get_traits.assert_called_once_with( self.context, uuids.cn) + 
@mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref') + @mock.patch('nova.objects.compute_node.ComputeNode' + '.get_by_host_and_nodename') + @mock.patch('nova.compute.utils.is_volume_backed_instance', + return_value=True) + @mock.patch('nova.objects.block_device.BlockDeviceMappingList' + '.get_by_instance_uuid') + def test_rescue_bfv_with_required_image_properties( + self, mock_get_bdms, mock_is_volume_backed, mock_get_cn, + mock_image_meta_obj_from_ref): + instance = self._create_instance_obj() + bdms = objects.BlockDeviceMappingList(objects=[ + objects.BlockDeviceMapping( + boot_index=0, image_id=uuids.image_id, source_type='image', + destination_type='volume', volume_type=None, + snapshot_id=None, volume_id=uuids.volume_id, + volume_size=None)]) + rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({ + 'properties': {'hw_rescue_device': 'disk', + 'hw_rescue_bus': 'scsi'} + }) + + with test.nested( + mock.patch.object(self.compute_api.placementclient, + 'get_provider_traits'), + mock.patch.object(self.compute_api.volume_api, 'get'), + mock.patch.object(self.compute_api.volume_api, 'check_attached'), + mock.patch.object(instance, 'save'), + mock.patch.object(self.compute_api, '_record_action_start'), + mock.patch.object(self.compute_api.compute_rpcapi, + 'rescue_instance') + ) as ( + mock_get_traits, mock_get_volume, mock_check_attached, + mock_instance_save, mock_record_start, mock_rpcapi_rescue + ): + # Mock out the returned compute node, image_meta, bdms and volume + mock_image_meta_obj_from_ref.return_value = rescue_image_meta_obj + mock_get_bdms.return_value = bdms + mock_get_volume.return_value = mock.sentinel.volume + mock_get_cn.return_value = mock.Mock(uuid=uuids.cn) + + # Ensure the required trait is returned, allowing BFV rescue + mock_trait_info = mock.Mock(traits=[ot.COMPUTE_RESCUE_BFV]) + mock_get_traits.return_value = mock_trait_info + + # Try to rescue the instance + self.compute_api.rescue(self.context, instance, + 
rescue_image_ref=uuids.rescue_image_id, + allow_bfv_rescue=True) + + # Assert all of the calls made in the compute API + mock_get_bdms.assert_called_once_with(self.context, instance.uuid) + mock_get_volume.assert_called_once_with( + self.context, uuids.volume_id) + mock_check_attached.assert_called_once_with( + self.context, mock.sentinel.volume) + mock_is_volume_backed.assert_called_once_with( + self.context, instance, bdms) + mock_get_cn.assert_called_once_with( + self.context, instance.host, instance.node) + mock_get_traits.assert_called_once_with(self.context, uuids.cn) + mock_instance_save.assert_called_once_with( + expected_task_state=[None]) + mock_record_start.assert_called_once_with( + self.context, instance, instance_actions.RESCUE) + mock_rpcapi_rescue.assert_called_once_with( + self.context, instance=instance, rescue_password=None, + rescue_image_ref=uuids.rescue_image_id, clean_shutdown=True) + + # Assert that the instance task state as set in the compute API + self.assertEqual(task_states.RESCUING, instance.task_state) + + @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref') + @mock.patch('nova.compute.utils.is_volume_backed_instance', + return_value=True) + @mock.patch('nova.objects.block_device.BlockDeviceMappingList' + '.get_by_instance_uuid') + def test_rescue_bfv_without_required_image_properties( + self, mock_get_bdms, mock_is_volume_backed, + mock_image_meta_obj_from_ref): + instance = self._create_instance_obj() + bdms = objects.BlockDeviceMappingList(objects=[ + objects.BlockDeviceMapping( + boot_index=0, image_id=uuids.image_id, source_type='image', + destination_type='volume', volume_type=None, + snapshot_id=None, volume_id=uuids.volume_id, + volume_size=None)]) + rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({ + 'properties': {} + }) + + with test.nested( + mock.patch.object(self.compute_api.volume_api, 'get'), + mock.patch.object(self.compute_api.volume_api, 'check_attached'), + ) as ( + mock_get_volume, 
mock_check_attached + ): + # Mock out the returned bdms, volume and image_meta + mock_get_bdms.return_value = bdms + mock_get_volume.return_value = mock.sentinel.volume + mock_image_meta_obj_from_ref.return_value = rescue_image_meta_obj + + # Assert that any attempt to rescue a bfv instance on a compute + # node that does not report the COMPUTE_RESCUE_BFV trait fails and + # raises InstanceNotRescuable + self.assertRaises(exception.InstanceNotRescuable, + self.compute_api.rescue, self.context, instance, + rescue_image_ref=None, allow_bfv_rescue=True) + + # Assert the calls made in the compute API prior to the failure + mock_get_bdms.assert_called_once_with(self.context, instance.uuid) + mock_get_volume.assert_called_once_with( + self.context, uuids.volume_id) + mock_check_attached.assert_called_once_with( + self.context, mock.sentinel.volume) + mock_is_volume_backed.assert_called_once_with( + self.context, instance, bdms) + @mock.patch('nova.compute.utils.is_volume_backed_instance', return_value=True) @mock.patch('nova.objects.block_device.BlockDeviceMappingList' @@ -7741,16 +7882,13 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase): self.assertTrue(hasattr(self.compute_api, 'host')) self.assertEqual(CONF.host, self.compute_api.host) - @mock.patch('nova.scheduler.client.report.SchedulerReportClient') + @mock.patch('nova.scheduler.client.report.report_client_singleton') def test_placement_client_init(self, mock_report_client): """Tests to make sure that the construction of the placement client - only happens once per API class instance. + uses the singleton helper, and happens only when needed. """ - self.assertIsNone(self.compute_api._placementclient) - # Access the property twice to make sure SchedulerReportClient is - # only loaded once. 
- for x in range(2): - self.compute_api.placementclient + self.assertFalse(mock_report_client.called) + self.compute_api.placementclient mock_report_client.assert_called_once_with() def test_validate_host_for_cold_migrate_same_host_fails(self): diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py index d8f443843f..f2ea9c3c00 100644 --- a/nova/tests/unit/compute/test_compute.py +++ b/nova/tests/unit/compute/test_compute.py @@ -5714,13 +5714,15 @@ class ComputeTestCase(BaseTestCase, objects=[objects.PciDevice(vendor_id='1377', product_id='0047', address='0000:0a:00.1', - request_id=uuids.req1)]) + request_id=uuids.req1, + compute_node_id=1)]) new_pci_devices = objects.PciDeviceList( objects=[objects.PciDevice(vendor_id='1377', product_id='0047', address='0000:0b:00.1', - request_id=uuids.req2)]) + request_id=uuids.req2, + compute_node_id=2)]) if expected_pci_addr == old_pci_devices[0].address: expected_pci_device = old_pci_devices[0] @@ -8618,16 +8620,13 @@ class ComputeAPITestCase(BaseTestCase): def test_create_instance_sets_system_metadata(self): # Make sure image properties are copied into system metadata. 
- with mock.patch.object( - self.compute_api.compute_task_api, 'schedule_and_build_instances', - ) as mock_sbi: - ref, resv_id = self.compute_api.create( - self.context, - flavor=self.default_flavor, - image_href='f5000000-0000-0000-0000-000000000000') + ref, resv_id = self.compute_api.create( + self.context, + flavor=self.default_flavor, + image_href='f5000000-0000-0000-0000-000000000000') - build_call = mock_sbi.call_args_list[0] - instance = build_call[1]['build_requests'][0].instance + build_call = self.schedule_and_build_instances_mock.call_args_list[0] + instance = build_call[1]['build_requests'][0].instance image_props = {'image_kernel_id': uuids.kernel_id, 'image_ramdisk_id': uuids.ramdisk_id, @@ -8637,16 +8636,14 @@ class ComputeAPITestCase(BaseTestCase): self.assertEqual(value, instance.system_metadata[key]) def test_create_saves_flavor(self): - with mock.patch.object( - self.compute_api.compute_task_api, 'schedule_and_build_instances', - ) as mock_sbi: - ref, resv_id = self.compute_api.create( - self.context, - flavor=self.default_flavor, - image_href=uuids.image_href_id) + ref, resv_id = self.compute_api.create( + self.context, + flavor=self.default_flavor, + image_href=uuids.image_href_id) + + build_call = self.schedule_and_build_instances_mock.call_args_list[0] + instance = build_call[1]['build_requests'][0].instance - build_call = mock_sbi.call_args_list[0] - instance = build_call[1]['build_requests'][0].instance self.assertIn('flavor', instance) self.assertEqual(self.default_flavor.flavorid, instance.flavor.flavorid) @@ -8654,19 +8651,18 @@ class ComputeAPITestCase(BaseTestCase): def test_create_instance_associates_security_groups(self): # Make sure create associates security groups. 
- with test.nested( - mock.patch.object(self.compute_api.compute_task_api, - 'schedule_and_build_instances'), - mock.patch('nova.network.security_group_api.validate_name', - return_value=uuids.secgroup_id), - ) as (mock_sbi, mock_secgroups): + with mock.patch( + "nova.network.security_group_api.validate_name", + return_value=uuids.secgroup_id, + ) as mock_secgroups: self.compute_api.create( self.context, flavor=self.default_flavor, image_href=uuids.image_href_id, security_groups=['testgroup']) - build_call = mock_sbi.call_args_list[0] + build_call = ( + self.schedule_and_build_instances_mock.call_args_list[0]) reqspec = build_call[1]['request_spec'][0] self.assertEqual(1, len(reqspec.security_groups)) @@ -8701,22 +8697,19 @@ class ComputeAPITestCase(BaseTestCase): requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=uuids.port_instance)]) - with test.nested( - mock.patch.object( - self.compute_api.compute_task_api, - 'schedule_and_build_instances'), - mock.patch.object( - self.compute_api.network_api, - 'create_resource_requests', - return_value=(None, [], objects.RequestLevelParams())), - ) as (mock_sbi, _mock_create_resreqs): + with mock.patch.object( + self.compute_api.network_api, + "create_resource_requests", + return_value=(None, [], objects.RequestLevelParams()), + ): self.compute_api.create( self.context, flavor=self.default_flavor, image_href=uuids.image_href_id, requested_networks=requested_networks) - build_call = mock_sbi.call_args_list[0] + build_call = ( + self.schedule_and_build_instances_mock.call_args_list[0]) reqspec = build_call[1]['request_spec'][0] self.assertEqual(1, len(reqspec.requested_networks)) @@ -10216,8 +10209,7 @@ class ComputeAPITestCase(BaseTestCase): self.compute_api.get_console_output, self.context, instance) - @mock.patch.object(compute_utils, 'notify_about_instance_action') - def test_attach_interface(self, mock_notify): + def test_attach_interface(self): instance = 
self._create_fake_instance_obj() nwinfo = [fake_network_cache_model.new_vif()] network_id = nwinfo[0]['network']['id'] @@ -10237,8 +10229,12 @@ class ComputeAPITestCase(BaseTestCase): mock.patch.object( self.compute, "_claim_pci_device_for_interface_attach", - return_value=None) - ) as (cap, mock_lock, mock_create_resource_req, mock_claim_pci): + return_value=None), + mock.patch.object(compute_utils, 'notify_about_instance_action'), + ) as ( + cap, mock_lock, mock_create_resource_req, mock_claim_pci, + mock_notify + ): mock_create_resource_req.return_value = ( None, [], mock.sentinel.req_lvl_params) vif = self.compute.attach_interface(self.context, @@ -11056,8 +11052,7 @@ class ComputeAPITestCase(BaseTestCase): mock_remove_res.assert_called_once_with( self.context, instance.uuid, mock.sentinel.resources) - @mock.patch.object(compute_utils, 'notify_about_instance_action') - def test_detach_interface(self, mock_notify): + def test_detach_interface(self): nwinfo, port_id = self.test_attach_interface() instance = self._create_fake_instance_obj() instance.info_cache = objects.InstanceInfoCache.new( @@ -11090,10 +11085,13 @@ class ComputeAPITestCase(BaseTestCase): mock.patch('nova.pci.request.get_instance_pci_request_from_vif', return_value=pci_req), mock.patch.object(self.compute.rt, 'unclaim_pci_devices'), - mock.patch.object(instance, 'save') + mock.patch.object(instance, 'save'), + mock.patch.object(compute_utils, 'notify_about_instance_action'), ) as ( - mock_remove_alloc, mock_deallocate, mock_lock, - mock_get_pci_req, mock_unclaim_pci, mock_instance_save): + mock_remove_alloc, mock_deallocate, mock_lock, + mock_get_pci_req, mock_unclaim_pci, mock_instance_save, + mock_notify + ): self.compute.detach_interface(self.context, instance, port_id) mock_deallocate.assert_called_once_with( @@ -11900,17 +11898,16 @@ class ComputeAPITestCase(BaseTestCase): instance.save() @mock.patch.object(objects.Service, 'get_by_compute_host') - 
@mock.patch.object(self.compute_api.compute_task_api, - 'rebuild_instance') @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host') @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid') @mock.patch.object(self.compute_api.servicegroup_api, 'service_is_up') - def do_test(service_is_up, get_by_instance_uuid, get_all_by_host, - rebuild_instance, get_service): + def do_test( + service_is_up, get_by_instance_uuid, get_all_by_host, get_service + ): service_is_up.return_value = False get_by_instance_uuid.return_value = fake_spec - rebuild_instance.side_effect = fake_rebuild_instance + self.rebuild_instance_mock.side_effect = fake_rebuild_instance get_all_by_host.return_value = objects.ComputeNodeList( objects=[objects.ComputeNode( host='fake_dest_host', @@ -11928,7 +11925,7 @@ class ComputeAPITestCase(BaseTestCase): host = None else: host = 'fake_dest_host' - rebuild_instance.assert_called_once_with( + self.rebuild_instance_mock.assert_called_once_with( ctxt, instance=instance, new_pass=None, @@ -13046,16 +13043,13 @@ class ComputeAPIAggrTestCase(BaseTestCase): hosts = aggregate.hosts if 'hosts' in aggregate else None self.assertIn(values[0][1][0], hosts) - @mock.patch('nova.scheduler.client.report.SchedulerReportClient') + @mock.patch('nova.scheduler.client.report.report_client_singleton') def test_placement_client_init(self, mock_report_client): """Tests to make sure that the construction of the placement client - only happens once per AggregateAPI class instance. + uses the singleton helper, and happens only when needed. """ - self.assertIsNone(self.api._placement_client) - # Access the property twice to make sure SchedulerReportClient is - # only loaded once. 
- for x in range(2): - self.api.placement_client + self.assertFalse(mock_report_client.called) + self.api.placement_client mock_report_client.assert_called_once_with() diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py index 760ea79e87..62f15d0d93 100644 --- a/nova/tests/unit/compute/test_compute_mgr.py +++ b/nova/tests/unit/compute/test_compute_mgr.py @@ -1306,6 +1306,36 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase, self.compute._init_instance(self.context, instance) set_error_state.assert_called_once_with(instance) + def test_init_instance_vif_plug_fails_missing_pci(self): + instance = fake_instance.fake_instance_obj( + self.context, + uuid=uuids.instance, + info_cache=None, + power_state=power_state.RUNNING, + vm_state=vm_states.ACTIVE, + task_state=None, + host=self.compute.host, + expected_attrs=['info_cache']) + + with test.nested( + mock.patch.object(context, 'get_admin_context', + return_value=self.context), + mock.patch.object(objects.Instance, 'get_network_info', + return_value=network_model.NetworkInfo()), + mock.patch.object(self.compute.driver, 'plug_vifs', + side_effect=exception.PciDeviceNotFoundById("pci-addr")), + mock.patch("nova.compute.manager.LOG.exception"), + ) as (get_admin_context, get_nw_info, plug_vifs, log_exception): + # as this does not raise, we are sure that the compute service + # continues initializing the rest of the instances + self.compute._init_instance(self.context, instance) + log_exception.assert_called_once_with( + "Virtual interface plugging failed for instance. Probably the " + "vnic_type of the bound port has been changed. 
Nova does not " + "support such change.", + instance=instance + ) + def _test__validate_pinning_configuration(self, supports_pcpus=True): instance_1 = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance_1) @@ -7585,6 +7615,27 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase): instance, hints) mock_get.assert_called_once_with(self.context, uuids.group_hint) + @mock.patch('nova.objects.InstanceGroup.get_by_hint') + def test_validate_instance_group_policy_deleted_group(self, mock_get): + """Tests that _validate_instance_group_policy handles the case + where the scheduler hint has a group but that group has been deleted. + This tests is a reproducer for bug: #1890244 + """ + instance = objects.Instance(uuid=uuids.instance) + hints = {'group': [uuids.group_hint]} + mock_get.side_effect = exception.InstanceGroupNotFound( + group_uuid=uuids.group_hint + ) + # This implicitly asserts that no exception is raised since + # uncaught exceptions would be treated as a test failure. + self.compute._validate_instance_group_policy( + self.context, instance, hints + ) + # and this just assert that we did in fact invoke the method + # that raises to ensure that if we refactor in the future this + # this test will fail if the function we mock is no longer called. 
+ mock_get.assert_called_once_with(self.context, uuids.group_hint) + @mock.patch('nova.objects.InstanceGroup.get_by_uuid') @mock.patch('nova.objects.InstanceList.get_uuids_by_host') @mock.patch('nova.objects.InstanceGroup.get_by_hint') @@ -8563,11 +8614,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, @mock.patch.object(self.compute.network_api, 'setup_networks_on_host') @mock.patch.object(self.compute.network_api, 'migrate_instance_start') @mock.patch.object(compute_utils, 'notify_usage_exists') - @mock.patch.object(self.migration, 'save') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') def do_test(get_by_instance_uuid, - migration_save, notify_usage_exists, migrate_instance_start, setup_networks_on_host, @@ -8639,7 +8688,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, @mock.patch.object(self.compute.network_api, 'migrate_instance_finish', side_effect=_migrate_instance_finish) @mock.patch.object(self.compute.network_api, 'setup_networks_on_host') - @mock.patch.object(self.migration, 'save') @mock.patch.object(self.instance, 'save') @mock.patch.object(self.compute, '_set_instance_info') @mock.patch.object(db, 'instance_fault_create') @@ -8653,7 +8701,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, fault_create, set_instance_info, instance_save, - migration_save, setup_networks_on_host, migrate_instance_finish, get_instance_nw_info, @@ -8697,11 +8744,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, @mock.patch.object(self.compute.network_api, 'migrate_instance_start') @mock.patch.object(compute_utils, 'notify_usage_exists') @mock.patch.object(db, 'instance_extra_update_by_uuid') - @mock.patch.object(self.migration, 'save') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') def do_revert_resize(mock_get_by_instance_uuid, - mock_migration_save, mock_extra_update, mock_notify_usage_exists, mock_migrate_instance_start, @@ -8748,7 +8793,6 @@ class 
ComputeManagerMigrationTestCase(test.NoDBTestCase, @mock.patch.object(compute_utils, 'notify_about_instance_action') @mock.patch.object(self.compute, "_set_instance_info") @mock.patch.object(self.instance, 'save') - @mock.patch.object(self.migration, 'save') @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(db, 'instance_fault_create') @mock.patch.object(db, 'instance_extra_update_by_uuid') @@ -8772,7 +8816,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, mock_extra_update, mock_fault_create, mock_fault_from_exc, - mock_mig_save, mock_inst_save, mock_set, mock_notify_about_instance_action, @@ -8866,7 +8909,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, @mock.patch.object(self.compute, '_delete_scheduler_instance_info') @mock.patch('nova.objects.Instance.get_by_uuid') @mock.patch('nova.objects.Migration.get_by_id') - @mock.patch.object(self.migration, 'save') @mock.patch.object(self.compute, '_notify_about_instance_usage') @mock.patch.object(self.compute, 'network_api') @mock.patch.object(self.compute.driver, 'confirm_migration') @@ -8875,7 +8917,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, @mock.patch.object(self.instance, 'save') def do_confirm_resize(mock_save, mock_drop, mock_delete, mock_confirm, mock_nwapi, mock_notify, - mock_mig_save, mock_mig_get, mock_inst_get, + mock_mig_get, mock_inst_get, mock_delete_scheduler_info): self._mock_rt() @@ -8958,16 +9000,16 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, instance_get_by_uuid.assert_called_once() def test_confirm_resize_calls_virt_driver_with_old_pci(self): - @mock.patch.object(self.migration, 'save') @mock.patch.object(self.compute, '_notify_about_instance_usage') @mock.patch.object(self.compute, 'network_api') @mock.patch.object(self.compute.driver, 'confirm_migration') @mock.patch.object(self.compute, '_delete_allocation_after_move') @mock.patch.object(self.instance, 'drop_migration_context') 
@mock.patch.object(self.instance, 'save') - def do_confirm_resize(mock_save, mock_drop, mock_delete, - mock_confirm, mock_nwapi, mock_notify, - mock_mig_save): + def do_confirm_resize( + mock_save, mock_drop, mock_delete, mock_confirm, mock_nwapi, + mock_notify + ): # Mock virt driver confirm_resize() to save the provided # network_info, we will check it later. updated_nw_info = [] @@ -8983,10 +9025,12 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, self._mock_rt() old_devs = objects.PciDeviceList( objects=[objects.PciDevice( + compute_node_id=1, address='0000:04:00.2', request_id=uuids.pcidev1)]) new_devs = objects.PciDeviceList( objects=[objects.PciDevice( + compute_node_id=2, address='0000:05:00.3', request_id=uuids.pcidev1)]) self.instance.migration_context = objects.MigrationContext( @@ -9958,6 +10002,27 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, self.instance, migration) + def test_post_live_migration_update_host(self): + @mock.patch.object(self.compute, '_get_compute_info') + def _test_post_live_migration(_get_compute_info): + dest_host = 'dest' + cn = objects.ComputeNode(hypervisor_hostname=dest_host) + _get_compute_info.return_value = cn + instance = fake_instance.fake_instance_obj(self.context, + node='src', + uuid=uuids.instance) + with mock.patch.object(self.compute, "_post_live_migration" + ) as plm, mock.patch.object(instance, "save") as save: + error = ValueError("some failure") + plm.side_effect = error + self.assertRaises( + ValueError, self.compute._post_live_migration_update_host, + self.context, instance, dest_host) + save.assert_called_once() + self.assertEqual(instance.host, dest_host) + + _test_post_live_migration() + def test_post_live_migration_cinder_pre_344_api(self): # Because live migration has # succeeded,_post_live_migration_remove_source_vol_connections() @@ -10957,40 +11022,94 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, _test() def test__update_migrate_vifs_profile_with_pci(self): - # 
Define two migrate vifs with only one pci that is required - # to be updated. Make sure method under test updated the correct one + # Define three migrate vifs with two pci devs that are required + # to be updated, one VF and on PF. + # Make sure method under test updated the correct devs with the correct + # values. nw_vifs = network_model.NetworkInfo( - [network_model.VIF( - id=uuids.port0, - vnic_type='direct', - type=network_model.VIF_TYPE_HW_VEB, - profile={'pci_slot': '0000:04:00.3', - 'pci_vendor_info': '15b3:1018', - 'physical_network': 'default'}), - network_model.VIF( - id=uuids.port1, - vnic_type='normal', - type=network_model.VIF_TYPE_OVS, - profile={'some': 'attribute'})]) - pci_dev = objects.PciDevice(request_id=uuids.pci_req, - address='0000:05:00.4', - vendor_id='15b3', - product_id='1018') - port_id_to_pci_dev = {uuids.port0: pci_dev} - mig_vifs = migrate_data_obj.VIFMigrateData.\ - create_skeleton_migrate_vifs(nw_vifs) - self.compute._update_migrate_vifs_profile_with_pci(mig_vifs, - port_id_to_pci_dev) + [ + network_model.VIF( + id=uuids.port0, + vnic_type='direct', + type=network_model.VIF_TYPE_HW_VEB, + profile={ + 'pci_slot': '0000:04:00.3', + 'pci_vendor_info': '15b3:1018', + 'physical_network': 'default', + }, + ), + network_model.VIF( + id=uuids.port1, + vnic_type='normal', + type=network_model.VIF_TYPE_OVS, + profile={'some': 'attribute'}, + ), + network_model.VIF( + id=uuids.port2, + vnic_type='direct-physical', + type=network_model.VIF_TYPE_HOSTDEV, + profile={ + 'pci_slot': '0000:01:00', + 'pci_vendor_info': '8086:154d', + 'physical_network': 'physnet2', + }, + ), + ] + ) + + pci_vf_dev = objects.PciDevice( + request_id=uuids.pci_req, + address='0000:05:00.4', + parent_addr='0000:05:00', + vendor_id='15b3', + product_id='1018', + compute_node_id=13, + dev_type=fields.PciDeviceType.SRIOV_VF, + ) + pci_pf_dev = objects.PciDevice( + request_id=uuids.pci_req2, + address='0000:01:00', + parent_addr='0000:02:00', + vendor_id='8086', + 
product_id='154d', + compute_node_id=13, + dev_type=fields.PciDeviceType.SRIOV_PF, + extra_info={'mac_address': 'b4:96:91:34:f4:36'}, + ) + port_id_to_pci_dev = { + uuids.port0: pci_vf_dev, + uuids.port2: pci_pf_dev, + } + mig_vifs = ( + migrate_data_obj.VIFMigrateData.create_skeleton_migrate_vifs( + nw_vifs) + ) + + self.compute._update_migrate_vifs_profile_with_pci( + mig_vifs, port_id_to_pci_dev) + # Make sure method under test updated the correct one. - changed_mig_vif = mig_vifs[0] + changed_vf_mig_vif = mig_vifs[0] unchanged_mig_vif = mig_vifs[1] + changed_pf_mig_vif = mig_vifs[2] # Migrate vifs profile was updated with pci_dev.address # for port ID uuids.port0. - self.assertEqual(changed_mig_vif.profile['pci_slot'], - pci_dev.address) + self.assertEqual(changed_vf_mig_vif.profile['pci_slot'], + pci_vf_dev.address) + # MAC is not added as this is a VF + self.assertNotIn('device_mac_address', changed_vf_mig_vif.profile) # Migrate vifs profile was unchanged for port ID uuids.port1. # i.e 'profile' attribute does not exist. self.assertNotIn('profile', unchanged_mig_vif) + # Migrate vifs profile was updated with pci_dev.address + # for port ID uuids.port2. 
+ self.assertEqual(changed_pf_mig_vif.profile['pci_slot'], + pci_pf_dev.address) + # MAC is updated as this is a PF + self.assertEqual( + 'b4:96:91:34:f4:36', + changed_pf_mig_vif.profile['device_mac_address'] + ) def test_get_updated_nw_info_with_pci_mapping(self): old_dev = objects.PciDevice(address='0000:04:00.2') diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py index caa12cb754..5aab64e72c 100644 --- a/nova/tests/unit/compute/test_resource_tracker.py +++ b/nova/tests/unit/compute/test_resource_tracker.py @@ -4205,9 +4205,9 @@ class TestCleanComputeNodeCache(BaseTestCase): invalid_nodename = "invalid-node" self.rt.compute_nodes[_NODENAME] = self.compute self.rt.compute_nodes[invalid_nodename] = mock.sentinel.compute - with mock.patch.object( - self.rt.reportclient, "invalidate_resource_provider", - ) as mock_invalidate: - self.rt.clean_compute_node_cache([self.compute]) - mock_remove.assert_called_once_with(invalid_nodename) - mock_invalidate.assert_called_once_with(invalid_nodename) + mock_invalidate = self.rt.reportclient.invalidate_resource_provider + + self.rt.clean_compute_node_cache([self.compute]) + + mock_remove.assert_called_once_with(invalid_nodename) + mock_invalidate.assert_called_once_with(invalid_nodename) diff --git a/nova/tests/unit/conductor/tasks/test_live_migrate.py b/nova/tests/unit/conductor/tasks/test_live_migrate.py index cb40c076c8..dd4ee7c3fe 100644 --- a/nova/tests/unit/conductor/tasks/test_live_migrate.py +++ b/nova/tests/unit/conductor/tasks/test_live_migrate.py @@ -345,6 +345,36 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase): mock.call(self.destination)], mock_get_info.call_args_list) + @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') + def test_skip_hypervisor_version_check_on_lm_raise_ex(self, mock_get_info): + host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7} + host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6} + 
self.flags(group='workarounds', + skip_hypervisor_version_check_on_lm=False) + mock_get_info.side_effect = [objects.ComputeNode(**host1), + objects.ComputeNode(**host2)] + self.assertRaises(exception.DestinationHypervisorTooOld, + self.task._check_compatible_with_source_hypervisor, + self.destination) + self.assertEqual([mock.call(self.instance_host), + mock.call(self.destination)], + mock_get_info.call_args_list) + + @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') + def test_skip_hypervisor_version_check_on_lm_do_not_raise_ex( + self, mock_get_info + ): + host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7} + host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6} + self.flags(group='workarounds', + skip_hypervisor_version_check_on_lm=True) + mock_get_info.side_effect = [objects.ComputeNode(**host1), + objects.ComputeNode(**host2)] + self.task._check_compatible_with_source_hypervisor(self.destination) + self.assertEqual([mock.call(self.instance_host), + mock.call(self.destination)], + mock_get_info.call_args_list) + @mock.patch.object(compute_rpcapi.ComputeAPI, 'check_can_live_migrate_destination') def test_check_requested_destination(self, mock_check): diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py index 15aa960aad..8c954db9a7 100644 --- a/nova/tests/unit/conductor/test_conductor.py +++ b/nova/tests/unit/conductor/test_conductor.py @@ -17,6 +17,8 @@ import copy +import ddt +from keystoneauth1 import exceptions as ks_exc import mock from oslo_db import exception as db_exc from oslo_limit import exception as limit_exceptions @@ -52,6 +54,7 @@ from nova.objects import block_device as block_device_obj from nova.objects import fields from nova.objects import request_spec from nova.scheduler.client import query +from nova.scheduler.client import report from nova.scheduler import utils as scheduler_utils from nova import test from nova.tests import fixtures @@ -4869,3 +4872,35 @@ 
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase): logtext) self.assertIn('host3\' because it is not up', logtext) self.assertIn('image1 failed 1 times', logtext) + + +@ddt.ddt +class TestConductorTaskManager(test.NoDBTestCase): + def test_placement_client_startup(self): + self.assertIsNone(report.PLACEMENTCLIENT) + conductor_manager.ComputeTaskManager() + self.assertIsNotNone(report.PLACEMENTCLIENT) + + @ddt.data(ks_exc.MissingAuthPlugin, + ks_exc.Unauthorized, + test.TestingException) + def test_placement_client_startup_fatals(self, exc): + self.assertRaises(exc, + self._test_placement_client_startup_exception, exc) + + @ddt.data(ks_exc.EndpointNotFound, + ks_exc.DiscoveryFailure, + ks_exc.RequestTimeout, + ks_exc.GatewayTimeout, + ks_exc.ConnectFailure) + def test_placement_client_startup_non_fatal(self, exc): + self._test_placement_client_startup_exception(exc) + + @mock.patch.object(report, 'LOG') + def _test_placement_client_startup_exception(self, exc, mock_log): + with mock.patch.object(report.SchedulerReportClient, '_create_client', + side_effect=exc): + try: + conductor_manager.ComputeTaskManager() + finally: + mock_log.error.assert_called_once() diff --git a/nova/tests/unit/console/test_websocketproxy.py b/nova/tests/unit/console/test_websocketproxy.py index e05ae520d9..0c897e3e91 100644 --- a/nova/tests/unit/console/test_websocketproxy.py +++ b/nova/tests/unit/console/test_websocketproxy.py @@ -589,12 +589,12 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase): self.wh.socket.assert_called_with('node1', 10000, connect=True) self.wh.do_proxy.assert_called_with('<socket>') - def test_reject_open_redirect(self): + def test_reject_open_redirect(self, url='//example.com/%2F..'): # This will test the behavior when an attempt is made to cause an open # redirect. It should be rejected. mock_req = mock.MagicMock() mock_req.makefile().readline.side_effect = [ - b'GET //example.com/%2F.. 
HTTP/1.1\r\n', + f'GET {url} HTTP/1.1\r\n'.encode('utf-8'), b'' ] @@ -619,41 +619,32 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase): result = output.readlines() # Verify no redirect happens and instead a 400 Bad Request is returned. - self.assertIn('400 URI must not start with //', result[0].decode()) + # NOTE: As of python 3.10.6 there is a fix for this vulnerability, + # which will cause a 301 Moved Permanently error to be returned + # instead that redirects to a sanitized version of the URL with extra + # leading '/' characters removed. + # See https://github.com/python/cpython/issues/87389 for details. + # We will consider either response to be valid for this test. This will + # also help if and when the above fix gets backported to older versions + # of python. + errmsg = result[0].decode() + expected_nova = '400 URI must not start with //' + expected_cpython = '301 Moved Permanently' + + self.assertTrue(expected_nova in errmsg or expected_cpython in errmsg) + + # If we detect the cpython fix, verify that the redirect location is + # now the same url but with extra leading '/' characters removed. + if expected_cpython in errmsg: + location = result[3].decode() + location = location.removeprefix('Location: ').rstrip('\r\n') + self.assertTrue( + location.startswith('/example.com/%2F..'), + msg='Redirect location is not the expected sanitized URL', + ) def test_reject_open_redirect_3_slashes(self): - # This will test the behavior when an attempt is made to cause an open - # redirect. It should be rejected. - mock_req = mock.MagicMock() - mock_req.makefile().readline.side_effect = [ - b'GET ///example.com/%2F.. HTTP/1.1\r\n', - b'' - ] - - # Collect the response data to verify at the end. The - # SimpleHTTPRequestHandler writes the response data by calling the - # request socket sendall() method. 
- self.data = b'' - - def fake_sendall(data): - self.data += data - - mock_req.sendall.side_effect = fake_sendall - - client_addr = ('8.8.8.8', 54321) - mock_server = mock.MagicMock() - # This specifies that the server will be able to handle requests other - # than only websockets. - mock_server.only_upgrade = False - - # Constructing a handler will process the mock_req request passed in. - websocketproxy.NovaProxyRequestHandler( - mock_req, client_addr, mock_server) - - # Verify no redirect happens and instead a 400 Bad Request is returned. - self.data = self.data.decode() - self.assertIn('Error code: 400', self.data) - self.assertIn('Message: URI must not start with //', self.data) + self.test_reject_open_redirect(url='///example.com/%2F..') @mock.patch('nova.objects.ConsoleAuthToken.validate') def test_no_compute_rpcapi_with_invalid_token(self, mock_validate): diff --git a/nova/tests/unit/db/main/test_api.py b/nova/tests/unit/db/main/test_api.py index c9a9e83154..e869d0403c 100644 --- a/nova/tests/unit/db/main/test_api.py +++ b/nova/tests/unit/db/main/test_api.py @@ -279,33 +279,21 @@ class DecoratorTestCase(test.TestCase): 'No DB access allowed in ', mock_log.error.call_args[0][0]) - @mock.patch.object(db, 'LOG') - @mock.patch.object(db, 'DISABLE_DB_ACCESS', return_value=True) - def test_pick_context_manager_writer_disable_db_access( - self, mock_DISABLE_DB_ACCESS, mock_log, - ): + def test_pick_context_manager_writer_disable_db_access(self): @db.pick_context_manager_writer def func(context, value): pass self._test_pick_context_manager_disable_db_access(func) - @mock.patch.object(db, 'LOG') - @mock.patch.object(db, 'DISABLE_DB_ACCESS', return_value=True) - def test_pick_context_manager_reader_disable_db_access( - self, mock_DISABLE_DB_ACCESS, mock_log, - ): + def test_pick_context_manager_reader_disable_db_access(self): @db.pick_context_manager_reader def func(context, value): pass self._test_pick_context_manager_disable_db_access(func) - @mock.patch.object(db, 
'LOG') - @mock.patch.object(db, 'DISABLE_DB_ACCESS', return_value=True) - def test_pick_context_manager_reader_allow_async_disable_db_access( - self, mock_DISABLE_DB_ACCESS, mock_log, - ): + def test_pick_context_manager_reader_allow_async_disable_db_access(self): @db.pick_context_manager_reader_allow_async def func(context, value): pass diff --git a/nova/tests/unit/network/test_neutron.py b/nova/tests/unit/network/test_neutron.py index 40137cef39..458777c3a3 100644 --- a/nova/tests/unit/network/test_neutron.py +++ b/nova/tests/unit/network/test_neutron.py @@ -143,6 +143,22 @@ class TestNeutronClient(test.NoDBTestCase): self.assertIsInstance(cl.httpclient.auth, service_token.ServiceTokenAuthWrapper) + @mock.patch('nova.service_auth._SERVICE_AUTH') + @mock.patch('nova.network.neutron._ADMIN_AUTH') + @mock.patch.object(ks_loading, 'load_auth_from_conf_options') + def test_admin_with_service_token( + self, mock_load, mock_admin_auth, mock_service_auth + ): + self.flags(send_service_user_token=True, group='service_user') + + admin_context = context.get_admin_context() + + cl = neutronapi.get_client(admin_context) + self.assertIsInstance(cl.httpclient.auth, + service_token.ServiceTokenAuthWrapper) + self.assertEqual(mock_admin_auth, cl.httpclient.auth.user_auth) + self.assertEqual(mock_service_auth, cl.httpclient.auth.service_auth) + @mock.patch.object(client.Client, "list_networks", side_effect=exceptions.Unauthorized()) def test_Unauthorized_user(self, mock_list_networks): @@ -3383,6 +3399,155 @@ class TestAPI(TestAPIBase): mocked_client.list_ports.assert_called_once_with( tenant_id=uuids.fake, device_id=uuids.instance) + @mock.patch.object( + neutronapi.API, + '_get_physnet_tunneled_info', + new=mock.Mock(return_value=(None, False))) + @mock.patch.object( + neutronapi.API, + '_get_preexisting_port_ids', + new=mock.Mock(return_value=[])) + @mock.patch.object( + neutronapi.API, + '_get_subnets_from_port', + new=mock.Mock(return_value=[model.Subnet(cidr='1.0.0.0/8')])) 
+ @mock.patch.object( + neutronapi.API, + '_get_floating_ips_by_fixed_and_port', + new=mock.Mock(return_value=[{'floating_ip_address': '10.0.0.1'}])) + @mock.patch.object(neutronapi, 'get_client') + def test_build_network_info_model_full_vnic_type_change( + self, mock_get_client + ): + mocked_client = mock.create_autospec(client.Client) + mock_get_client.return_value = mocked_client + fake_inst = objects.Instance() + fake_inst.project_id = uuids.fake + fake_inst.uuid = uuids.instance + fake_ports = [ + { + "id": "port1", + "network_id": "net-id", + "tenant_id": uuids.fake, + "admin_state_up": True, + "status": "ACTIVE", + "fixed_ips": [{"ip_address": "1.1.1.1"}], + "mac_address": "de:ad:be:ef:00:01", + "binding:vif_type": model.VIF_TYPE_BRIDGE, + "binding:vnic_type": model.VNIC_TYPE_DIRECT, + "binding:vif_details": {}, + }, + ] + mocked_client.list_ports.return_value = {'ports': fake_ports} + fake_inst.info_cache = objects.InstanceInfoCache.new( + self.context, uuids.instance) + fake_inst.info_cache.network_info = model.NetworkInfo.hydrate([]) + + # build the network info first + nw_infos = self.api._build_network_info_model( + self.context, + fake_inst, + force_refresh=True, + ) + + self.assertEqual(1, len(nw_infos)) + fake_inst.info_cache.network_info = nw_infos + + # change the vnic_type of the port and rebuild the network info + fake_ports[0]["binding:vnic_type"] = model.VNIC_TYPE_MACVTAP + with mock.patch( + "nova.network.neutron.API._log_error_if_vnic_type_changed" + ) as mock_log: + nw_infos = self.api._build_network_info_model( + self.context, + fake_inst, + force_refresh=True, + ) + + mock_log.assert_called_once_with( + fake_ports[0]["id"], "direct", "macvtap", fake_inst) + self.assertEqual(1, len(nw_infos)) + + @mock.patch.object( + neutronapi.API, + '_get_physnet_tunneled_info', + new=mock.Mock(return_value=(None, False))) + @mock.patch.object( + neutronapi.API, + '_get_preexisting_port_ids', + new=mock.Mock(return_value=[])) + @mock.patch.object( + 
neutronapi.API, + '_get_subnets_from_port', + new=mock.Mock(return_value=[model.Subnet(cidr='1.0.0.0/8')])) + @mock.patch.object( + neutronapi.API, + '_get_floating_ips_by_fixed_and_port', + new=mock.Mock(return_value=[{'floating_ip_address': '10.0.0.1'}])) + @mock.patch.object(neutronapi, 'get_client') + def test_build_network_info_model_single_vnic_type_change( + self, mock_get_client + ): + mocked_client = mock.create_autospec(client.Client) + mock_get_client.return_value = mocked_client + fake_inst = objects.Instance() + fake_inst.project_id = uuids.fake + fake_inst.uuid = uuids.instance + fake_ports = [ + { + "id": "port1", + "network_id": "net-id", + "tenant_id": uuids.fake, + "admin_state_up": True, + "status": "ACTIVE", + "fixed_ips": [{"ip_address": "1.1.1.1"}], + "mac_address": "de:ad:be:ef:00:01", + "binding:vif_type": model.VIF_TYPE_BRIDGE, + "binding:vnic_type": model.VNIC_TYPE_DIRECT, + "binding:vif_details": {}, + }, + ] + fake_nets = [ + { + "id": "net-id", + "name": "foo", + "tenant_id": uuids.fake, + } + ] + mocked_client.list_ports.return_value = {'ports': fake_ports} + fake_inst.info_cache = objects.InstanceInfoCache.new( + self.context, uuids.instance) + fake_inst.info_cache.network_info = model.NetworkInfo.hydrate([]) + + # build the network info first + nw_infos = self.api._build_network_info_model( + self.context, + fake_inst, + fake_nets, + [fake_ports[0]["id"]], + refresh_vif_id=fake_ports[0]["id"], + ) + + self.assertEqual(1, len(nw_infos)) + fake_inst.info_cache.network_info = nw_infos + + # change the vnic_type of the port and rebuild the network info + fake_ports[0]["binding:vnic_type"] = model.VNIC_TYPE_MACVTAP + with mock.patch( + "nova.network.neutron.API._log_error_if_vnic_type_changed" + ) as mock_log: + nw_infos = self.api._build_network_info_model( + self.context, + fake_inst, + fake_nets, + [fake_ports[0]["id"]], + refresh_vif_id=fake_ports[0]["id"], + ) + + mock_log.assert_called_once_with( + fake_ports[0]["id"], "direct", 
"macvtap", fake_inst) + self.assertEqual(1, len(nw_infos)) + @mock.patch.object(neutronapi, 'get_client') def test_get_subnets_from_port(self, mock_get_client): mocked_client = mock.create_autospec(client.Client) @@ -4809,6 +4974,174 @@ class TestAPI(TestAPIBase): 'nova.network.neutron.API.has_extended_resource_request_extension', new=mock.Mock(return_value=False), ) + @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec') + @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) + def test_update_port_bindings_for_instance_with_sriov_pf( + self, get_client_mock, get_pci_device_devspec_mock + ): + devspec = mock.Mock() + devspec.get_tags.return_value = {'physical_network': 'physnet1'} + get_pci_device_devspec_mock.return_value = devspec + + instance = fake_instance.fake_instance_obj(self.context) + instance.migration_context = objects.MigrationContext() + instance.migration_context.old_pci_devices = objects.PciDeviceList( + objects=[ + objects.PciDevice( + vendor_id='8086', + product_id='154d', + address='0000:0a:01', + compute_node_id=1, + request_id=uuids.pci_req, + dev_type=obj_fields.PciDeviceType.SRIOV_PF, + extra_info={'mac_address': 'b4:96:91:34:f4:36'}, + ) + ] + ) + instance.pci_devices = instance.migration_context.old_pci_devices + instance.migration_context.new_pci_devices = objects.PciDeviceList( + objects=[ + objects.PciDevice( + vendor_id='8086', + product_id='154d', + address='0000:0a:02', + compute_node_id=2, + request_id=uuids.pci_req, + dev_type=obj_fields.PciDeviceType.SRIOV_PF, + extra_info={'mac_address': 'b4:96:91:34:f4:dd'}, + ) + ] + ) + instance.pci_devices = instance.migration_context.new_pci_devices + + fake_ports = { + 'ports': [ + { + 'id': uuids.port, + 'binding:vnic_type': 'direct-physical', + constants.BINDING_HOST_ID: 'fake-host-old', + constants.BINDING_PROFILE: { + 'pci_slot': '0000:0a:01', + 'physical_network': 'old_phys_net', + 'pci_vendor_info': 'old_pci_vendor_info', + }, + }, + ] + } + + migration = 
objects.Migration( + status='confirmed', migration_type='migration') + list_ports_mock = mock.Mock(return_value=fake_ports) + get_client_mock.return_value.list_ports = list_ports_mock + + update_port_mock = mock.Mock() + get_client_mock.return_value.update_port = update_port_mock + + self.api._update_port_binding_for_instance( + self.context, instance, instance.host, migration) + + # Assert that update_port is called with the binding:profile + # corresponding to the PCI device specified including MAC address. + update_port_mock.assert_called_once_with( + uuids.port, + { + 'port': { + constants.BINDING_HOST_ID: 'fake-host', + 'device_owner': 'compute:%s' % instance.availability_zone, + constants.BINDING_PROFILE: { + 'pci_slot': '0000:0a:02', + 'physical_network': 'physnet1', + 'pci_vendor_info': '8086:154d', + 'device_mac_address': 'b4:96:91:34:f4:dd', + }, + } + }, + ) + + @mock.patch( + 'nova.network.neutron.API.has_extended_resource_request_extension', + new=mock.Mock(return_value=False), + ) + @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec') + @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) + def test_update_port_bindings_for_instance_with_sriov_pf_no_migration( + self, get_client_mock, get_pci_device_devspec_mock + ): + devspec = mock.Mock() + devspec.get_tags.return_value = {'physical_network': 'physnet1'} + get_pci_device_devspec_mock.return_value = devspec + + instance = fake_instance.fake_instance_obj(self.context) + instance.pci_requests = objects.InstancePCIRequests( + instance_uuid=instance.uuid, + requests=[ + objects.InstancePCIRequest( + requester_id=uuids.port, + request_id=uuids.pci_req, + ) + ], + ) + instance.pci_devices = objects.PciDeviceList( + objects=[ + objects.PciDevice( + vendor_id='8086', + product_id='154d', + address='0000:0a:02', + compute_node_id=2, + request_id=uuids.pci_req, + dev_type=obj_fields.PciDeviceType.SRIOV_PF, + extra_info={'mac_address': 'b4:96:91:34:f4:36'}, + ) + ] + ) + + fake_ports = { 
+ 'ports': [ + { + 'id': uuids.port, + 'binding:vnic_type': 'direct-physical', + constants.BINDING_HOST_ID: 'fake-host-old', + constants.BINDING_PROFILE: { + 'pci_slot': '0000:0a:01', + 'physical_network': 'old_phys_net', + 'pci_vendor_info': 'old_pci_vendor_info', + 'device_mac_address': 'b4:96:91:34:f4:dd' + }, + }, + ] + } + + list_ports_mock = mock.Mock(return_value=fake_ports) + get_client_mock.return_value.list_ports = list_ports_mock + + update_port_mock = mock.Mock() + get_client_mock.return_value.update_port = update_port_mock + + self.api._update_port_binding_for_instance( + self.context, instance, instance.host) + + # Assert that update_port is called with the binding:profile + # corresponding to the PCI device specified including MAC address. + update_port_mock.assert_called_once_with( + uuids.port, + { + 'port': { + constants.BINDING_HOST_ID: 'fake-host', + 'device_owner': 'compute:%s' % instance.availability_zone, + constants.BINDING_PROFILE: { + 'pci_slot': '0000:0a:02', + 'physical_network': 'physnet1', + 'pci_vendor_info': '8086:154d', + 'device_mac_address': 'b4:96:91:34:f4:36', + }, + } + }, + ) + + @mock.patch( + 'nova.network.neutron.API.has_extended_resource_request_extension', + new=mock.Mock(return_value=False), + ) @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def test_update_port_bindings_for_instance_with_resource_req( self, get_client_mock): @@ -7190,23 +7523,21 @@ class TestAPI(TestAPIBase): request_id=uuids.pci_request_id) bad_request = objects.InstancePCIRequest( requester_id=uuids.wrong_port_id) - device = objects.PciDevice(request_id=uuids.pci_request_id, - address='fake-pci-address') + device = objects.PciDevice(request_id=uuids.pci_request_id) bad_device = objects.PciDevice(request_id=uuids.wrong_request_id) # Test the happy path instance = objects.Instance( pci_requests=objects.InstancePCIRequests(requests=[request]), pci_devices=objects.PciDeviceList(objects=[device])) self.assertEqual( - 
'fake-pci-address', - self.api._get_port_pci_dev( - self.context, instance, fake_port).address) + device, + self.api._get_port_pci_dev(instance, fake_port)) # Test not finding the request instance = objects.Instance( pci_requests=objects.InstancePCIRequests( requests=[objects.InstancePCIRequest(bad_request)])) self.assertIsNone( - self.api._get_port_pci_dev(self.context, instance, fake_port)) + self.api._get_port_pci_dev(instance, fake_port)) mock_debug.assert_called_with('No PCI request found for port %s', uuids.fake_port_id, instance=instance) mock_debug.reset_mock() @@ -7215,7 +7546,7 @@ class TestAPI(TestAPIBase): pci_requests=objects.InstancePCIRequests(requests=[request]), pci_devices=objects.PciDeviceList(objects=[bad_device])) self.assertIsNone( - self.api._get_port_pci_dev(self.context, instance, fake_port)) + self.api._get_port_pci_dev(instance, fake_port)) mock_debug.assert_called_with('No PCI device found for request %s', uuids.pci_request_id, instance=instance) @@ -7740,6 +8071,45 @@ class TestAPIPortbinding(TestAPIBase): port_req_body['port'][ constants.BINDING_PROFILE]) + @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec') + @mock.patch.object(pci_manager, 'get_instance_pci_devs') + def test_populate_neutron_extension_values_binding_sriov_pf( + self, mock_get_instance_pci_devs, mock_get_devspec + ): + host_id = 'my_host_id' + instance = {'host': host_id} + port_req_body = {'port': {}} + + pci_dev = objects.PciDevice( + request_id=uuids.pci_req, + address='0000:01:00', + parent_addr='0000:02:00', + vendor_id='8086', + product_id='154d', + dev_type=obj_fields.PciDeviceType.SRIOV_PF, + extra_info={'mac_address': 'b4:96:91:34:f4:36'} + ) + + expected_profile = { + 'pci_vendor_info': '8086:154d', + 'pci_slot': '0000:01:00', + 'physical_network': 'physnet1', + 'device_mac_address': 'b4:96:91:34:f4:36', + } + + mock_get_instance_pci_devs.return_value = [pci_dev] + devspec = mock.Mock() + devspec.get_tags.return_value = {'physical_network': 
'physnet1'} + mock_get_devspec.return_value = devspec + + self.api._populate_neutron_binding_profile( + instance, uuids.pci_req, port_req_body, None) + + self.assertEqual( + expected_profile, + port_req_body['port'][constants.BINDING_PROFILE] + ) + @mock.patch.object( pci_utils, 'get_vf_num_by_pci_address', new=mock.MagicMock(side_effect=(lambda vf_a: 1 @@ -7867,21 +8237,29 @@ class TestAPIPortbinding(TestAPIBase): devspec.get_tags.return_value = {'physical_network': 'physnet1'} mock_get_pci_device_devspec.return_value = devspec - pci_dev = {'vendor_id': 'a2d6', - 'product_id': '15b3', - 'address': '0000:0a:00.0', - 'card_serial_number': 'MT2113X00000', - 'dev_type': obj_fields.PciDeviceType.SRIOV_PF, - } - PciDevice = collections.namedtuple('PciDevice', - ['vendor_id', 'product_id', 'address', - 'card_serial_number', 'dev_type']) - mydev = PciDevice(**pci_dev) + pci_dev = objects.PciDevice( + request_id=uuids.pci_req, + address='0000:0a:00.0', + parent_addr='0000:02:00', + vendor_id='a2d6', + product_id='15b3', + dev_type=obj_fields.PciDeviceType.SRIOV_PF, + extra_info={ + 'capabilities': jsonutils.dumps( + {'card_serial_number': 'MT2113X00000'}), + 'mac_address': 'b4:96:91:34:f4:36', + }, - self.assertEqual({'pci_slot': '0000:0a:00.0', - 'pci_vendor_info': 'a2d6:15b3', - 'physical_network': 'physnet1'}, - self.api._get_pci_device_profile(mydev)) + ) + self.assertEqual( + { + 'pci_slot': '0000:0a:00.0', + 'pci_vendor_info': 'a2d6:15b3', + 'physical_network': 'physnet1', + 'device_mac_address': 'b4:96:91:34:f4:36', + }, + self.api._get_pci_device_profile(pci_dev), + ) @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec') @mock.patch.object(pci_manager, 'get_instance_pci_devs') diff --git a/nova/tests/unit/objects/test_request_spec.py b/nova/tests/unit/objects/test_request_spec.py index 31797f8133..e51b5c3368 100644 --- a/nova/tests/unit/objects/test_request_spec.py +++ b/nova/tests/unit/objects/test_request_spec.py @@ -615,6 +615,30 @@ class 
_TestRequestSpecObject(object): self.assertIsInstance(req_obj.instance_group, objects.InstanceGroup) self.assertEqual('fresh', req_obj.instance_group.name) + @mock.patch.object( + request_spec.RequestSpec, '_get_by_instance_uuid_from_db' + ) + @mock.patch('nova.objects.InstanceGroup.get_by_uuid') + def test_get_by_instance_uuid_deleted_group( + self, mock_get_ig, get_by_uuid + ): + fake_spec_obj = fake_request_spec.fake_spec_obj() + fake_spec_obj.scheduler_hints['group'] = ['fresh'] + fake_spec = fake_request_spec.fake_db_spec(fake_spec_obj) + get_by_uuid.return_value = fake_spec + mock_get_ig.side_effect = exception.InstanceGroupNotFound( + group_uuid=uuids.instgroup + ) + + req_obj = request_spec.RequestSpec.get_by_instance_uuid( + self.context, fake_spec['instance_uuid'] + ) + # assert that both the instance_group object and scheduler hint + # are cleared if the instance_group was deleted since the request + # spec was last saved to the db. + self.assertIsNone(req_obj.instance_group, objects.InstanceGroup) + self.assertEqual({'hint': ['over-there']}, req_obj.scheduler_hints) + @mock.patch('nova.objects.request_spec.RequestSpec.save') @mock.patch.object( request_spec.RequestSpec, '_get_by_instance_uuid_from_db') diff --git a/nova/tests/unit/pci/test_stats.py b/nova/tests/unit/pci/test_stats.py index 804b76ffba..b88cfd19ef 100644 --- a/nova/tests/unit/pci/test_stats.py +++ b/nova/tests/unit/pci/test_stats.py @@ -98,16 +98,7 @@ class PciDeviceStatsTestCase(test.NoDBTestCase): def setUp(self): super(PciDeviceStatsTestCase, self).setUp() - self._setup_pci_stats() - - def _setup_pci_stats(self, numa_topology=None): - """Exists for tests that need to setup pci_stats with a specific NUMA - topology, while still allowing tests that don't care to get the default - "empty" one. 
- """ - if not numa_topology: - numa_topology = objects.NUMATopology() - self.pci_stats = stats.PciDeviceStats(numa_topology) + self.pci_stats = stats.PciDeviceStats(objects.NUMATopology()) # The following two calls need to be made before adding the devices. patcher = fakes.fake_pci_whitelist() self.addCleanup(patcher.stop) @@ -240,18 +231,18 @@ class PciDeviceStatsTestCase(test.NoDBTestCase): self.assertFalse(self.pci_stats.support_requests(pci_requests, cells)) def test_filter_pools_for_socket_affinity_no_socket(self): - self._setup_pci_stats( - objects.NUMATopology( - cells=[objects.NUMACell(socket=None)])) + self.pci_stats.numa_topology = objects.NUMATopology( + cells=[objects.NUMACell(socket=None)]) + self.assertEqual( [], self.pci_stats._filter_pools_for_socket_affinity( self.pci_stats.pools, [objects.InstanceNUMACell()])) def test_filter_pools_for_socket_affinity(self): - self._setup_pci_stats( - objects.NUMATopology( - cells=[objects.NUMACell(id=1, socket=1)])) + self.pci_stats.numa_topology = objects.NUMATopology( + cells=[objects.NUMACell(id=1, socket=1)]) + pools = self.pci_stats._filter_pools_for_socket_affinity( self.pci_stats.pools, [objects.InstanceNUMACell(id=1)]) self.assertEqual(1, len(pools)) diff --git a/nova/tests/unit/policies/test_servers.py b/nova/tests/unit/policies/test_servers.py index 3ed4bfe085..2130c62e5f 100644 --- a/nova/tests/unit/policies/test_servers.py +++ b/nova/tests/unit/policies/test_servers.py @@ -1229,10 +1229,9 @@ class ServersPolicyTest(base.BasePolicyTest): @mock.patch('nova.compute.api.API._allow_resize_to_same_host') @mock.patch('nova.objects.RequestSpec.get_by_instance_uuid') @mock.patch('nova.objects.Instance.save') - @mock.patch('nova.api.openstack.common.get_instance') @mock.patch('nova.conductor.ComputeTaskAPI.resize_instance') def test_cross_cell_resize_server_policy( - self, mock_resize, mock_get, mock_save, mock_rs, mock_allow, m_net + self, mock_resize, mock_save, mock_rs, mock_allow, m_net ): # 'migrate' 
policy is checked before 'resize:cross_cell' so @@ -1262,7 +1261,7 @@ class ServersPolicyTest(base.BasePolicyTest): ) return inst - mock_get.side_effect = fake_get + self.mock_get.side_effect = fake_get def fake_validate(context, instance, host_name, allow_cross_cell_resize): diff --git a/nova/tests/unit/scheduler/client/test_report.py b/nova/tests/unit/scheduler/client/test_report.py index 0650c62096..9b2f5c3a0a 100644 --- a/nova/tests/unit/scheduler/client/test_report.py +++ b/nova/tests/unit/scheduler/client/test_report.py @@ -10,6 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. import copy +import ddt import time from urllib import parse @@ -150,6 +151,60 @@ class SafeConnectedTestCase(test.NoDBTestCase): self.assertTrue(req.called) +@ddt.ddt +class TestSingleton(test.NoDBTestCase): + def test_singleton(self): + # Make sure we start with a clean slate + self.assertIsNone(report.PLACEMENTCLIENT) + + # Make sure the first call creates the singleton, sets it + # globally, and returns it + client = report.report_client_singleton() + self.assertEqual(client, report.PLACEMENTCLIENT) + + # Make sure that a subsequent call returns the same thing + # again and that the global is unchanged + self.assertEqual(client, report.report_client_singleton()) + self.assertEqual(client, report.PLACEMENTCLIENT) + + @ddt.data(ks_exc.EndpointNotFound, + ks_exc.MissingAuthPlugin, + ks_exc.Unauthorized, + ks_exc.DiscoveryFailure, + ks_exc.ConnectFailure, + ks_exc.RequestTimeout, + ks_exc.GatewayTimeout, + test.TestingException) + def test_errors(self, exc): + self._test_error(exc) + + @mock.patch.object(report, 'LOG') + def _test_error(self, exc, mock_log): + with mock.patch.object(report.SchedulerReportClient, '_create_client', + side_effect=exc): + self.assertRaises(exc, report.report_client_singleton) + mock_log.error.assert_called_once() + + def test_error_then_success(self): + # Simulate an error + 
self._test_error(ks_exc.ConnectFailure) + + # Ensure we did not set the global client + self.assertIsNone(report.PLACEMENTCLIENT) + + # Call again, with no error + client = report.report_client_singleton() + + # Make sure we got a client and that it was set as the global + # one + self.assertIsNotNone(client) + self.assertEqual(client, report.PLACEMENTCLIENT) + + # Make sure we keep getting the same one + client2 = report.report_client_singleton() + self.assertEqual(client, client2) + + class TestConstructor(test.NoDBTestCase): def setUp(self): super(TestConstructor, self).setUp() diff --git a/nova/tests/unit/test_metadata.py b/nova/tests/unit/test_metadata.py index 630cb54418..1c78ddea51 100644 --- a/nova/tests/unit/test_metadata.py +++ b/nova/tests/unit/test_metadata.py @@ -1458,20 +1458,17 @@ class MetadataHandlerTestCase(test.TestCase): for c in range(ord('a'), ord('z'))] mock_client.list_subnets.return_value = { 'subnets': subnet_list} + mock_client.list_ports.side_effect = fake_list_ports - with mock.patch.object( - mock_client, 'list_ports', - side_effect=fake_list_ports) as mock_list_ports: - - response = fake_request( - self, self.mdinst, - relpath="/2009-04-04/user-data", - address="192.192.192.2", - fake_get_metadata_by_instance_id=self._fake_x_get_metadata, - headers={'X-Forwarded-For': '192.192.192.2', - 'X-Metadata-Provider': proxy_lb_id}) - - self.assertEqual(3, mock_list_ports.call_count) + response = fake_request( + self, self.mdinst, + relpath="/2009-04-04/user-data", + address="192.192.192.2", + fake_get_metadata_by_instance_id=self._fake_x_get_metadata, + headers={'X-Forwarded-For': '192.192.192.2', + 'X-Metadata-Provider': proxy_lb_id}) + + self.assertEqual(3, mock_client.list_ports.call_count) self.assertEqual(200, response.status_int) diff --git a/nova/tests/unit/test_service_auth.py b/nova/tests/unit/test_service_auth.py index db2a2e2899..ceb2a93b02 100644 --- a/nova/tests/unit/test_service_auth.py +++ b/nova/tests/unit/test_service_auth.py 
@@ -55,3 +55,13 @@ class ServiceAuthTestCase(test.NoDBTestCase): result = service_auth.get_auth_plugin(self.ctx) self.assertEqual(1, mock_load.call_count) self.assertNotIsInstance(result, service_token.ServiceTokenAuthWrapper) + + @mock.patch.object(ks_loading, 'load_auth_from_conf_options', + new=mock.Mock()) + def test_get_auth_plugin_user_auth(self): + self.flags(send_service_user_token=True, group='service_user') + user_auth = mock.Mock() + + result = service_auth.get_auth_plugin(self.ctx, user_auth=user_auth) + + self.assertEqual(user_auth, result.user_auth) diff --git a/nova/tests/unit/test_test.py b/nova/tests/unit/test_test.py index 8381792de6..5642a6da74 100644 --- a/nova/tests/unit/test_test.py +++ b/nova/tests/unit/test_test.py @@ -361,21 +361,6 @@ class PatchExistsTestCase(test.NoDBTestCase): self.assertTrue(os.path.exists(os.path.dirname(__file__))) self.assertFalse(os.path.exists('non-existent/file')) - @test.patch_exists('fake_file1', True) - @test.patch_exists('fake_file2', True) - @test.patch_exists(__file__, False) - def test_patch_exists_multiple_decorators(self): - """Test that @patch_exists can be used multiple times on the - same method. 
- """ - self.assertTrue(os.path.exists('fake_file1')) - self.assertTrue(os.path.exists('fake_file2')) - self.assertFalse(os.path.exists(__file__)) - - # Check non-patched parameters - self.assertTrue(os.path.exists(os.path.dirname(__file__))) - self.assertFalse(os.path.exists('non-existent/file')) - class PatchOpenTestCase(test.NoDBTestCase): fake_contents = "These file contents don't really exist" diff --git a/nova/tests/unit/virt/hyperv/test_vmops.py b/nova/tests/unit/virt/hyperv/test_vmops.py index dd4dc52d5b..0110b595c7 100644 --- a/nova/tests/unit/virt/hyperv/test_vmops.py +++ b/nova/tests/unit/virt/hyperv/test_vmops.py @@ -1129,7 +1129,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase): mock_unplug_vifs.assert_called_once_with( mock_instance, mock.sentinel.fake_network_info) mock_disconnect_volumes.assert_called_once_with( - mock.sentinel.FAKE_BD_INFO) + mock.sentinel.FAKE_BD_INFO, force=True) mock_delete_disk_files.assert_called_once_with( mock_instance.name) @@ -1374,12 +1374,10 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase): def test_get_vm_state(self): summary_info = {'EnabledState': os_win_const.HYPERV_VM_STATE_DISABLED} - with mock.patch.object(self._vmops._vmutils, - 'get_vm_summary_info') as mock_get_summary_info: - mock_get_summary_info.return_value = summary_info + self._vmops._vmutils.get_vm_summary_info.return_value = summary_info - response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME) - self.assertEqual(response, os_win_const.HYPERV_VM_STATE_DISABLED) + response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME) + self.assertEqual(response, os_win_const.HYPERV_VM_STATE_DISABLED) @mock.patch.object(vmops.VMOps, '_get_vm_state') def test_wait_for_power_off_true(self, mock_get_state): @@ -1418,12 +1416,11 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase): def test_list_instance_uuids(self): fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3' - with mock.patch.object(self._vmops._vmutils, - 'list_instance_notes') as 
mock_list_notes: - mock_list_notes.return_value = [('fake_name', [fake_uuid])] + self._vmops._vmutils.list_instance_notes.return_value = ( + [('fake_name', [fake_uuid])]) - response = self._vmops.list_instance_uuids() - mock_list_notes.assert_called_once_with() + response = self._vmops.list_instance_uuids() + self._vmops._vmutils.list_instance_notes.assert_called_once_with() self.assertEqual(response, [fake_uuid]) diff --git a/nova/tests/unit/virt/hyperv/test_volumeops.py b/nova/tests/unit/virt/hyperv/test_volumeops.py index da7262085d..4a088b6030 100644 --- a/nova/tests/unit/virt/hyperv/test_volumeops.py +++ b/nova/tests/unit/virt/hyperv/test_volumeops.py @@ -140,7 +140,13 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase): self._volumeops.disconnect_volumes(block_device_info) fake_volume_driver.disconnect_volume.assert_called_once_with( - block_device_mapping[0]['connection_info']) + block_device_mapping[0]['connection_info'], force=False) + + # Verify force=True + fake_volume_driver.disconnect_volume.reset_mock() + self._volumeops.disconnect_volumes(block_device_info, force=True) + fake_volume_driver.disconnect_volume.assert_called_once_with( + block_device_mapping[0]['connection_info'], force=True) @mock.patch('time.sleep') @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') @@ -180,7 +186,7 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase): if attach_failed: fake_volume_driver.disconnect_volume.assert_called_once_with( - fake_conn_info) + fake_conn_info, force=False) mock_sleep.assert_has_calls( [mock.call(CONF.hyperv.volume_attach_retry_interval)] * CONF.hyperv.volume_attach_retry_count) @@ -202,7 +208,13 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase): mock_get_volume_driver.assert_called_once_with( mock.sentinel.conn_info) fake_volume_driver.disconnect_volume.assert_called_once_with( - mock.sentinel.conn_info) + mock.sentinel.conn_info, force=False) + + # Verify force=True + fake_volume_driver.disconnect_volume.reset_mock() + 
self._volumeops.disconnect_volume(mock.sentinel.conn_info, force=True) + fake_volume_driver.disconnect_volume.assert_called_once_with( + mock.sentinel.conn_info, force=True) @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') def test_detach_volume(self, mock_get_volume_driver): @@ -346,7 +358,13 @@ class BaseVolumeDriverTestCase(test_base.HyperVBaseTestCase): self._base_vol_driver.disconnect_volume(conn_info) self._conn.disconnect_volume.assert_called_once_with( - conn_info['data']) + conn_info['data'], force=False) + + # Verify force=True + self._conn.disconnect_volume.reset_mock() + self._base_vol_driver.disconnect_volume(conn_info, force=True) + self._conn.disconnect_volume.assert_called_once_with( + conn_info['data'], force=True) @mock.patch.object(volumeops.BaseVolumeDriver, '_get_disk_res_path') def _test_get_disk_resource_path_by_conn_info(self, diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py index 7b377b21c2..0b1cc7d47f 100644 --- a/nova/tests/unit/virt/ironic/test_driver.py +++ b/nova/tests/unit/virt/ironic/test_driver.py @@ -2597,9 +2597,6 @@ class IronicDriverSyncTestCase(IronicDriverTestCase): # that the thread completes. 
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture()) - self.mock_conn = self.useFixture( - fixtures.MockPatchObject(self.driver, '_ironic_connection')).mock - @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall') @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state') def test_rescue(self, mock_sps, mock_looping): diff --git a/nova/tests/unit/virt/libvirt/test_config.py b/nova/tests/unit/virt/libvirt/test_config.py index 396edfd024..c7577745ab 100644 --- a/nova/tests/unit/virt/libvirt/test_config.py +++ b/nova/tests/unit/virt/libvirt/test_config.py @@ -3135,6 +3135,32 @@ class LibvirtConfigNodeDeviceTest(LibvirtConfigBaseTest): config.LibvirtConfigNodeDeviceMdevInformation) self.assertEqual("nvidia-11", obj.mdev_information.type) self.assertEqual(12, obj.mdev_information.iommu_group) + self.assertIsNone(obj.mdev_information.uuid) + + def test_config_mdev_device_uuid(self): + xmlin = """ + <device> + <name>mdev_b2107403_110c_45b0_af87_32cc91597b8a_0000_41_00_0</name> + <path>/sys/devices/pci0000:40/0000:40:03.1/0000:41:00.0/b2107403-110c-45b0-af87-32cc91597b8a</path> + <parent>pci_0000_41_00_0</parent> + <driver> + <name>vfio_mdev</name> + </driver> + <capability type='mdev'> + <type id='nvidia-442'/> + <uuid>b2107403-110c-45b0-af87-32cc91597b8a</uuid> + <iommuGroup number='57'/> + </capability> + </device>""" + + obj = config.LibvirtConfigNodeDevice() + obj.parse_str(xmlin) + self.assertIsInstance(obj.mdev_information, + config.LibvirtConfigNodeDeviceMdevInformation) + self.assertEqual("nvidia-442", obj.mdev_information.type) + self.assertEqual(57, obj.mdev_information.iommu_group) + self.assertEqual("b2107403-110c-45b0-af87-32cc91597b8a", + obj.mdev_information.uuid) def test_config_vdpa_device(self): xmlin = """ diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py index 5632fcba86..0eada9ee14 100644 --- a/nova/tests/unit/virt/libvirt/test_driver.py +++ b/nova/tests/unit/virt/libvirt/test_driver.py 
@@ -740,16 +740,14 @@ class LibvirtConnTestCase(test.NoDBTestCase, 'resolve_driver_format', imagebackend.Image._get_driver_format) - self.useFixture(nova_fixtures.LibvirtFixture()) + self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture()) # ensure tests perform the same on all host architectures; this is # already done by the fakelibvirt fixture but we want to change the # architecture in some tests - _p = mock.patch('os.uname') - self.mock_uname = _p.start() + self.mock_uname = self.libvirt.mock_uname self.mock_uname.return_value = fakelibvirt.os_uname( 'Linux', '', '5.4.0-0-generic', '', fields.Architecture.X86_64) - self.addCleanup(_p.stop) self.test_instance = _create_test_instance() network_info = objects.InstanceInfoCache( @@ -2260,6 +2258,8 @@ class LibvirtConnTestCase(test.NoDBTestCase, instance_ref.info_cache = objects.InstanceInfoCache( network_info=network_info) + pci_utils.get_mac_by_pci_address.side_effect = None + pci_utils.get_mac_by_pci_address.return_value = 'da:d1:f2:91:95:c1' with test.nested( mock.patch('nova.objects.VirtualInterfaceList' '.get_by_instance_uuid', return_value=vifs), @@ -2269,8 +2269,7 @@ class LibvirtConnTestCase(test.NoDBTestCase, return_value=guest), mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc', return_value=xml), - mock.patch.object(pci_utils, 'get_mac_by_pci_address', - return_value='da:d1:f2:91:95:c1')): + ): metadata_obj = drvr._build_device_metadata(self.context, instance_ref) metadata = metadata_obj.devices @@ -6974,14 +6973,12 @@ class LibvirtConnTestCase(test.NoDBTestCase, self.assertEqual(cfg.devices[5].rate_bytes, 1024) self.assertEqual(cfg.devices[5].rate_period, 2) - @mock.patch('nova.virt.libvirt.driver.os.path.exists') - @test.patch_exists(SEV_KERNEL_PARAM_FILE, False) - def test_get_guest_config_with_rng_backend(self, mock_path): + @test.patch_exists(SEV_KERNEL_PARAM_FILE, result=False, other=True) + def test_get_guest_config_with_rng_backend(self): self.flags(virt_type='kvm', 
rng_dev_path='/dev/hw_rng', group='libvirt') self.flags(pointer_model='ps2mouse') - mock_path.return_value = True drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) @@ -7591,11 +7588,8 @@ class LibvirtConnTestCase(test.NoDBTestCase, @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_guest_storage_config") @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support") - @mock.patch('os.path.exists', return_value=True) - @test.patch_exists(SEV_KERNEL_PARAM_FILE, False) - def test_get_guest_config_aarch64( - self, mock_path_exists, mock_numa, mock_storage, - ): + @test.patch_exists(SEV_KERNEL_PARAM_FILE, result=False, other=True) + def test_get_guest_config_aarch64(self, mock_numa, mock_storage): TEST_AMOUNT_OF_PCIE_SLOTS = 8 CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS, group='libvirt') @@ -7615,7 +7609,6 @@ class LibvirtConnTestCase(test.NoDBTestCase, cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self), image_meta, disk_info) - self.assertTrue(mock_path_exists.called) self.assertEqual(cfg.os_mach_type, "virt") num_ports = 0 @@ -7632,10 +7625,9 @@ class LibvirtConnTestCase(test.NoDBTestCase, @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_guest_storage_config") @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support") - @mock.patch('os.path.exists', return_value=True) - @test.patch_exists(SEV_KERNEL_PARAM_FILE, False) + @test.patch_exists(SEV_KERNEL_PARAM_FILE, result=False, other=True) def test_get_guest_config_aarch64_with_graphics( - self, mock_path_exists, mock_numa, mock_storage, + self, mock_numa, mock_storage, ): self.mock_uname.return_value = fakelibvirt.os_uname( 'Linux', '', '5.4.0-0-generic', '', fields.Architecture.AARCH64) @@ -7645,7 +7637,6 @@ class LibvirtConnTestCase(test.NoDBTestCase, cfg = self._get_guest_config_with_graphics() - self.assertTrue(mock_path_exists.called) self.assertEqual(cfg.os_mach_type, "virt") 
usbhost_exists = False @@ -9231,7 +9222,7 @@ class LibvirtConnTestCase(test.NoDBTestCase, drvr._disconnect_volume( self.context, fake_connection_info, fake_instance_1) mock_volume_driver.disconnect_volume.assert_called_once_with( - fake_connection_info, fake_instance_1) + fake_connection_info, fake_instance_1, force=False) @mock.patch.object(libvirt_driver.LibvirtDriver, '_detach_encryptor') @mock.patch('nova.objects.InstanceList.get_uuids_by_host') @@ -9605,7 +9596,12 @@ class LibvirtConnTestCase(test.NoDBTestCase, device_name='vdc', ), mock.call.detach_encryptor(**encryption), - mock.call.disconnect_volume(connection_info, instance)]) + mock.call.disconnect_volume( + connection_info, + instance, + force=False, + ) + ]) get_device_conf_func = mock_detach_with_retry.mock_calls[0][1][2] self.assertEqual(mock_guest.get_disk, get_device_conf_func.func) self.assertEqual(('vdc',), get_device_conf_func.args) @@ -11410,13 +11406,11 @@ class LibvirtConnTestCase(test.NoDBTestCase, @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_assert_dest_node_has_enough_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' - '_assert_dest_node_has_enough_disk') - @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_shared_block_storage') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 
'_check_shared_storage_test_file') def test_check_can_live_migration_source_disk_over_commit_none(self, - mock_check, mock_shared_block, mock_enough, mock_disk_check): + mock_check, mock_shared_block, mock_disk_check): mock_check.return_value = False mock_shared_block.return_value = False @@ -15548,8 +15542,7 @@ class LibvirtConnTestCase(test.NoDBTestCase, filename=filename, size=100 * units.Gi, ephemeral_size=mock.ANY, specified_fs=None) - @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache') - def test_create_image_resize_snap_backend(self, mock_cache): + def test_create_image_resize_snap_backend(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) instance.task_state = task_states.RESIZE_FINISH @@ -16008,9 +16001,10 @@ class LibvirtConnTestCase(test.NoDBTestCase, self.assertEqual(ip, CONF.my_ip) @mock.patch.object(libvirt_driver.LOG, 'warning') - @mock.patch('nova.compute.utils.get_machine_ips') - def test_check_my_ip(self, mock_ips, mock_log): - mock_ips.return_value = ['8.8.8.8', '75.75.75.75'] + def test_check_my_ip(self, mock_log): + + self.libvirt.mock_get_machine_ips.return_value = [ + '8.8.8.8', '75.75.75.75'] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._check_my_ip() mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was ' @@ -16032,6 +16026,7 @@ class LibvirtConnTestCase(test.NoDBTestCase, drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) service_mock = mock.MagicMock() service_mock.disabled.return_value = False + drvr._host._init_events.return_value = None with test.nested( mock.patch.object(drvr._host, "_connect", side_effect=fakelibvirt.make_libvirtError( @@ -16039,8 +16034,6 @@ class LibvirtConnTestCase(test.NoDBTestCase, "Failed to connect to host", error_code= fakelibvirt.VIR_ERR_INTERNAL_ERROR)), - mock.patch.object(drvr._host, "_init_events", - return_value=None), mock.patch.object(objects.Service, "get_by_compute_host", 
return_value=service_mock)): @@ -16055,6 +16048,7 @@ class LibvirtConnTestCase(test.NoDBTestCase, drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) service_mock = mock.MagicMock() service_mock.disabled.return_value = False + drvr._host._init_events.return_value = None with test.nested( mock.patch.object(drvr._host, "_connect", side_effect=fakelibvirt.make_libvirtError( @@ -16062,8 +16056,6 @@ class LibvirtConnTestCase(test.NoDBTestCase, "Failed to connect to host", error_code= fakelibvirt.VIR_ERR_INTERNAL_ERROR)), - mock.patch.object(drvr._host, "_init_events", - return_value=None), mock.patch.object(host.Host, "has_min_version", return_value=True), mock.patch.object(drvr, "_do_quality_warnings", @@ -16083,11 +16075,10 @@ class LibvirtConnTestCase(test.NoDBTestCase, drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) service_mock = mock.MagicMock() service_mock.disabled.return_value = True + drvr._host._init_events.return_value = None with test.nested( mock.patch.object(drvr._host, "_connect", return_value=mock.MagicMock()), - mock.patch.object(drvr._host, "_init_events", - return_value=None), mock.patch.object(host.Host, "has_min_version", return_value=True), mock.patch.object(drvr, "_do_quality_warnings", @@ -17575,12 +17566,11 @@ class LibvirtConnTestCase(test.NoDBTestCase, got = drvr._get_cpu_info() self.assertEqual(want, got) - @mock.patch.object(pci_utils, 'get_ifname_by_pci_address', - return_value='ens1') @mock.patch.object(host.Host, 'list_pci_devices', return_value=['pci_0000_04_00_3', 'pci_0000_04_10_7', 'pci_0000_04_11_7']) - def test_get_pci_passthrough_devices(self, mock_list, mock_get_ifname): + def test_get_pci_passthrough_devices(self, mock_list): + pci_utils.get_ifname_by_pci_address.return_value = 'ens1' drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) @@ -17614,7 +17604,10 @@ class LibvirtConnTestCase(test.NoDBTestCase, "vendor_id": '8086', "dev_type": fields.PciDeviceType.SRIOV_PF, "phys_function": None, - 
"numa_node": None}, + "numa_node": None, + # value defined in the LibvirtFixture + "mac_address": "52:54:00:1e:59:c6", + }, { "dev_id": "pci_0000_04_10_7", "domain": 0, @@ -17650,7 +17643,7 @@ class LibvirtConnTestCase(test.NoDBTestCase, # The first call for every VF is to determine parent_ifname and # the second call to determine the MAC address. - mock_get_ifname.assert_has_calls([ + pci_utils.get_ifname_by_pci_address.assert_has_calls([ mock.call('0000:04:10.7', pf_interface=True), mock.call('0000:04:11.7', pf_interface=True), ]) @@ -19823,16 +19816,64 @@ class LibvirtConnTestCase(test.NoDBTestCase, self.context, mock.sentinel.connection_info, instance, - destroy_secrets=False + destroy_secrets=False, + force=True ), mock.call( self.context, mock.sentinel.connection_info, instance, - destroy_secrets=True + destroy_secrets=True, + force=True ) ]) + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_driver') + @mock.patch( + 'nova.virt.libvirt.driver.LibvirtDriver._should_disconnect_target', + new=mock.Mock(return_value=True)) + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._detach_encryptor', + new=mock.Mock()) + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain', + new=mock.Mock()) + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_vpmems', + new=mock.Mock(return_value=None)) + def test_cleanup_disconnect_volume(self, mock_vol_driver): + """Verify that we call disconnect_volume() with force=True + + cleanup() is called by destroy() when an instance is being deleted and + force=True should be passed down to os-brick's disconnect_volume() + call, which will ensure removal of devices regardless of errors. + + We need to ensure that devices are removed when an instance is being + deleted to avoid leaving leftover devices that could later be + erroneously connected by external entities (example: multipathd) to + instances that should not have access to the volumes. 
+ + See https://bugs.launchpad.net/nova/+bug/2004555 for details. + """ + connection_info = mock.MagicMock() + block_device_info = { + 'block_device_mapping': [ + { + 'connection_info': connection_info + } + ] + } + instance = objects.Instance(self.context, **self.test_instance) + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) + + drvr.cleanup( + self.context, + instance, + network_info={}, + block_device_info=block_device_info, + destroy_vifs=False, + destroy_disks=False, + ) + mock_vol_driver.return_value.disconnect_volume.assert_called_once_with( + connection_info, instance, force=True) + @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption') @mock.patch.object(libvirt_driver.LibvirtDriver, '_allow_native_luksv1') def test_swap_volume_native_luks_blocked(self, mock_allow_native_luksv1, @@ -22060,11 +22101,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin): self.drvr.migrate_disk_and_power_off, 'ctx', instance, '10.0.0.1', flavor_obj, None) - @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' - '._get_instance_disk_info') @mock.patch('nova.virt.driver.block_device_info_get_ephemerals') - def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get, - mock_get_disk_info): + def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get): mappings = [ { 'device_name': '/dev/sdb4', @@ -22111,7 +22149,6 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin): # Old flavor, eph is 20, real disk is 3, target is 2, fail flavor = {'root_gb': 10, 'ephemeral_gb': 2} flavor_obj = objects.Flavor(**flavor) - mock_get_disk_info.return_value = fake_disk_info_json(instance) self.assertRaises( exception.InstanceFaultRollback, @@ -25561,9 +25598,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin): } self._test_get_gpu_inventories(drvr, expected, ['nvidia-11']) - @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' - '._get_mdev_capable_devices') - def 
test_get_gpu_inventories_with_two_types(self, get_mdev_capable_devs): + def test_get_gpu_inventories_with_two_types(self): self.flags(enabled_mdev_types=['nvidia-11', 'nvidia-12'], group='devices') # we need to call the below again to ensure the updated @@ -28510,13 +28545,11 @@ class LVMSnapshotTests(_BaseSnapshotTests): new=mock.Mock(return_value=None)) @mock.patch('nova.virt.libvirt.utils.get_disk_type_from_path', new=mock.Mock(return_value='lvm')) - @mock.patch('nova.virt.libvirt.utils.file_open', - side_effect=[io.BytesIO(b''), io.BytesIO(b'')]) @mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image') @mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info') def _test_lvm_snapshot(self, disk_format, mock_volume_info, - mock_convert_image, mock_file_open): + mock_convert_image): self.flags(images_type='lvm', images_volume_group='nova-vg', group='libvirt') diff --git a/nova/tests/unit/virt/libvirt/test_guest.py b/nova/tests/unit/virt/libvirt/test_guest.py index 70d438d816..47e9ba4b62 100644 --- a/nova/tests/unit/virt/libvirt/test_guest.py +++ b/nova/tests/unit/virt/libvirt/test_guest.py @@ -1040,3 +1040,25 @@ class JobInfoTestCase(test.NoDBTestCase): mock_stats.assert_called_once_with() mock_info.assert_called_once_with() + + @mock.patch.object(fakelibvirt.virDomain, "jobInfo") + @mock.patch.object(fakelibvirt.virDomain, "jobStats") + def test_job_stats_no_ram(self, mock_stats, mock_info): + mock_stats.side_effect = fakelibvirt.make_libvirtError( + fakelibvirt.libvirtError, + "internal error: migration was active, but no RAM info was set", + error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR, + error_message="migration was active, but no RAM info was set") + + info = self.guest.get_job_info() + + self.assertIsInstance(info, libvirt_guest.JobInfo) + self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_NONE, info.type) + self.assertEqual(0, info.time_elapsed) + self.assertEqual(0, info.time_remaining) + self.assertEqual(0, info.memory_total) + 
self.assertEqual(0, info.memory_processed) + self.assertEqual(0, info.memory_remaining) + + mock_stats.assert_called_once_with() + self.assertFalse(mock_info.called) diff --git a/nova/tests/unit/virt/libvirt/test_host.py b/nova/tests/unit/virt/libvirt/test_host.py index d71d13ab37..a46a3e46a5 100644 --- a/nova/tests/unit/virt/libvirt/test_host.py +++ b/nova/tests/unit/virt/libvirt/test_host.py @@ -16,6 +16,7 @@ import os +import ddt import eventlet from eventlet import greenthread from eventlet import tpool @@ -71,11 +72,10 @@ class HostTestCase(test.NoDBTestCase): self.useFixture(nova_fixtures.LibvirtFixture()) self.host = host.Host("qemu:///system") - @mock.patch("nova.virt.libvirt.host.Host._init_events") - def test_repeat_initialization(self, mock_init_events): + def test_repeat_initialization(self): for i in range(3): self.host.initialize() - mock_init_events.assert_called_once_with() + self.host._init_events.assert_called_once_with() @mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback") def test_close_callback(self, mock_close): @@ -1113,8 +1113,9 @@ Active: 8381604 kB expect_vf = ["rx", "tx", "sg", "tso", "gso", "gro", "rxvlan", "txvlan"] self.assertEqual(expect_vf, actualvf) - @mock.patch.object(pci_utils, 'get_ifname_by_pci_address') - def test_get_pcidev_info_non_nic(self, mock_get_ifname): + def test_get_pcidev_info_non_nic(self): + pci_utils.get_mac_by_pci_address.side_effect = ( + exception.PciDeviceNotFoundById('0000:04:00.3')) dev_name = "pci_0000_04_11_7" pci_dev = fakelibvirt.NodeDevice( self.host._get_connection(), @@ -1128,11 +1129,10 @@ Active: 8381604 kB 'parent_addr': '0000:04:00.3', } self.assertEqual(expect_vf, actual_vf) - mock_get_ifname.assert_not_called() + pci_utils.get_ifname_by_pci_address.assert_not_called() - @mock.patch.object(pci_utils, 'get_ifname_by_pci_address', - return_value='ens1') - def test_get_pcidev_info(self, mock_get_ifname): + def test_get_pcidev_info(self): + 
pci_utils.get_ifname_by_pci_address.return_value = 'ens1' devs = { "pci_0000_04_00_3", "pci_0000_04_10_7", "pci_0000_04_11_7", "pci_0000_04_00_1", "pci_0000_03_00_0", "pci_0000_03_00_1", @@ -1156,9 +1156,9 @@ Active: 8381604 kB dev for dev in node_devs.values() if dev.name() in devs] name = "pci_0000_04_00_3" - actual_vf = self.host._get_pcidev_info( + actual_pf = self.host._get_pcidev_info( name, node_devs[name], net_devs, [], []) - expect_vf = { + expect_pf = { "dev_id": "pci_0000_04_00_3", "address": "0000:04:00.3", "product_id": '1521', @@ -1166,8 +1166,10 @@ Active: 8381604 kB "vendor_id": '8086', "label": 'label_8086_1521', "dev_type": obj_fields.PciDeviceType.SRIOV_PF, + # value defined in the LibvirtFixture + "mac_address": "52:54:00:1e:59:c6", } - self.assertEqual(expect_vf, actual_vf) + self.assertEqual(expect_pf, actual_pf) name = "pci_0000_04_10_7" actual_vf = self.host._get_pcidev_info( @@ -1222,9 +1224,9 @@ Active: 8381604 kB self.assertEqual(expect_vf, actual_vf) name = "pci_0000_03_00_0" - actual_vf = self.host._get_pcidev_info( + actual_pf = self.host._get_pcidev_info( name, node_devs[name], net_devs, [], []) - expect_vf = { + expect_pf = { "dev_id": "pci_0000_03_00_0", "address": "0000:03:00.0", "product_id": '1013', @@ -1232,13 +1234,15 @@ Active: 8381604 kB "vendor_id": '15b3', "label": 'label_15b3_1013', "dev_type": obj_fields.PciDeviceType.SRIOV_PF, + # value defined in the LibvirtFixture + "mac_address": "52:54:00:1e:59:c6", } - self.assertEqual(expect_vf, actual_vf) + self.assertEqual(expect_pf, actual_pf) name = "pci_0000_03_00_1" - actual_vf = self.host._get_pcidev_info( + actual_pf = self.host._get_pcidev_info( name, node_devs[name], net_devs, [], []) - expect_vf = { + expect_pf = { "dev_id": "pci_0000_03_00_1", "address": "0000:03:00.1", "product_id": '1013', @@ -1246,8 +1250,10 @@ Active: 8381604 kB "vendor_id": '15b3', "label": 'label_15b3_1013', "dev_type": obj_fields.PciDeviceType.SRIOV_PF, + # value defined in the LibvirtFixture + 
"mac_address": "52:54:00:1e:59:c6", } - self.assertEqual(expect_vf, actual_vf) + self.assertEqual(expect_pf, actual_pf) # Parent PF with a VPD cap. name = "pci_0000_82_00_0" @@ -1264,6 +1270,8 @@ Active: 8381604 kB "capabilities": { # Should be obtained from the parent PF in this case. "vpd": {"card_serial_number": "MT2113X00000"}}, + # value defined in the LibvirtFixture + "mac_address": "52:54:00:1e:59:c6", } self.assertEqual(expect_pf, actual_pf) @@ -1928,6 +1936,7 @@ class TestLibvirtSEV(test.NoDBTestCase): self.host = host.Host("qemu:///system") +@ddt.ddt class TestLibvirtSEVUnsupported(TestLibvirtSEV): @mock.patch.object(os.path, 'exists', return_value=False) def test_kernel_parameter_missing(self, fake_exists): @@ -1935,19 +1944,26 @@ class TestLibvirtSEVUnsupported(TestLibvirtSEV): fake_exists.assert_called_once_with( '/sys/module/kvm_amd/parameters/sev') + @ddt.data( + ('0\n', False), + ('N\n', False), + ('1\n', True), + ('Y\n', True), + ) + @ddt.unpack @mock.patch.object(os.path, 'exists', return_value=True) - @mock.patch('builtins.open', mock.mock_open(read_data="0\n")) - def test_kernel_parameter_zero(self, fake_exists): - self.assertFalse(self.host._kernel_supports_amd_sev()) - fake_exists.assert_called_once_with( - '/sys/module/kvm_amd/parameters/sev') - - @mock.patch.object(os.path, 'exists', return_value=True) - @mock.patch('builtins.open', mock.mock_open(read_data="1\n")) - def test_kernel_parameter_one(self, fake_exists): - self.assertTrue(self.host._kernel_supports_amd_sev()) - fake_exists.assert_called_once_with( - '/sys/module/kvm_amd/parameters/sev') + def test_kernel_parameter( + self, sev_param_value, expected_support, mock_exists + ): + with mock.patch( + 'builtins.open', mock.mock_open(read_data=sev_param_value) + ): + self.assertIs( + expected_support, + self.host._kernel_supports_amd_sev() + ) + mock_exists.assert_called_once_with( + '/sys/module/kvm_amd/parameters/sev') @mock.patch.object(os.path, 'exists', return_value=True) 
@mock.patch('builtins.open', mock.mock_open(read_data="1\n")) diff --git a/nova/tests/unit/virt/libvirt/test_vif.py b/nova/tests/unit/virt/libvirt/test_vif.py index 43504efeb5..697300b9cf 100644 --- a/nova/tests/unit/virt/libvirt/test_vif.py +++ b/nova/tests/unit/virt/libvirt/test_vif.py @@ -517,18 +517,17 @@ class LibvirtVifTestCase(test.NoDBTestCase): def setUp(self): super(LibvirtVifTestCase, self).setUp() - self.useFixture(nova_fixtures.LibvirtFixture(stub_os_vif=False)) + self.libvirt = self.useFixture( + nova_fixtures.LibvirtFixture(stub_os_vif=False)) # os_vif.initialize is typically done in nova-compute startup os_vif.initialize() self.setup_os_vif_objects() # multiqueue configuration is host OS specific - _a = mock.patch('os.uname') - self.mock_uname = _a.start() + self.mock_uname = self.libvirt.mock_uname self.mock_uname.return_value = fakelibvirt.os_uname( 'Linux', '', '5.10.13-200-generic', '', 'x86_64') - self.addCleanup(_a.stop) def _get_node(self, xml): doc = etree.fromstring(xml) @@ -983,14 +982,9 @@ class LibvirtVifTestCase(test.NoDBTestCase): self.vif_bridge, self.vif_bridge['network']['bridge']) - @mock.patch.object(pci_utils, 'get_ifname_by_pci_address') - @mock.patch.object(pci_utils, 'get_vf_num_by_pci_address', return_value=1) - @mock.patch('nova.privsep.linux_net.set_device_macaddr') - @mock.patch('nova.privsep.linux_net.set_device_macaddr_and_vlan') - def _test_hw_veb_op(self, op, vlan, mock_set_macaddr_and_vlan, - mock_set_macaddr, mock_get_vf_num, - mock_get_ifname): - mock_get_ifname.side_effect = ['eth1', 'eth13'] + def _test_hw_veb_op(self, op, vlan): + self.libvirt.mock_get_vf_num_by_pci_address.return_value = 1 + pci_utils.get_ifname_by_pci_address.side_effect = ['eth1', 'eth13'] vlan_id = int(vlan) port_state = 'up' if vlan_id > 0 else 'down' mac = ('00:00:00:00:00:00' if op.__name__ == 'unplug' @@ -1005,10 +999,13 @@ class LibvirtVifTestCase(test.NoDBTestCase): 'set_macaddr': [mock.call('eth13', mac, port_state=port_state)] } 
op(self.instance, self.vif_hw_veb_macvtap) - mock_get_ifname.assert_has_calls(calls['get_ifname']) - mock_get_vf_num.assert_has_calls(calls['get_vf_num']) - mock_set_macaddr.assert_has_calls(calls['set_macaddr']) - mock_set_macaddr_and_vlan.assert_called_once_with( + pci_utils.get_ifname_by_pci_address.assert_has_calls( + calls['get_ifname']) + self.libvirt.mock_get_vf_num_by_pci_address.assert_has_calls( + calls['get_vf_num']) + self.libvirt.mock_set_device_macaddr.assert_has_calls( + calls['set_macaddr']) + self.libvirt.mock_set_device_macaddr_and_vlan.assert_called_once_with( 'eth1', 1, mock.ANY, vlan_id) def test_plug_hw_veb(self): @@ -1218,9 +1215,8 @@ class LibvirtVifTestCase(test.NoDBTestCase): self.assertEqual(1, len(node)) self._assertPciEqual(node, self.vif_hostdev_physical) - @mock.patch.object(pci_utils, 'get_ifname_by_pci_address', - return_value='eth1') - def test_hw_veb_driver_macvtap(self, mock_get_ifname): + def test_hw_veb_driver_macvtap(self): + pci_utils.get_ifname_by_pci_address.return_value = 'eth1' d = vif.LibvirtGenericVIFDriver() xml = self._get_instance_xml(d, self.vif_hw_veb_macvtap) node = self._get_node(xml) diff --git a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py index 89a59f2f1a..f0d403e300 100644 --- a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py +++ b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py @@ -81,3 +81,23 @@ class LibvirtFibreChannelVolumeDriverTestCase( self.assertEqual(requested_size, new_size) libvirt_driver.connector.extend_volume.assert_called_once_with( connection_info['data']) + + def test_disconnect_volume(self): + device_path = '/dev/fake-dev' + connection_info = {'data': {'device_path': device_path}} + + libvirt_driver = fibrechannel.LibvirtFibreChannelVolumeDriver( + self.fake_host) + libvirt_driver.connector.disconnect_volume = mock.MagicMock() + libvirt_driver.disconnect_volume( + connection_info, 
mock.sentinel.instance) + + libvirt_driver.connector.disconnect_volume.assert_called_once_with( + connection_info['data'], connection_info['data'], force=False) + + # Verify force=True + libvirt_driver.connector.disconnect_volume.reset_mock() + libvirt_driver.disconnect_volume( + connection_info, mock.sentinel.instance, force=True) + libvirt_driver.connector.disconnect_volume.assert_called_once_with( + connection_info['data'], connection_info['data'], force=True) diff --git a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py index f8a64abea5..540c9c822d 100644 --- a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py +++ b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py @@ -57,10 +57,19 @@ class LibvirtISCSIVolumeDriverTestCase( device=device_path)) libvirt_driver.disconnect_volume(connection_info, mock.sentinel.instance) + libvirt_driver.connector.disconnect_volume.assert_called_once_with( + connection_info['data'], None, force=False) msg = mock_LOG_warning.call_args_list[0] self.assertIn('Ignoring VolumeDeviceNotFound', msg[0][0]) + # Verify force=True + libvirt_driver.connector.disconnect_volume.reset_mock() + libvirt_driver.disconnect_volume( + connection_info, mock.sentinel.instance, force=True) + libvirt_driver.connector.disconnect_volume.assert_called_once_with( + connection_info['data'], None, force=True) + def test_extend_volume(self): device_path = '/dev/fake-dev' connection_info = {'data': {'device_path': device_path}} diff --git a/nova/tests/unit/virt/libvirt/volume/test_lightos.py b/nova/tests/unit/virt/libvirt/volume/test_lightos.py index 554647acf4..1eb9583d4c 100644 --- a/nova/tests/unit/virt/libvirt/volume/test_lightos.py +++ b/nova/tests/unit/virt/libvirt/volume/test_lightos.py @@ -30,7 +30,7 @@ class LibvirtLightVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase): device_scan_attempts=5) @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory', - 
new=mock.Mock(return_value=mock.Mock())) + new=mock.Mock()) def test_libvirt_lightos_driver_connect(self): lightos_driver = lightos.LibvirtLightOSVolumeDriver( self.fake_host) @@ -40,15 +40,16 @@ class LibvirtLightVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase): 'name': 'aLightVolume', 'conf': config} connection_info = {'data': disk_info} - with mock.patch.object(lightos_driver.connector, - 'connect_volume', - return_value={'path': '/dev/dms1234567'}): - lightos_driver.connect_volume(connection_info, None) - (lightos_driver.connector.connect_volume. - assert_called_once_with( - connection_info['data'])) - self.assertEqual('/dev/dms1234567', - connection_info['data']['device_path']) + lightos_driver.connector.connect_volume.return_value = ( + {'path': '/dev/dms1234567'}) + + lightos_driver.connect_volume(connection_info, None) + + lightos_driver.connector.connect_volume.assert_called_once_with( + connection_info['data']) + self.assertEqual( + '/dev/dms1234567', + connection_info['data']['device_path']) @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory', new=mock.Mock(return_value=mock.Mock())) @@ -61,7 +62,13 @@ class LibvirtLightVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase): connection_info = {'data': disk_info} lightos_driver.disconnect_volume(connection_info, None) lightos_driver.connector.disconnect_volume.assert_called_once_with( - disk_info, None) + disk_info, None, force=False) + + # Verify force=True + lightos_driver.connector.disconnect_volume.reset_mock() + lightos_driver.disconnect_volume(connection_info, None, force=True) + lightos_driver.connector.disconnect_volume.assert_called_once_with( + disk_info, None, force=True) @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory', new=mock.Mock(return_value=mock.Mock())) diff --git a/nova/tests/unit/virt/libvirt/volume/test_nvme.py b/nova/tests/unit/virt/libvirt/volume/test_nvme.py index fcb303b4c3..2803903e9f 100644 --- 
a/nova/tests/unit/virt/libvirt/volume/test_nvme.py +++ b/nova/tests/unit/virt/libvirt/volume/test_nvme.py @@ -56,14 +56,15 @@ class LibvirtNVMEVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase): 'name': 'aNVMEVolume', 'conf': config} connection_info = {'data': disk_info} - with mock.patch.object(nvme_driver.connector, - 'connect_volume', - return_value={'path': '/dev/dms1234567'}): - nvme_driver.connect_volume(connection_info, None) - nvme_driver.connector.connect_volume.assert_called_once_with( - connection_info['data']) - self.assertEqual('/dev/dms1234567', - connection_info['data']['device_path']) + nvme_driver.connector.connect_volume.return_value = ( + {'path': '/dev/dms1234567'}) + + nvme_driver.connect_volume(connection_info, None) + + nvme_driver.connector.connect_volume.assert_called_once_with( + connection_info['data']) + self.assertEqual( + '/dev/dms1234567', connection_info['data']['device_path']) @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory', new=mock.Mock(return_value=mock.Mock())) @@ -76,7 +77,13 @@ class LibvirtNVMEVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase): connection_info = {'data': disk_info} nvme_driver.disconnect_volume(connection_info, None) nvme_driver.connector.disconnect_volume.assert_called_once_with( - disk_info, None) + disk_info, None, force=False) + + # Verify force=True + nvme_driver.connector.disconnect_volume.reset_mock() + nvme_driver.disconnect_volume(connection_info, None, force=True) + nvme_driver.connector.disconnect_volume.assert_called_once_with( + disk_info, None, force=True) @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory', new=mock.Mock(return_value=mock.Mock())) diff --git a/nova/tests/unit/virt/libvirt/volume/test_scaleio.py b/nova/tests/unit/virt/libvirt/volume/test_scaleio.py index 6d9247cd2d..ed5ab08a6e 100644 --- a/nova/tests/unit/virt/libvirt/volume/test_scaleio.py +++ b/nova/tests/unit/virt/libvirt/volume/test_scaleio.py @@ -49,7 +49,13 @@ class 
LibvirtScaleIOVolumeDriverTestCase( conn = {'data': mock.sentinel.conn_data} sio.disconnect_volume(conn, mock.sentinel.instance) sio.connector.disconnect_volume.assert_called_once_with( - mock.sentinel.conn_data, None) + mock.sentinel.conn_data, None, force=False) + + # Verify force=True + sio.connector.disconnect_volume.reset_mock() + sio.disconnect_volume(conn, mock.sentinel.instance, force=True) + sio.connector.disconnect_volume.assert_called_once_with( + mock.sentinel.conn_data, None, force=True) @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory', new=mock.Mock(return_value=mock.Mock())) diff --git a/nova/tests/unit/virt/libvirt/volume/test_storpool.py b/nova/tests/unit/virt/libvirt/volume/test_storpool.py index e14954f148..9ceac07260 100644 --- a/nova/tests/unit/virt/libvirt/volume/test_storpool.py +++ b/nova/tests/unit/virt/libvirt/volume/test_storpool.py @@ -53,9 +53,11 @@ class MockStorPoolConnector(object): } return {'type': 'block', 'path': test_attached[v]['path']} - def disconnect_volume(self, connection_info, device_info): + def disconnect_volume(self, connection_info, device_info, **kwargs): self.inst.assertIn('client_id', connection_info) self.inst.assertIn('volume', connection_info) + self.inst.assertIn('force', kwargs) + self.inst.assertEqual(self.inst.force, kwargs.get('force')) v = connection_info['volume'] if v not in test_attached: @@ -86,6 +88,11 @@ class MockStorPoolInitiator(object): class LibvirtStorPoolVolumeDriverTestCase( test_volume.LibvirtVolumeBaseTestCase): + def setUp(self): + super().setUp() + # This is for testing the force flag of disconnect_volume() + self.force = False + def mock_storpool(f): def _config_inner_inner1(inst, *args, **kwargs): @mock.patch( @@ -175,3 +182,10 @@ class LibvirtStorPoolVolumeDriverTestCase( libvirt_driver.disconnect_volume(ci_2, mock.sentinel.instance) self.assertDictEqual({}, test_attached) + + # Connect the volume again so we can detach it again + 
libvirt_driver.connect_volume(ci_2, mock.sentinel.instance) + # Verify force=True + self.force = True + libvirt_driver.disconnect_volume( + ci_2, mock.sentinel.instance, force=True) diff --git a/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py b/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py index 883cebb55a..032ceb4fe5 100644 --- a/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py +++ b/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py @@ -95,7 +95,13 @@ class LibvirtVZStorageTestCase(test_volume.LibvirtVolumeBaseTestCase): conn = {'data': mock.sentinel.conn_data} drv.disconnect_volume(conn, mock.sentinel.instance) drv.connector.disconnect_volume.assert_called_once_with( - mock.sentinel.conn_data, None) + mock.sentinel.conn_data, None, force=False) + + # Verify force=True + drv.connector.disconnect_volume.reset_mock() + drv.disconnect_volume(conn, mock.sentinel.instance, force=True) + drv.connector.disconnect_volume.assert_called_once_with( + mock.sentinel.conn_data, None, force=True) def test_libvirt_vzstorage_driver_get_config(self): libvirt_driver = vzstorage.LibvirtVZStorageVolumeDriver(self.fake_host) diff --git a/nova/tests/unit/virt/test_block_device.py b/nova/tests/unit/virt/test_block_device.py index aff6c5ef19..94d9297ca3 100644 --- a/nova/tests/unit/virt/test_block_device.py +++ b/nova/tests/unit/virt/test_block_device.py @@ -433,24 +433,23 @@ class TestDriverBlockDevice(test.NoDBTestCase): def _test_call_wait_func(self, delete_on_termination, delete_fail=False): test_bdm = self.driver_classes['volume'](self.volume_bdm) test_bdm['delete_on_termination'] = delete_on_termination - with mock.patch.object(self.volume_api, 'delete') as vol_delete: - wait_func = mock.MagicMock() - mock_exception = exception.VolumeNotCreated(volume_id='fake-id', - seconds=1, - attempts=1, - volume_status='error') - wait_func.side_effect = mock_exception - - if delete_on_termination and delete_fail: - vol_delete.side_effect = Exception() - - 
self.assertRaises(exception.VolumeNotCreated, - test_bdm._call_wait_func, - context=self.context, - wait_func=wait_func, - volume_api=self.volume_api, - volume_id='fake-id') - self.assertEqual(delete_on_termination, vol_delete.called) + if delete_on_termination and delete_fail: + self.volume_api.delete.side_effect = Exception() + + wait_func = mock.MagicMock() + mock_exception = exception.VolumeNotCreated(volume_id='fake-id', + seconds=1, + attempts=1, + volume_status='error') + wait_func.side_effect = mock_exception + + self.assertRaises(exception.VolumeNotCreated, + test_bdm._call_wait_func, + context=self.context, + wait_func=wait_func, + volume_api=self.volume_api, + volume_id='fake-id') + self.assertEqual(delete_on_termination, self.volume_api.delete.called) def test_call_wait_delete_volume(self): self._test_call_wait_func(True) @@ -483,25 +482,24 @@ class TestDriverBlockDevice(test.NoDBTestCase): volume['shared_targets'] = True volume['service_uuid'] = uuids.service_uuid + if delete_attachment_raises: + self.volume_api.attachment_delete.side_effect = ( + delete_attachment_raises) + + self.virt_driver.get_volume_connector.return_value = connector + with test.nested( mock.patch.object(driver_bdm, '_get_volume', return_value=volume), - mock.patch.object(self.virt_driver, 'get_volume_connector', - return_value=connector), mock.patch('os_brick.initiator.utils.guard_connection'), - mock.patch.object(self.volume_api, 'attachment_delete'), - ) as (mock_get_volume, mock_get_connector, mock_guard, - vapi_attach_del): - - if delete_attachment_raises: - vapi_attach_del.side_effect = delete_attachment_raises + ) as (mock_get_volume, mock_guard): driver_bdm.detach(elevated_context, instance, self.volume_api, self.virt_driver, attachment_id=attachment_id) mock_guard.assert_called_once_with(volume) - vapi_attach_del.assert_called_once_with(elevated_context, - attachment_id) + self.volume_api.attachment_delete.assert_called_once_with( + elevated_context, attachment_id) def 
test_volume_delete_attachment_with_shared_targets(self): self.test_volume_delete_attachment(include_shared_targets=True) @@ -952,31 +950,28 @@ class TestDriverBlockDevice(test.NoDBTestCase): instance = fake_instance.fake_instance_obj(mock.sentinel.ctx, **{'uuid': uuids.uuid}) - with test.nested( - mock.patch.object(self.volume_api, 'get_snapshot', - return_value=snapshot), - mock.patch.object(self.volume_api, 'create', return_value=volume), - mock.patch.object(self.volume_api, 'delete'), - ) as (vol_get_snap, vol_create, vol_delete): - wait_func = mock.MagicMock() - mock_exception = exception.VolumeNotCreated(volume_id=volume['id'], - seconds=1, - attempts=1, - volume_status='error') - wait_func.side_effect = mock_exception - self.assertRaises(exception.VolumeNotCreated, - test_bdm.attach, context=self.context, - instance=instance, - volume_api=self.volume_api, - virt_driver=self.virt_driver, - wait_func=wait_func) - - vol_get_snap.assert_called_once_with( - self.context, 'fake-snapshot-id-1') - vol_create.assert_called_once_with( - self.context, 3, '', '', availability_zone=None, - snapshot=snapshot, volume_type=None) - vol_delete.assert_called_once_with(self.context, volume['id']) + self.volume_api.get_snapshot.return_value = snapshot + self.volume_api.create.return_value = volume + wait_func = mock.MagicMock() + mock_exception = exception.VolumeNotCreated(volume_id=volume['id'], + seconds=1, + attempts=1, + volume_status='error') + wait_func.side_effect = mock_exception + self.assertRaises(exception.VolumeNotCreated, + test_bdm.attach, context=self.context, + instance=instance, + volume_api=self.volume_api, + virt_driver=self.virt_driver, + wait_func=wait_func) + + self.volume_api.get_snapshot.assert_called_once_with( + self.context, 'fake-snapshot-id-1') + self.volume_api.create.assert_called_once_with( + self.context, 3, '', '', availability_zone=None, + snapshot=snapshot, volume_type=None) + self.volume_api.delete.assert_called_once_with( + self.context, 
volume['id']) def test_snapshot_attach_volume(self): test_bdm = self.driver_classes['volsnapshot']( @@ -984,19 +979,17 @@ class TestDriverBlockDevice(test.NoDBTestCase): instance = {'id': 'fake_id', 'uuid': uuids.uuid} - with test.nested( - mock.patch.object(self.driver_classes['volume'], 'attach'), - mock.patch.object(self.volume_api, 'get_snapshot'), - mock.patch.object(self.volume_api, 'create'), - ) as (mock_attach, mock_get_snapshot, mock_create): + with mock.patch.object( + self.driver_classes['volume'], 'attach' + ) as mock_attach: test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) mock_attach.assert_called_once_with( self.context, instance, self.volume_api, self.virt_driver) # Make sure theses are not called - mock_get_snapshot.assert_not_called() - mock_create.assert_not_called() + self.volume_api.get_snapshot.assert_not_called() + self.volume_api.create.assert_not_called() def test_snapshot_attach_no_volume_and_no_volume_type(self): bdm = self.driver_classes['volsnapshot'](self.volsnapshot_bdm) @@ -1006,15 +999,10 @@ class TestDriverBlockDevice(test.NoDBTestCase): original_volume = {'id': uuids.original_volume_id, 'volume_type_id': 'original_volume_type'} new_volume = {'id': uuids.new_volume_id} - with test.nested( - mock.patch.object(self.driver_classes['volume'], 'attach'), - mock.patch.object(self.volume_api, 'get_snapshot', - return_value=snapshot), - mock.patch.object(self.volume_api, 'get', - return_value=original_volume), - mock.patch.object(self.volume_api, 'create', - return_value=new_volume), - ) as (mock_attach, mock_get_snapshot, mock_get, mock_create): + self.volume_api.get_snapshot.return_value = snapshot + self.volume_api.get.return_value = original_volume + self.volume_api.create.return_value = new_volume + with mock.patch.object(self.driver_classes["volume"], "attach"): bdm.volume_id = None bdm.volume_type = None bdm.attach(self.context, instance, 
self.volume_api, @@ -1022,10 +1010,11 @@ class TestDriverBlockDevice(test.NoDBTestCase): # Assert that the original volume type is fetched, stored within # the bdm and then used to create the new snapshot based volume. - mock_get.assert_called_once_with(self.context, - uuids.original_volume_id) + self.volume_api.get.assert_called_once_with( + self.context, uuids.original_volume_id) self.assertEqual('original_volume_type', bdm.volume_type) - mock_create.assert_called_once_with(self.context, bdm.volume_size, + self.volume_api.create.assert_called_once_with( + self.context, bdm.volume_size, '', '', volume_type='original_volume_type', snapshot=snapshot, availability_zone=None) @@ -1097,27 +1086,25 @@ class TestDriverBlockDevice(test.NoDBTestCase): instance = fake_instance.fake_instance_obj(mock.sentinel.ctx, **{'uuid': uuids.uuid}) - with test.nested( - mock.patch.object(self.volume_api, 'create', return_value=volume), - mock.patch.object(self.volume_api, 'delete'), - ) as (vol_create, vol_delete): - wait_func = mock.MagicMock() - mock_exception = exception.VolumeNotCreated(volume_id=volume['id'], - seconds=1, - attempts=1, - volume_status='error') - wait_func.side_effect = mock_exception - self.assertRaises(exception.VolumeNotCreated, - test_bdm.attach, context=self.context, - instance=instance, - volume_api=self.volume_api, - virt_driver=self.virt_driver, - wait_func=wait_func) - - vol_create.assert_called_once_with( - self.context, 1, '', '', image_id=image['id'], - availability_zone=None, volume_type=None) - vol_delete.assert_called_once_with(self.context, volume['id']) + self.volume_api.create.return_value = volume + wait_func = mock.MagicMock() + mock_exception = exception.VolumeNotCreated(volume_id=volume['id'], + seconds=1, + attempts=1, + volume_status='error') + wait_func.side_effect = mock_exception + self.assertRaises(exception.VolumeNotCreated, + test_bdm.attach, context=self.context, + instance=instance, + volume_api=self.volume_api, + 
virt_driver=self.virt_driver, + wait_func=wait_func) + + self.volume_api.create.assert_called_once_with( + self.context, 1, '', '', image_id=image['id'], + availability_zone=None, volume_type=None) + self.volume_api.delete.assert_called_once_with( + self.context, volume['id']) def test_image_attach_volume(self): test_bdm = self.driver_classes['volimage']( @@ -1125,19 +1112,17 @@ class TestDriverBlockDevice(test.NoDBTestCase): instance = {'id': 'fake_id', 'uuid': uuids.uuid} - with test.nested( - mock.patch.object(self.driver_classes['volume'], 'attach'), - mock.patch.object(self.volume_api, 'get_snapshot'), - mock.patch.object(self.volume_api, 'create'), - ) as (mock_attch, mock_get_snapshot, mock_create): + with mock.patch.object( + self.driver_classes['volume'], 'attach' + ) as mock_attach: test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) - mock_attch.assert_called_once_with( + mock_attach.assert_called_once_with( self.context, instance, self.volume_api, self.virt_driver) # Make sure theses are not called - mock_get_snapshot.assert_not_called() - mock_create.assert_not_called() + self.volume_api.get_snapshot.assert_not_called() + self.volume_api.create.assert_not_called() def test_blank_attach_fail_volume(self): no_blank_volume = self.volblank_bdm_dict.copy() @@ -1149,30 +1134,26 @@ class TestDriverBlockDevice(test.NoDBTestCase): **{'uuid': uuids.uuid}) volume = {'id': 'fake-volume-id-2', 'display_name': '%s-blank-vol' % uuids.uuid} + self.volume_api.create.return_value = volume + wait_func = mock.MagicMock() + mock_exception = exception.VolumeNotCreated(volume_id=volume['id'], + seconds=1, + attempts=1, + volume_status='error') + wait_func.side_effect = mock_exception + self.assertRaises(exception.VolumeNotCreated, + test_bdm.attach, context=self.context, + instance=instance, + volume_api=self.volume_api, + virt_driver=self.virt_driver, + wait_func=wait_func) - with test.nested( - 
mock.patch.object(self.volume_api, 'create', return_value=volume), - mock.patch.object(self.volume_api, 'delete'), - ) as (vol_create, vol_delete): - wait_func = mock.MagicMock() - mock_exception = exception.VolumeNotCreated(volume_id=volume['id'], - seconds=1, - attempts=1, - volume_status='error') - wait_func.side_effect = mock_exception - self.assertRaises(exception.VolumeNotCreated, - test_bdm.attach, context=self.context, - instance=instance, - volume_api=self.volume_api, - virt_driver=self.virt_driver, - wait_func=wait_func) - - vol_create.assert_called_once_with( - self.context, test_bdm.volume_size, - '%s-blank-vol' % uuids.uuid, - '', volume_type=None, availability_zone=None) - vol_delete.assert_called_once_with( - self.context, volume['id']) + self.volume_api.create.assert_called_once_with( + self.context, test_bdm.volume_size, + '%s-blank-vol' % uuids.uuid, + '', volume_type=None, availability_zone=None) + self.volume_api.delete.assert_called_once_with( + self.context, volume['id']) def test_blank_attach_volume(self): no_blank_volume = self.volblank_bdm_dict.copy() @@ -1481,13 +1462,9 @@ class TestDriverBlockDevice(test.NoDBTestCase): 'display_name': 'fake-snapshot-vol'} self.stub_volume_create(volume) - with test.nested( - mock.patch.object(self.volume_api, 'get_snapshot', - return_value=snapshot), - mock.patch.object(volume_class, 'attach') - ) as ( - vol_get_snap, vol_attach - ): + self.volume_api.get_snapshot.return_value = snapshot + + with mock.patch.object(volume_class, 'attach') as vol_attach: test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) diff --git a/nova/tests/unit/virt/test_images.py b/nova/tests/unit/virt/test_images.py index 085b169db3..563330b541 100644 --- a/nova/tests/unit/virt/test_images.py +++ b/nova/tests/unit/virt/test_images.py @@ -16,6 +16,8 @@ import os import mock from oslo_concurrency import processutils +from oslo_serialization import jsonutils +from oslo_utils import imageutils from nova.compute 
import utils as compute_utils from nova import exception @@ -135,3 +137,47 @@ class QemuTestCase(test.NoDBTestCase): '-O', 'out_format', '-f', 'in_format', 'source', 'dest') mock_disk_op_sema.__enter__.assert_called_once() self.assertTupleEqual(expected, mock_execute.call_args[0]) + + def test_convert_image_vmdk_allowed_list_checking(self): + info = {'format': 'vmdk', + 'format-specific': { + 'type': 'vmdk', + 'data': { + 'create-type': 'monolithicFlat', + }}} + + # If the format is not in the allowed list, we should get an error + self.assertRaises(exception.ImageUnacceptable, + images.check_vmdk_image, 'foo', + imageutils.QemuImgInfo(jsonutils.dumps(info), + format='json')) + + # With the format in the allowed list, no error + self.flags(vmdk_allowed_types=['streamOptimized', 'monolithicFlat', + 'monolithicSparse'], + group='compute') + images.check_vmdk_image('foo', + imageutils.QemuImgInfo(jsonutils.dumps(info), + format='json')) + + # With an empty list, allow nothing + self.flags(vmdk_allowed_types=[], group='compute') + self.assertRaises(exception.ImageUnacceptable, + images.check_vmdk_image, 'foo', + imageutils.QemuImgInfo(jsonutils.dumps(info), + format='json')) + + @mock.patch.object(images, 'fetch') + @mock.patch('nova.privsep.qemu.unprivileged_qemu_img_info') + def test_fetch_checks_vmdk_rules(self, mock_info, mock_fetch): + info = {'format': 'vmdk', + 'format-specific': { + 'type': 'vmdk', + 'data': { + 'create-type': 'monolithicFlat', + }}} + mock_info.return_value = jsonutils.dumps(info) + with mock.patch('os.path.exists', return_value=True): + e = self.assertRaises(exception.ImageUnacceptable, + images.fetch_to_raw, None, 'foo', 'anypath') + self.assertIn('Invalid VMDK create-type specified', str(e)) diff --git a/nova/tests/unit/virt/vmwareapi/test_images.py b/nova/tests/unit/virt/vmwareapi/test_images.py index 7cfec00c97..b3a3cfd941 100644 --- a/nova/tests/unit/virt/vmwareapi/test_images.py +++ b/nova/tests/unit/virt/vmwareapi/test_images.py @@ 
-117,13 +117,11 @@ class VMwareImagesTestCase(test.NoDBTestCase): mock.patch.object(images.IMAGE_API, 'download'), mock.patch.object(images, 'image_transfer'), mock.patch.object(images, '_build_shadow_vm_config_spec'), - mock.patch.object(session, '_call_method'), mock.patch.object(vm_util, 'get_vmdk_info') ) as (mock_image_api_get, mock_image_api_download, mock_image_transfer, mock_build_shadow_vm_config_spec, - mock_call_method, mock_get_vmdk_info): image_data = {'id': 'fake-id', 'disk_format': 'vmdk', @@ -172,7 +170,7 @@ class VMwareImagesTestCase(test.NoDBTestCase): mock_write_handle) mock_get_vmdk_info.assert_called_once_with( session, mock.sentinel.vm_ref, 'fake-vm') - mock_call_method.assert_called_once_with( + session._call_method.assert_called_once_with( session.vim, "UnregisterVM", mock.sentinel.vm_ref) @mock.patch('oslo_vmware.rw_handles.ImageReadHandle') @@ -188,13 +186,11 @@ class VMwareImagesTestCase(test.NoDBTestCase): mock.patch.object(images.IMAGE_API, 'download'), mock.patch.object(images, 'image_transfer'), mock.patch.object(images, '_build_shadow_vm_config_spec'), - mock.patch.object(session, '_call_method'), mock.patch.object(vm_util, 'get_vmdk_info') ) as (mock_image_api_get, mock_image_api_download, mock_image_transfer, mock_build_shadow_vm_config_spec, - mock_call_method, mock_get_vmdk_info): image_data = {'id': 'fake-id', 'disk_format': 'vmdk', @@ -220,7 +216,7 @@ class VMwareImagesTestCase(test.NoDBTestCase): mock_image_transfer.assert_called_once_with(mock_read_handle, mock_write_handle) - mock_call_method.assert_called_once_with( + session._call_method.assert_called_once_with( session.vim, "UnregisterVM", mock.sentinel.vm_ref) mock_get_vmdk_info.assert_called_once_with( session, mock.sentinel.vm_ref, 'fake-vm') diff --git a/nova/tests/unit/volume/test_cinder.py b/nova/tests/unit/volume/test_cinder.py index 0c170c05e4..ffa46ce2aa 100644 --- a/nova/tests/unit/volume/test_cinder.py +++ b/nova/tests/unit/volume/test_cinder.py @@ -520,16 
+520,15 @@ class CinderApiTestCase(test.NoDBTestCase): @mock.patch('nova.volume.cinder.cinderclient') def test_attachment_delete_failed(self, mock_cinderclient, mock_log): mock_cinderclient.return_value.attachments.delete.side_effect = ( - cinder_exception.NotFound(404, '404')) + cinder_exception.BadRequest(400, '400')) attachment_id = uuids.attachment - ex = self.assertRaises(exception.VolumeAttachmentNotFound, + ex = self.assertRaises(exception.InvalidInput, self.api.attachment_delete, self.ctx, attachment_id) - self.assertEqual(404, ex.code) - self.assertIn(attachment_id, str(ex)) + self.assertEqual(400, ex.code) @mock.patch('nova.volume.cinder.cinderclient', side_effect=exception.CinderAPIVersionNotAvailable( @@ -546,6 +545,16 @@ class CinderApiTestCase(test.NoDBTestCase): skip_version_check=True) @mock.patch('nova.volume.cinder.cinderclient') + def test_attachment_delete_not_found(self, mock_cinderclient): + mock_cinderclient.return_value.attachments.delete.side_effect = ( + cinder_exception.ClientException(404)) + + attachment_id = uuids.attachment + self.api.attachment_delete(self.ctx, attachment_id) + + self.assertEqual(1, mock_cinderclient.call_count) + + @mock.patch('nova.volume.cinder.cinderclient') def test_attachment_delete_internal_server_error(self, mock_cinderclient): mock_cinderclient.return_value.attachments.delete.side_effect = ( cinder_exception.ClientException(500)) @@ -569,6 +578,29 @@ class CinderApiTestCase(test.NoDBTestCase): self.assertEqual(2, mock_cinderclient.call_count) @mock.patch('nova.volume.cinder.cinderclient') + def test_attachment_delete_gateway_timeout(self, mock_cinderclient): + mock_cinderclient.return_value.attachments.delete.side_effect = ( + cinder_exception.ClientException(504)) + + self.assertRaises(cinder_exception.ClientException, + self.api.attachment_delete, + self.ctx, uuids.attachment_id) + + self.assertEqual(5, mock_cinderclient.call_count) + + @mock.patch('nova.volume.cinder.cinderclient') + def 
test_attachment_delete_gateway_timeout_do_not_raise( + self, mock_cinderclient): + # generate exception, and then have a normal return on the next retry + mock_cinderclient.return_value.attachments.delete.side_effect = [ + cinder_exception.ClientException(504), None] + + attachment_id = uuids.attachment + self.api.attachment_delete(self.ctx, attachment_id) + + self.assertEqual(2, mock_cinderclient.call_count) + + @mock.patch('nova.volume.cinder.cinderclient') def test_attachment_delete_bad_request_exception(self, mock_cinderclient): mock_cinderclient.return_value.attachments.delete.side_effect = ( cinder_exception.BadRequest(400)) @@ -1243,3 +1275,14 @@ class CinderClientTestCase(test.NoDBTestCase): admin_ctx = context.get_admin_context() params = cinder._get_cinderclient_parameters(admin_ctx) self.assertEqual(params[0], mock_admin_auth) + + @mock.patch('nova.service_auth._SERVICE_AUTH') + @mock.patch('nova.volume.cinder._ADMIN_AUTH') + def test_admin_context_without_user_token_but_with_service_token( + self, mock_admin_auth, mock_service_auth + ): + self.flags(send_service_user_token=True, group='service_user') + admin_ctx = context.get_admin_context() + params = cinder._get_cinderclient_parameters(admin_ctx) + self.assertEqual(mock_admin_auth, params[0].user_auth) + self.assertEqual(mock_service_auth, params[0].service_auth) |