-rw-r--r--  .zuul.yaml  19
-rw-r--r--  doc/source/admin/configuration/cross-cell-resize.rst  2
-rw-r--r--  doc/source/admin/configuration/index.rst  1
-rw-r--r--  doc/source/admin/configuration/service-user-token.rst  59
-rw-r--r--  doc/source/admin/index.rst  1
-rw-r--r--  doc/source/admin/live-migration-usage.rst  2
-rw-r--r--  doc/source/admin/migrate-instance-with-snapshot.rst  2
-rw-r--r--  doc/source/admin/support-compute.rst  64
-rw-r--r--  doc/source/admin/vdpa.rst  92
-rw-r--r--  doc/source/contributor/development-environment.rst  2
-rw-r--r--  doc/source/install/compute-install-obs.rst  20
-rw-r--r--  doc/source/install/compute-install-rdo.rst  20
-rw-r--r--  doc/source/install/compute-install-ubuntu.rst  20
-rw-r--r--  doc/source/install/controller-install-obs.rst  20
-rw-r--r--  doc/source/install/controller-install-rdo.rst  20
-rw-r--r--  doc/source/install/controller-install-ubuntu.rst  20
-rw-r--r--  nova/api/openstack/compute/flavor_access.py  9
-rw-r--r--  nova/api/openstack/compute/remote_consoles.py  3
-rw-r--r--  nova/api/openstack/compute/services.py  7
-rw-r--r--  nova/api/openstack/identity.py  22
-rw-r--r--  nova/cmd/manage.py  4
-rw-r--r--  nova/cmd/status.py  11
-rw-r--r--  nova/compute/api.py  24
-rw-r--r--  nova/compute/manager.py  86
-rw-r--r--  nova/compute/resource_tracker.py  2
-rw-r--r--  nova/conductor/manager.py  34
-rw-r--r--  nova/conductor/tasks/live_migrate.py  5
-rw-r--r--  nova/conductor/tasks/migrate.py  4
-rw-r--r--  nova/conf/compute.py  21
-rw-r--r--  nova/conf/workarounds.py  7
-rw-r--r--  nova/db/main/api.py  7
-rw-r--r--  nova/limit/placement.py  6
-rw-r--r--  nova/network/neutron.py  95
-rw-r--r--  nova/objects/cell_mapping.py  12
-rw-r--r--  nova/objects/pci_device.py  19
-rw-r--r--  nova/objects/request_spec.py  1
-rw-r--r--  nova/quota.py  7
-rw-r--r--  nova/scheduler/client/report.py  46
-rw-r--r--  nova/scheduler/manager.py  2
-rw-r--r--  nova/scheduler/request_filter.py  2
-rw-r--r--  nova/service_auth.py  6
-rw-r--r--  nova/test.py  19
-rw-r--r--  nova/tests/fixtures/libvirt.py  68
-rw-r--r--  nova/tests/fixtures/nova.py  20
-rw-r--r--  nova/tests/functional/api_sample_tests/test_remote_consoles.py  20
-rw-r--r--  nova/tests/functional/compute/test_resource_tracker.py  10
-rw-r--r--  nova/tests/functional/integrated_helpers.py  3
-rw-r--r--  nova/tests/functional/libvirt/base.py  148
-rw-r--r--  nova/tests/functional/libvirt/test_device_bus_migration.py  8
-rw-r--r--  nova/tests/functional/libvirt/test_numa_live_migration.py  12
-rw-r--r--  nova/tests/functional/libvirt/test_numa_servers.py  6
-rw-r--r--  nova/tests/functional/libvirt/test_pci_sriov_servers.py  743
-rw-r--r--  nova/tests/functional/libvirt/test_reshape.py  21
-rw-r--r--  nova/tests/functional/libvirt/test_vgpu.py  35
-rw-r--r--  nova/tests/functional/libvirt/test_vtpm.py  2
-rw-r--r--  nova/tests/functional/regressions/test_bug_1628606.py  60
-rw-r--r--  nova/tests/functional/regressions/test_bug_1781286.py  30
-rw-r--r--  nova/tests/functional/regressions/test_bug_1888395.py  40
-rw-r--r--  nova/tests/functional/regressions/test_bug_1890244.py  96
-rw-r--r--  nova/tests/functional/regressions/test_bug_1896463.py  8
-rw-r--r--  nova/tests/functional/regressions/test_bug_1951656.py  73
-rw-r--r--  nova/tests/functional/test_aggregates.py  24
-rw-r--r--  nova/tests/functional/test_images.py  8
-rw-r--r--  nova/tests/functional/test_server_group.py  15
-rw-r--r--  nova/tests/functional/test_server_rescue.py  86
-rw-r--r--  nova/tests/functional/test_servers.py  19
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_create_backup.py  4
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_flavor_access.py  25
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_hypervisors.py  394
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_limits.py  164
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_migrate_server.py  3
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_quotas.py  5
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_remote_consoles.py  12
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_server_actions.py  38
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_server_group_quotas.py  7
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_servers.py  30
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_volumes.py  10
-rw-r--r--  nova/tests/unit/cmd/test_status.py  16
-rw-r--r--  nova/tests/unit/compute/test_api.py  196
-rw-r--r--  nova/tests/unit/compute/test_compute.py  112
-rw-r--r--  nova/tests/unit/compute/test_compute_mgr.py  199
-rw-r--r--  nova/tests/unit/compute/test_resource_tracker.py  12
-rw-r--r--  nova/tests/unit/conductor/tasks/test_live_migrate.py  30
-rw-r--r--  nova/tests/unit/conductor/test_conductor.py  35
-rw-r--r--  nova/tests/unit/console/test_websocketproxy.py  61
-rw-r--r--  nova/tests/unit/db/main/test_api.py  18
-rw-r--r--  nova/tests/unit/network/test_neutron.py  420
-rw-r--r--  nova/tests/unit/objects/test_request_spec.py  24
-rw-r--r--  nova/tests/unit/pci/test_stats.py  23
-rw-r--r--  nova/tests/unit/policies/test_servers.py  5
-rw-r--r--  nova/tests/unit/scheduler/client/test_report.py  55
-rw-r--r--  nova/tests/unit/test_metadata.py  23
-rw-r--r--  nova/tests/unit/test_service_auth.py  10
-rw-r--r--  nova/tests/unit/test_test.py  15
-rw-r--r--  nova/tests/unit/virt/hyperv/test_vmops.py  19
-rw-r--r--  nova/tests/unit/virt/hyperv/test_volumeops.py  26
-rw-r--r--  nova/tests/unit/virt/ironic/test_driver.py  3
-rw-r--r--  nova/tests/unit/virt/libvirt/test_config.py  26
-rw-r--r--  nova/tests/unit/virt/libvirt/test_driver.py  141
-rw-r--r--  nova/tests/unit/virt/libvirt/test_guest.py  22
-rw-r--r--  nova/tests/unit/virt/libvirt/test_host.py  76
-rw-r--r--  nova/tests/unit/virt/libvirt/test_vif.py  34
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py  20
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_iscsi.py  9
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_lightos.py  29
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_nvme.py  25
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_scaleio.py  8
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_storpool.py  16
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_vzstorage.py  8
-rw-r--r--  nova/tests/unit/virt/test_block_device.py  239
-rw-r--r--  nova/tests/unit/virt/test_images.py  46
-rw-r--r--  nova/tests/unit/virt/vmwareapi/test_images.py  8
-rw-r--r--  nova/tests/unit/volume/test_cinder.py  51
-rw-r--r--  nova/virt/fake.py  35
-rw-r--r--  nova/virt/hyperv/vmops.py  2
-rw-r--r--  nova/virt/hyperv/volumeops.py  12
-rw-r--r--  nova/virt/images.py  31
-rw-r--r--  nova/virt/libvirt/config.py  3
-rw-r--r--  nova/virt/libvirt/driver.py  74
-rw-r--r--  nova/virt/libvirt/guest.py  7
-rw-r--r--  nova/virt/libvirt/host.py  24
-rw-r--r--  nova/virt/libvirt/utils.py  28
-rw-r--r--  nova/virt/libvirt/volume/fibrechannel.py  7
-rw-r--r--  nova/virt/libvirt/volume/fs.py  2
-rw-r--r--  nova/virt/libvirt/volume/iscsi.py  7
-rw-r--r--  nova/virt/libvirt/volume/lightos.py  7
-rw-r--r--  nova/virt/libvirt/volume/nvme.py  6
-rw-r--r--  nova/virt/libvirt/volume/quobyte.py  2
-rw-r--r--  nova/virt/libvirt/volume/scaleio.py  7
-rw-r--r--  nova/virt/libvirt/volume/smbfs.py  2
-rw-r--r--  nova/virt/libvirt/volume/storpool.py  5
-rw-r--r--  nova/virt/libvirt/volume/volume.py  2
-rw-r--r--  nova/virt/libvirt/volume/vzstorage.py  5
-rw-r--r--  nova/volume/cinder.py  26
-rw-r--r--  releasenotes/notes/bug-1942329-22b08fa4b322881d.yaml  9
-rw-r--r--  releasenotes/notes/bug-1978444-db46df5f3d5ea19e.yaml  7
-rw-r--r--  releasenotes/notes/bug-1981813-vnic-type-change-9f3e16fae885b57f.yaml  9
-rw-r--r--  releasenotes/notes/bug-1982284-libvirt-handle-no-ram-info-was-set-99784934ed80fd72.yaml  11
-rw-r--r--  releasenotes/notes/fix-group-policy-validation-with-deleted-groups-4f685fd1d6b84192.yaml  13
-rw-r--r--  releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml  6
-rw-r--r--  releasenotes/notes/service-user-token-421d067c16257782.yaml  11
-rw-r--r--  releasenotes/notes/skip-hypervisor-version-check-on-lm-a87f2dcb4f8bf0f2.yaml  13
-rw-r--r--  releasenotes/notes/vdpa-move-ops-a7b3799807807a92.yaml  11
-rwxr-xr-x  tools/check-cherry-picks.sh  2
144 files changed, 4147 insertions, 1371 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index dcae6117a5..3866c9af9d 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -90,7 +90,7 @@
description: |
Run tempest live migration tests against local qcow2 ephemeral storage
and shared LVM/iSCSI cinder volumes.
- irrelevant-files: &nova-base-irrelevant-files
+ irrelevant-files:
- ^api-.*$
- ^(test-|)requirements.txt$
- ^.*\.rst$
@@ -101,6 +101,7 @@
- ^nova/policies/.*$
- ^nova/tests/.*$
- ^nova/test.py$
+ - ^nova/virt/ironic/.*$
- ^releasenotes/.*$
- ^setup.cfg$
- ^tools/.*$
@@ -130,7 +131,21 @@
the "iptables_hybrid" securitygroup firewall driver, aka "hybrid plug".
The external events interactions between Nova and Neutron in these
situations has historically been fragile. This job exercises them.
- irrelevant-files: *nova-base-irrelevant-files
+ irrelevant-files: &nova-base-irrelevant-files
+ - ^api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/policies/.*$
+ - ^nova/tests/.*$
+ - ^nova/test.py$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tools/.*$
+ - ^tox.ini$
vars:
tox_envlist: all
tempest_test_regex: (^tempest\..*compute\..*(migration|resize|reboot).*)
diff --git a/doc/source/admin/configuration/cross-cell-resize.rst b/doc/source/admin/configuration/cross-cell-resize.rst
index e51e425774..0c34fd13f5 100644
--- a/doc/source/admin/configuration/cross-cell-resize.rst
+++ b/doc/source/admin/configuration/cross-cell-resize.rst
@@ -284,7 +284,7 @@ Troubleshooting
Timeouts
~~~~~~~~
-Configure a :ref:`service user <user_token_timeout>` in case the user token
+Configure a :ref:`service user <service_user_token>` in case the user token
times out, e.g. during the snapshot and download of a large server image.
If RPC calls are timing out with a ``MessagingTimeout`` error in the logs,
diff --git a/doc/source/admin/configuration/index.rst b/doc/source/admin/configuration/index.rst
index 233597b1fe..f5b6fde9da 100644
--- a/doc/source/admin/configuration/index.rst
+++ b/doc/source/admin/configuration/index.rst
@@ -19,6 +19,7 @@ A list of config options based on different topics can be found below:
.. toctree::
:maxdepth: 1
+ /admin/configuration/service-user-token
/admin/configuration/api
/admin/configuration/resize
/admin/configuration/cross-cell-resize
diff --git a/doc/source/admin/configuration/service-user-token.rst b/doc/source/admin/configuration/service-user-token.rst
new file mode 100644
index 0000000000..740730af1d
--- /dev/null
+++ b/doc/source/admin/configuration/service-user-token.rst
@@ -0,0 +1,59 @@
+.. _service_user_token:
+
+===================
+Service User Tokens
+===================
+
+.. note::
+
+ Configuration of service user tokens is **required** for every Nova service
+ for security reasons. See https://bugs.launchpad.net/nova/+bug/2004555 for
+ details.
+
+Configure Nova to send service user tokens alongside regular user tokens when
+making REST API calls to other services. The identity service (Keystone) will
+authenticate a request using the service user token if the regular user token
+has expired.
+
+This is important when long-running operations such as live migration or
+snapshot take long enough to exceed the expiry of the user token. Without the
+service token, post operations such as cleanup after a live migration could
+fail when Nova calls other service APIs like block-storage (Cinder) or
+networking (Neutron).
+
+The service token is also used by services to validate whether the API caller
+is a service. Some service APIs are restricted to service users only.
+
+To set up service tokens, create a ``nova`` service user and ``service`` role
+in the identity service (Keystone) and assign the ``service`` role to the
+``nova`` service user.
+
+Then, configure the :oslo.config:group:`service_user` section of the Nova
+configuration file, for example:
+
+.. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://104.130.216.102/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = secretservice
+ ...
+
+And configure the other identity options as necessary for the service user,
+much like you would configure nova to work with the image service (Glance) or
+networking service (Neutron).
+
+.. note::
+
+ Please note that the role assigned to the :oslo.config:group:`service_user`
+ needs to be in the configured
+ :oslo.config:option:`keystone_authtoken.service_token_roles` of other
+ services such as block-storage (Cinder), image (Glance), and networking
+ (Neutron).
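+
+Under the hood, Nova relies on the ``keystoneauth1`` library to send the
+service token alongside the user token. The following is a minimal,
+illustrative sketch (not Nova's actual plumbing) of how a user token can be
+wrapped with a service token using ``keystoneauth1``; the credential values
+mirror the example above and the token/project placeholders are assumptions:
+
+.. code-block:: python
+
+    from keystoneauth1 import service_token
+    from keystoneauth1 import session
+    from keystoneauth1.identity import v3
+
+    # Service user credentials, mirroring the [service_user] section above.
+    service_auth = v3.Password(
+        auth_url='https://104.130.216.102/identity',
+        username='nova', password='secretservice',
+        project_name='service',
+        user_domain_name='Default', project_domain_name='Default')
+
+    # The end user's (possibly soon-to-expire) token, e.g. rebuilt from an
+    # incoming request context. Placeholder values, for illustration only.
+    user_auth = v3.Token(
+        auth_url='https://104.130.216.102/identity',
+        token='<user token>', project_id='<project id>')
+
+    # Requests made with this session send both the X-Auth-Token and
+    # X-Service-Token headers.
+    wrapped = service_token.ServiceTokenAuthWrapper(user_auth, service_auth)
+    sess = session.Session(auth=wrapped)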
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index e83f680df2..34babb5f15 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -198,6 +198,7 @@ instance for these kind of workloads.
virtual-gpu
file-backed-memory
ports-with-resource-requests
+ vdpa
virtual-persistent-memory
emulated-tpm
uefi
diff --git a/doc/source/admin/live-migration-usage.rst b/doc/source/admin/live-migration-usage.rst
index 783ab5e27c..a1e7f18756 100644
--- a/doc/source/admin/live-migration-usage.rst
+++ b/doc/source/admin/live-migration-usage.rst
@@ -320,4 +320,4 @@ To make live-migration succeed, you have several options:
If live migrations routinely timeout or fail during cleanup operations due
to the user token timing out, consider configuring nova to use
-:ref:`service user tokens <user_token_timeout>`.
+:ref:`service user tokens <service_user_token>`.
diff --git a/doc/source/admin/migrate-instance-with-snapshot.rst b/doc/source/admin/migrate-instance-with-snapshot.rst
index 65059679ab..230431091e 100644
--- a/doc/source/admin/migrate-instance-with-snapshot.rst
+++ b/doc/source/admin/migrate-instance-with-snapshot.rst
@@ -67,7 +67,7 @@ Create a snapshot of the instance
If snapshot operations routinely fail because the user token times out
while uploading a large disk image, consider configuring nova to use
- :ref:`service user tokens <user_token_timeout>`.
+ :ref:`service user tokens <service_user_token>`.
#. Use the :command:`openstack image list` command to check the status
until the status is ``ACTIVE``:
diff --git a/doc/source/admin/support-compute.rst b/doc/source/admin/support-compute.rst
index 8522e51d79..31e32fd1dd 100644
--- a/doc/source/admin/support-compute.rst
+++ b/doc/source/admin/support-compute.rst
@@ -478,67 +478,3 @@ Ensure the ``compute`` endpoint in the identity service catalog is pointing
at ``/v2.1`` instead of ``/v2``. The former route supports microversions,
while the latter route is considered the legacy v2.0 compatibility-mode
route which renders all requests as if they were made on the legacy v2.0 API.
-
-
-.. _user_token_timeout:
-
-User token times out during long-running operations
----------------------------------------------------
-
-Problem
-~~~~~~~
-
-Long-running operations such as live migration or snapshot can sometimes
-overrun the expiry of the user token. In such cases, post operations such
-as cleaning up after a live migration can fail when the nova-compute service
-needs to cleanup resources in other services, such as in the block-storage
-(cinder) or networking (neutron) services.
-
-For example:
-
-.. code-block:: console
-
- 2018-12-17 13:47:29.591 16987 WARNING nova.virt.libvirt.migration [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Live migration not completed after 2400 sec
- 2018-12-17 13:47:30.097 16987 WARNING nova.virt.libvirt.driver [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Migration operation was cancelled
- 2018-12-17 13:47:30.299 16987 ERROR nova.virt.libvirt.driver [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Live Migration failure: operation aborted: migration job: canceled by client: libvirtError: operation aborted: migration job: canceled by client
- 2018-12-17 13:47:30.685 16987 INFO nova.compute.manager [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Swapping old allocation on 3e32d595-bd1f-4136-a7f4-c6703d2fbe18 held by migration 17bec61d-544d-47e0-a1c1-37f9d7385286 for instance
- 2018-12-17 13:47:32.450 16987 ERROR nova.volume.cinder [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] Delete attachment failed for attachment 58997d5b-24f0-4073-819e-97916fb1ee19. Error: The request you have made requires authentication. (HTTP 401) Code: 401: Unauthorized: The request you have made requires authentication. (HTTP 401)
-
-Solution
-~~~~~~~~
-
-Configure nova to use service user tokens to supplement the regular user token
-used to initiate the operation. The identity service (keystone) will then
-authenticate a request using the service user token if the user token has
-already expired.
-
-To use, create a service user in the identity service similar as you would when
-creating the ``nova`` service user.
-
-Then configure the :oslo.config:group:`service_user` section of the nova
-configuration file, for example:
-
-.. code-block:: ini
-
- [service_user]
- send_service_user_token = True
- auth_type = password
- project_domain_name = Default
- project_name = service
- user_domain_name = Default
- password = secretservice
- username = nova
- auth_url = https://104.130.216.102/identity
- ...
-
-And configure the other identity options as necessary for the service user,
-much like you would configure nova to work with the image service (glance)
-or networking service.
-
-.. note::
-
- Please note that the role of the :oslo.config:group:`service_user` you
- configure needs to be a superset of
- :oslo.config:option:`keystone_authtoken.service_token_roles` (The option
- :oslo.config:option:`keystone_authtoken.service_token_roles` is configured
- in cinder, glance and neutron).
diff --git a/doc/source/admin/vdpa.rst b/doc/source/admin/vdpa.rst
new file mode 100644
index 0000000000..8583d327cc
--- /dev/null
+++ b/doc/source/admin/vdpa.rst
@@ -0,0 +1,92 @@
+============================
+Using ports vnic_type='vdpa'
+============================
+.. versionadded:: 23.0.0 (Wallaby)
+
+ Introduced support for vDPA.
+
+.. important::
+ The functionality described below is only supported by the
+ libvirt/KVM virt driver.
+
+The kernel vDPA (virtio Data Path Acceleration) framework
+provides a vendor-independent framework for offloading data-plane
+processing to software or hardware virtio device backends.
+While the kernel vDPA framework supports many types of vDPA devices,
+at this time nova only supports ``virtio-net`` devices
+using the ``vhost-vdpa`` front-end driver. Support for ``virtio-blk`` or
+``virtio-gpu`` may be added in the future but is not currently planned
+for any specific release.
+
+vDPA device tracking
+~~~~~~~~~~~~~~~~~~~~
+When implementing support for vDPA-based neutron ports, one of the first
+decisions nova had to make was how to model the availability of vDPA devices
+and the capability to virtualize them. As the initial use-case for this
+technology was to offload networking to hardware-offloaded OVS via neutron
+ports, the decision was made to extend the existing PCI tracker, which is
+used for SR-IOV and PCI passthrough, to support vDPA devices. A simplifying
+assumption was made that the parent device of a vDPA device is an SR-IOV
+Virtual Function (VF). As a result, software-only vDPA devices such as those
+created by the kernel ``vdpa-sim`` sample module are not supported.
+
+To make a vDPA device available for scheduling to guests, the operator should
+include the device, identified by the PCI address or the vendor ID and
+product ID of the parent VF, in the PCI ``device_spec``, as in the example
+below. See :nova-doc:`pci-passthrough <admin/pci-passthrough>` for details.
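+
+A minimal, illustrative ``nova.conf`` entry exposing a parent VF by PCI
+address might look like the following; the address is an assumption for a
+specific host:
+
+.. code-block:: ini
+
+    [pci]
+    device_spec = { "address": "0000:3b:00.2" }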
+
+Nova will not create the VFs or vDPA devices automatically. It is expected
+that the operator will allocate them before starting the nova-compute agent.
+While no specific mechanism is prescribed for this, udev rules or systemd
+service files are generally the recommended approach to ensure the devices
+are created consistently across reboots, as sketched below.
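+
+The following sketch is illustrative only; the interface name, PCI address
+and the use of the iproute2 ``vdpa`` tool are assumptions about a specific
+environment:
+
+.. code-block:: bash
+
+    # create 4 VFs on the parent PF
+    echo 4 > /sys/class/net/enp59s0f0/device/sriov_numvfs
+    # create a vhost-vdpa device backed by one of the new VFs
+    vdpa dev add name vdpa0 mgmtdev pci/0000:3b:00.2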
+
+.. note::
+ As vDPA is an offload only for the data plane and not the control plane, a
+ vDPA control plane is required to properly support vDPA device passthrough.
+ At the time of writing only hardware-offloaded OVS is supported when using
+ vDPA with nova. Because of this, vDPA devices cannot be requested using the
+ PCI alias. While nova could allow vDPA devices to be requested by the
+ flavor using a PCI alias, we would not be able to correctly configure the
+ device as there would be no suitable control plane. For this reason vDPA
+ devices are currently only consumable via neutron ports.
+
+Virt driver support
+~~~~~~~~~~~~~~~~~~~
+
+Supporting neutron ports with ``vnic_type=vdpa`` depends on the capability
+of the virt driver. At this time only the ``libvirt`` virt driver with KVM
+is fully supported. QEMU may also work but is untested.
+
+vDPA support depends on kernel 5.7+, Libvirt 6.9.0+ and QEMU 5.1+.
+
+vDPA lifecycle operations
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+At this time vDPA ports can only be added to a VM when it is first created.
+To do this, the normal SR-IOV workflow is used, whereby the port is first
+created in neutron and passed to nova as part of the server create request.
+
+.. code-block:: bash
+
+ openstack port create --network <my network> --vnic-type vdpa vdpa-port
+ openstack server create --flavor <my-flavor> --image <my-image> --port <vdpa-port uuid> vdpa-vm
+
+When vDPA support was first introduced no move operations were supported.
+As this documentation was added in the change that enabled some move
+operations, the following should be interpreted both as a retrospective and a
+forward-looking viewpoint, and treated as a living document which will be
+updated as functionality evolves.
+
+* 23.0.0: initial support is added for creating a VM with vDPA ports; move
+  operations are blocked in the API but implemented in code.
+* 26.0.0: support for all move operations except live migration is tested
+  and the API blocks are removed.
+* 25.x.y: (planned) API block removal backported to stable/Yoga.
+* 24.x.y: (planned) API block removal backported to stable/Xena.
+* 23.x.y: (planned) API block removal backported to stable/Wallaby.
+* 26.0.0: (in progress) interface attach/detach, suspend/resume and hot plug
+  live migration are implemented to fully support all lifecycle operations
+  on instances with vDPA ports.
+
+.. note::
+ The ``(planned)`` and ``(in progress)`` qualifiers will be removed when those items are
+ completed. If your current version of the document contains those qualifiers then those
+ lifecycle operations are unsupported.
diff --git a/doc/source/contributor/development-environment.rst b/doc/source/contributor/development-environment.rst
index 32b8f8334e..3e19ef1ca2 100644
--- a/doc/source/contributor/development-environment.rst
+++ b/doc/source/contributor/development-environment.rst
@@ -197,7 +197,7 @@ Using fake computes for tests
The number of instances supported by fake computes is not limited by physical
constraints. It allows you to perform stress tests on a deployment with few
resources (typically a laptop). Take care to avoid using scheduler filters
-that will limit the number of instances per compute, such as ``AggregateCoreFilter``.
+that will limit the number of instances per compute, such as ``NumInstancesFilter``.
Fake computes can also be used in multi hypervisor-type deployments in order to
take advantage of fake and "real" computes during tests:
diff --git a/doc/source/install/compute-install-obs.rst b/doc/source/install/compute-install-obs.rst
index c5c1d29fb3..c227b6eba4 100644
--- a/doc/source/install/compute-install-obs.rst
+++ b/doc/source/install/compute-install-obs.rst
@@ -92,6 +92,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
.. path /etc/nova/nova.conf
diff --git a/doc/source/install/compute-install-rdo.rst b/doc/source/install/compute-install-rdo.rst
index 0a5ad685a6..0c6203a667 100644
--- a/doc/source/install/compute-install-rdo.rst
+++ b/doc/source/install/compute-install-rdo.rst
@@ -84,6 +84,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
.. path /etc/nova/nova.conf
diff --git a/doc/source/install/compute-install-ubuntu.rst b/doc/source/install/compute-install-ubuntu.rst
index 8605c73316..baf0585e52 100644
--- a/doc/source/install/compute-install-ubuntu.rst
+++ b/doc/source/install/compute-install-ubuntu.rst
@@ -74,6 +74,26 @@ Install and configure components
Comment out or remove any other options in the
``[keystone_authtoken]`` section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
.. path /etc/nova/nova.conf
diff --git a/doc/source/install/controller-install-obs.rst b/doc/source/install/controller-install-obs.rst
index 18499612c3..01b7bb0f5a 100644
--- a/doc/source/install/controller-install-obs.rst
+++ b/doc/source/install/controller-install-obs.rst
@@ -260,6 +260,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the
management interface IP address of the controller node:
diff --git a/doc/source/install/controller-install-rdo.rst b/doc/source/install/controller-install-rdo.rst
index fd2419631e..b6098f1776 100644
--- a/doc/source/install/controller-install-rdo.rst
+++ b/doc/source/install/controller-install-rdo.rst
@@ -247,6 +247,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the
management interface IP address of the controller node:
diff --git a/doc/source/install/controller-install-ubuntu.rst b/doc/source/install/controller-install-ubuntu.rst
index 7282b0b2e2..1363a98ba8 100644
--- a/doc/source/install/controller-install-ubuntu.rst
+++ b/doc/source/install/controller-install-ubuntu.rst
@@ -237,6 +237,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the
management interface IP address of the controller node:
diff --git a/nova/api/openstack/compute/flavor_access.py b/nova/api/openstack/compute/flavor_access.py
index e17e6f0ddc..fc8df15db5 100644
--- a/nova/api/openstack/compute/flavor_access.py
+++ b/nova/api/openstack/compute/flavor_access.py
@@ -93,7 +93,14 @@ class FlavorActionController(wsgi.Controller):
vals = body['removeTenantAccess']
tenant = vals['tenant']
- identity.verify_project_id(context, tenant)
+ # It doesn't really matter if project exists or not: we can delete
+ # it from flavor's access list in both cases.
+ try:
+ identity.verify_project_id(context, tenant)
+ except webob.exc.HTTPBadRequest as identity_exc:
+ msg = "Project ID %s is not a valid project." % tenant
+ if msg not in identity_exc.explanation:
+ raise
# NOTE(gibi): We have to load a flavor from the db here as
# flavor.remove_access() will try to emit a notification and that needs
diff --git a/nova/api/openstack/compute/remote_consoles.py b/nova/api/openstack/compute/remote_consoles.py
index 36015542aa..7d374ef432 100644
--- a/nova/api/openstack/compute/remote_consoles.py
+++ b/nova/api/openstack/compute/remote_consoles.py
@@ -56,6 +56,9 @@ class RemoteConsolesController(wsgi.Controller):
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceNotReady as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
+ except exception.InstanceInvalidState as e:
+ common.raise_http_conflict_for_instance_invalid_state(
+ e, 'get_vnc_console', id)
except NotImplementedError:
common.raise_feature_not_supported()
diff --git a/nova/api/openstack/compute/services.py b/nova/api/openstack/compute/services.py
index 6deb84a7f1..e9d51d4d0c 100644
--- a/nova/api/openstack/compute/services.py
+++ b/nova/api/openstack/compute/services.py
@@ -48,13 +48,10 @@ class ServiceController(wsgi.Controller):
self.actions = {"enable": self._enable,
"disable": self._disable,
"disable-log-reason": self._disable_log_reason}
- self._placementclient = None # Lazy-load on first access.
@property
def placementclient(self):
- if self._placementclient is None:
- self._placementclient = report.SchedulerReportClient()
- return self._placementclient
+ return report.report_client_singleton()
def _get_services(self, req):
# The API services are filtered out since they are not RPC services
@@ -328,7 +325,7 @@ class ServiceController(wsgi.Controller):
"Failed to delete compute node resource provider "
"for compute node %s: %s",
compute_node.uuid, str(e))
- # remove the host_mapping of this host.
+ # Remove the host_mapping of this host.
try:
hm = objects.HostMapping.get_by_host(context, service.host)
hm.destroy()
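
A recurring change in this series replaces per-object, lazily constructed
``SchedulerReportClient`` instances with a shared, process-wide client. A
minimal sketch of what such a singleton accessor could look like in
``nova/scheduler/client/report.py`` follows; it is illustrative and the
actual implementation may differ:

.. code-block:: python

    PLACEMENT_CLIENT = None

    def report_client_singleton():
        """Return a process-wide SchedulerReportClient.

        Note that the compute manager intentionally keeps constructing its
        own private client, because that client holds local state such as
        the ProviderTree cache.
        """
        global PLACEMENT_CLIENT
        if PLACEMENT_CLIENT is None:
            PLACEMENT_CLIENT = SchedulerReportClient()
        return PLACEMENT_CLIENT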
diff --git a/nova/api/openstack/identity.py b/nova/api/openstack/identity.py
index 7ffc623fed..15ec884aea 100644
--- a/nova/api/openstack/identity.py
+++ b/nova/api/openstack/identity.py
@@ -27,24 +27,27 @@ def verify_project_id(context, project_id):
"""verify that a project_id exists.
This attempts to verify that a project id exists. If it does not,
- an HTTPBadRequest is emitted.
+ an HTTPBadRequest is emitted. Also HTTPBadRequest is emitted
+ if Keystone identity service version 3.0 is not found.
"""
adap = utils.get_ksa_adapter(
'identity', ksa_auth=context.get_auth_plugin(),
min_version=(3, 0), max_version=(3, 'latest'))
- failure = webob.exc.HTTPBadRequest(
- explanation=_("Project ID %s is not a valid project.") %
- project_id)
try:
resp = adap.get('/projects/%s' % project_id)
except kse.EndpointNotFound:
LOG.error(
- "Keystone identity service version 3.0 was not found. This might "
- "be because your endpoint points to the v2.0 versioned endpoint "
- "which is not supported. Please fix this.")
- raise failure
+ "Keystone identity service version 3.0 was not found. This "
+ "might be caused by Nova misconfiguration or Keystone "
+ "problems.")
+ msg = _("Nova was unable to find Keystone service endpoint.")
+ # TODO(astupnik). It may be reasonable to switch to HTTP 503
+ # (HTTP Service Unavailable) instead of HTTP Bad Request here.
+ # If the proper Keystone service is inaccessible, then technically
+ # this is a server-side error and not an error in Nova.
+ raise webob.exc.HTTPBadRequest(explanation=msg)
except kse.ClientException:
# something is wrong, like there isn't a keystone v3 endpoint,
# or nova isn't configured for the interface to talk to it;
@@ -57,7 +60,8 @@ def verify_project_id(context, project_id):
return True
elif resp.status_code == 404:
# we got access, and we know this project is not there
- raise failure
+ msg = _("Project ID %s is not a valid project.") % project_id
+ raise webob.exc.HTTPBadRequest(explanation=msg)
elif resp.status_code == 403:
# we don't have enough permission to verify this, so default
# to "it's ok".
diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py
index f704a42698..7067facde7 100644
--- a/nova/cmd/manage.py
+++ b/nova/cmd/manage.py
@@ -2217,7 +2217,7 @@ class PlacementCommands(object):
output(_('No cells to process.'))
return 4
- placement = report.SchedulerReportClient()
+ placement = report.report_client_singleton()
neutron = None
if heal_port_allocations:
@@ -2718,7 +2718,7 @@ class PlacementCommands(object):
if verbose:
output = lambda msg: print(msg)
- placement = report.SchedulerReportClient()
+ placement = report.report_client_singleton()
# Resets two in-memory dicts for knowing instances per compute node
self.cn_uuid_mapping = collections.defaultdict(tuple)
self.instances_mapping = collections.defaultdict(list)
diff --git a/nova/cmd/status.py b/nova/cmd/status.py
index 8a7041b062..2f310f0871 100644
--- a/nova/cmd/status.py
+++ b/nova/cmd/status.py
@@ -336,6 +336,15 @@ https://docs.openstack.org/latest/nova/admin/hw_machine_type.html"""))
return upgradecheck.Result(upgradecheck.Code.SUCCESS)
+ def _check_service_user_token(self):
+ if not CONF.service_user.send_service_user_token:
+ msg = (_("""
+Service user token configuration is required for all Nova services.
+For more details see the following:
+https://docs.openstack.org/latest/nova/admin/configuration/service-user-token.html""")) # noqa
+ return upgradecheck.Result(upgradecheck.Code.FAILURE, msg)
+ return upgradecheck.Result(upgradecheck.Code.SUCCESS)
+
# The format of the check functions is to return an upgradecheck.Result
# object with the appropriate upgradecheck.Code and details set. If the
# check hits warnings or failures then those should be stored in the
@@ -361,6 +370,8 @@ https://docs.openstack.org/latest/nova/admin/hw_machine_type.html"""))
(_('Older than N-1 computes'), _check_old_computes),
# Added in Wallaby
(_('hw_machine_type unset'), _check_machine_type_set),
+ # Added in Bobcat
+ (_('Service User Token Configuration'), _check_service_user_token),
)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 8758411d23..76c11658c2 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -384,7 +384,6 @@ class API:
self.image_api = image_api or glance.API()
self.network_api = network_api or neutron.API()
self.volume_api = volume_api or cinder.API()
- self._placementclient = None # Lazy-load on first access.
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.compute_task_api = conductor.ComputeTaskAPI()
self.servicegroup_api = servicegroup.API()
@@ -2573,9 +2572,7 @@ class API:
@property
def placementclient(self):
- if self._placementclient is None:
- self._placementclient = report.SchedulerReportClient()
- return self._placementclient
+ return report.report_client_singleton()
def _local_delete(self, context, instance, bdms, delete_type, cb):
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
@@ -4096,9 +4093,6 @@ class API:
# finally split resize and cold migration into separate code paths
@block_extended_resource_request
@block_port_accelerators()
- # FIXME(sean-k-mooney): Cold migrate and resize to different hosts
- # probably works but they have not been tested so block them for now
- @reject_vdpa_instances(instance_actions.RESIZE)
@block_accelerators()
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
@@ -4337,10 +4331,7 @@ class API:
allow_same_host = CONF.allow_resize_to_same_host
return allow_same_host
- # FIXME(sean-k-mooney): Shelve works but unshelve does not due to bug
- # #1851545, so block it for now
@block_port_accelerators()
- @reject_vdpa_instances(instance_actions.SHELVE)
@reject_vtpm_instances(instance_actions.SHELVE)
@block_accelerators(until_service=54)
@check_instance_lock
@@ -4563,6 +4554,7 @@ class API:
allow_bfv_rescue=False):
"""Rescue the given instance."""
+ image_meta = None
if rescue_image_ref:
try:
image_meta = image_meta_obj.ImageMeta.from_image_ref(
@@ -4583,6 +4575,8 @@ class API:
"image properties set")
raise exception.UnsupportedRescueImage(
image=rescue_image_ref)
+ else:
+ image_meta = instance.image_meta
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
@@ -4591,6 +4585,9 @@ class API:
volume_backed = compute_utils.is_volume_backed_instance(
context, instance, bdms)
+ allow_bfv_rescue &= 'hw_rescue_bus' in image_meta.properties and \
+ 'hw_rescue_device' in image_meta.properties
+
if volume_backed and allow_bfv_rescue:
cn = objects.ComputeNode.get_by_host_and_nodename(
context, instance.host, instance.node)
@@ -5482,8 +5479,6 @@ class API:
@block_extended_resource_request
@block_port_accelerators()
- # FIXME(sean-k-mooney): rebuild works but we have not tested evacuate yet
- @reject_vdpa_instances(instance_actions.EVACUATE)
@reject_vtpm_instances(instance_actions.EVACUATE)
@block_accelerators(until_service=SUPPORT_ACCELERATOR_SERVICE_FOR_REBUILD)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
@@ -6322,13 +6317,10 @@ class AggregateAPI:
def __init__(self):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.query_client = query.SchedulerQueryClient()
- self._placement_client = None # Lazy-load on first access.
@property
def placement_client(self):
- if self._placement_client is None:
- self._placement_client = report.SchedulerReportClient()
- return self._placement_client
+ return report.report_client_singleton()
@wrap_exception()
def create_aggregate(self, context, aggregate_name, availability_zone):
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 4df1c4112c..6e37a81a52 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -623,6 +623,11 @@ class ComputeManager(manager.Manager):
# We want the ComputeManager, ResourceTracker and ComputeVirtAPI all
# using the same instance of SchedulerReportClient which has the
# ProviderTree cache for this compute service.
+ # NOTE(danms): We do not use the global placement client
+ # singleton here, because the above-mentioned stack of objects
+ # maintain local state in the client. Thus, keeping our own
+ # private object for that stack avoids any potential conflict
+ # with other users in our process outside of the above.
self.reportclient = report.SchedulerReportClient()
self.virtapi = ComputeVirtAPI(self)
self.network_api = neutron.API()
@@ -1242,6 +1247,20 @@ class ComputeManager(manager.Manager):
'updated.', instance=instance)
self._set_instance_obj_error_state(instance)
return
+ except exception.PciDeviceNotFoundById:
+ # This is bug 1981813 where the bound port vnic_type has changed
+ # from direct to macvtap. Nova does not support that and it
+ # already printed an ERROR when the change is detected during
+ # _heal_instance_info_cache. Now we print an ERROR again and skip
+ # plugging the vifs but let the service startup continue to init
+ # the other instances.
+ LOG.exception(
+ 'Virtual interface plugging failed for instance. Probably the '
+ 'vnic_type of the bound port has been changed. Nova does not '
+ 'support such a change.',
+ instance=instance
+ )
+ return
if instance.task_state == task_states.RESIZE_MIGRATING:
# We crashed during resize/migration, so roll back for safety
@@ -1718,27 +1737,32 @@ class ComputeManager(manager.Manager):
# hosts. This is a validation step to make sure that starting the
# instance here doesn't violate the policy.
if scheduler_hints is not None:
- # only go through here if scheduler_hints is provided, even if it
- # is empty.
+ # only go through here if scheduler_hints is provided,
+ # even if it is empty.
group_hint = scheduler_hints.get('group')
if not group_hint:
return
else:
- # The RequestSpec stores scheduler_hints as key=list pairs so
- # we need to check the type on the value and pull the single
- # entry out. The API request schema validates that
+ # The RequestSpec stores scheduler_hints as key=list pairs
+ # so we need to check the type on the value and pull the
+ # single entry out. The API request schema validates that
# the 'group' hint is a single value.
if isinstance(group_hint, list):
group_hint = group_hint[0]
-
- group = objects.InstanceGroup.get_by_hint(context, group_hint)
+ try:
+ group = objects.InstanceGroup.get_by_hint(
+ context, group_hint
+ )
+ except exception.InstanceGroupNotFound:
+ return
else:
# TODO(ganso): a call to DB can be saved by adding request_spec
# to rpcapi payload of live_migration, pre_live_migration and
# check_can_live_migrate_destination
try:
group = objects.InstanceGroup.get_by_instance_uuid(
- context, instance.uuid)
+ context, instance.uuid
+ )
except exception.InstanceGroupNotFound:
return
@@ -8572,8 +8596,9 @@ class ComputeManager(manager.Manager):
# host attachment. We fetch BDMs before that to retain connection_info
# and attachment_id relating to the source host for post migration
# cleanup.
- post_live_migration = functools.partial(self._post_live_migration,
- source_bdms=source_bdms)
+ post_live_migration = functools.partial(
+ self._post_live_migration_update_host, source_bdms=source_bdms
+ )
rollback_live_migration = functools.partial(
self._rollback_live_migration, source_bdms=source_bdms)
@@ -8845,6 +8870,42 @@ class ComputeManager(manager.Manager):
bdm.attachment_id, self.host,
str(e), instance=instance)
+ # TODO(sean-k-mooney): add typing
+ def _post_live_migration_update_host(
+ self, ctxt, instance, dest, block_migration=False,
+ migrate_data=None, source_bdms=None
+ ):
+ try:
+ self._post_live_migration(
+ ctxt, instance, dest, block_migration, migrate_data,
+ source_bdms)
+ except Exception:
+ # Restore the instance object
+ node_name = None
+ try:
+ # get the node name of the compute on which the instance
+ # will be running after the migration (the destination host)
+ compute_node = self._get_compute_info(ctxt, dest)
+ node_name = compute_node.hypervisor_hostname
+ except exception.ComputeHostNotFound:
+ LOG.exception('Failed to get compute_info for %s', dest)
+
+ # we can never rollback from post live migration and we can only
+ # get here if the instance is running on the dest so we ensure
+ # the instance.host is set correctly and reraise the original
+ # exception unmodified.
+ if instance.host != dest:
+ # apply saves the new fields while drop actually removes the
+ # migration context from the instance, so migration persists.
+ instance.apply_migration_context()
+ instance.drop_migration_context()
+ instance.host = dest
+ instance.task_state = None
+ instance.node = node_name
+ instance.progress = 0
+ instance.save()
+ raise
+
@wrap_exception()
@wrap_instance_fault
def _post_live_migration(self, ctxt, instance, dest,
@@ -8856,7 +8917,7 @@ class ComputeManager(manager.Manager):
and mainly updating database record.
:param ctxt: security context
- :param instance: instance dict
+ :param instance: instance object
:param dest: destination host
:param block_migration: if true, prepare for block migration
:param migrate_data: if not None, it is a dict which has data
@@ -10931,6 +10992,9 @@ class ComputeManager(manager.Manager):
profile['vf_num'] = pci_utils.get_vf_num_by_pci_address(
pci_dev.address)
+ if pci_dev.mac_address:
+ profile['device_mac_address'] = pci_dev.mac_address
+
mig_vif.profile = profile
LOG.debug("Updating migrate VIF profile for port %(port_id)s:"
"%(profile)s", {'port_id': port_id,
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 0b801f7ddf..058777d1ed 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -103,7 +103,7 @@ class ResourceTracker(object):
monitor_handler = monitors.MonitorHandler(self)
self.monitors = monitor_handler.monitors
self.old_resources = collections.defaultdict(objects.ComputeNode)
- self.reportclient = reportclient or report.SchedulerReportClient()
+ self.reportclient = reportclient or report.report_client_singleton()
self.ram_allocation_ratio = CONF.ram_allocation_ratio
self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
self.disk_allocation_ratio = CONF.disk_allocation_ratio
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 99e5514136..53067bbef7 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -21,6 +21,7 @@ import eventlet
import functools
import sys
+from keystoneauth1 import exceptions as ks_exc
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_limit import exception as limit_exceptions
@@ -243,11 +244,42 @@ class ComputeTaskManager:
self.network_api = neutron.API()
self.servicegroup_api = servicegroup.API()
self.query_client = query.SchedulerQueryClient()
- self.report_client = report.SchedulerReportClient()
self.notifier = rpc.get_notifier('compute')
# Help us to record host in EventReporter
self.host = CONF.host
+ try:
+ # Test our placement client during initialization
+ self.report_client
+ except (ks_exc.EndpointNotFound,
+ ks_exc.DiscoveryFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout,
+ ks_exc.ConnectFailure) as e:
+ # Non-fatal, likely transient (although not definitely);
+ # continue startup but log the warning so that when things
+ # fail later, it will be clear why we can not do certain
+ # things.
+ LOG.warning('Unable to initialize placement client (%s); '
+ 'Continuing with startup, but some operations '
+ 'will not be possible.', e)
+ except (ks_exc.MissingAuthPlugin,
+ ks_exc.Unauthorized) as e:
+ # This is almost definitely fatal mis-configuration. The
+ # Unauthorized error might be transient, but it is
+ # probably reasonable to consider it fatal.
+ LOG.error('Fatal error initializing placement client; '
+ 'config is incorrect or incomplete: %s', e)
+ raise
+ except Exception as e:
+ # Unknown/unexpected errors here are fatal
+ LOG.error('Fatal error initializing placement client: %s', e)
+ raise
+
+ @property
+ def report_client(self):
+ return report.report_client_singleton()
+
def reset(self):
LOG.info('Reloading compute RPC API')
compute_rpcapi.LAST_VERSION = None
diff --git a/nova/conductor/tasks/live_migrate.py b/nova/conductor/tasks/live_migrate.py
index 1acae88b26..f8819b0dc8 100644
--- a/nova/conductor/tasks/live_migrate.py
+++ b/nova/conductor/tasks/live_migrate.py
@@ -347,8 +347,9 @@ class LiveMigrationTask(base.TaskBase):
source_version = source_info.hypervisor_version
destination_version = destination_info.hypervisor_version
- if source_version > destination_version:
- raise exception.DestinationHypervisorTooOld()
+ if not CONF.workarounds.skip_hypervisor_version_check_on_lm:
+ if source_version > destination_version:
+ raise exception.DestinationHypervisorTooOld()
return source_info, destination_info
def _call_livem_checks_on_host(self, destination, provider_mapping):
diff --git a/nova/conductor/tasks/migrate.py b/nova/conductor/tasks/migrate.py
index 6ff6206f65..8838d0240a 100644
--- a/nova/conductor/tasks/migrate.py
+++ b/nova/conductor/tasks/migrate.py
@@ -54,7 +54,7 @@ def replace_allocation_with_migration(context, instance, migration):
# and do any rollback required
raise
- reportclient = report.SchedulerReportClient()
+ reportclient = report.report_client_singleton()
orig_alloc = reportclient.get_allocs_for_consumer(
context, instance.uuid)['allocations']
@@ -94,7 +94,7 @@ def replace_allocation_with_migration(context, instance, migration):
def revert_allocation_for_migration(context, source_cn, instance, migration):
"""Revert an allocation made for a migration back to the instance."""
- reportclient = report.SchedulerReportClient()
+ reportclient = report.report_client_singleton()
# FIXME(gibi): This method is flawed in that it does not handle allocations
# against sharing providers in any special way. This leads to duplicate
diff --git a/nova/conf/compute.py b/nova/conf/compute.py
index 5abe7694f8..cd4e1706d4 100644
--- a/nova/conf/compute.py
+++ b/nova/conf/compute.py
@@ -426,9 +426,7 @@ allocation_ratio_opts = [
Virtual CPU to physical CPU allocation ratio.
This option is used to influence the hosts selected by the Placement API by
-configuring the allocation ratio for ``VCPU`` inventory. In addition, the
-``AggregateCoreFilter`` (deprecated) will fall back to this configuration value
-if no per-aggregate setting is found.
+configuring the allocation ratio for ``VCPU`` inventory.
.. note::
@@ -459,9 +457,7 @@ Related options:
Virtual RAM to physical RAM allocation ratio.
This option is used to influence the hosts selected by the Placement API by
-configuring the allocation ratio for ``MEMORY_MB`` inventory. In addition, the
-``AggregateRamFilter`` (deprecated) will fall back to this configuration value
-if no per-aggregate setting is found.
+configuring the allocation ratio for ``MEMORY_MB`` inventory.
.. note::
@@ -487,9 +483,7 @@ Related options:
Virtual disk to physical disk allocation ratio.
This option is used to influence the hosts selected by the Placement API by
-configuring the allocation ratio for ``DISK_GB`` inventory. In addition, the
-``AggregateDiskFilter`` (deprecated) will fall back to this configuration value
-if no per-aggregate setting is found.
+configuring the allocation ratio for ``DISK_GB`` inventory.
When configured, a ratio greater than 1.0 will result in over-subscription of
the available physical disk, which can be useful for more efficiently packing
@@ -1008,6 +1002,15 @@ Related options:
filtering computes based on supported image types, which is required
to be enabled for this to take effect.
"""),
+ cfg.ListOpt('vmdk_allowed_types',
+ default=['streamOptimized', 'monolithicSparse'],
+ help="""
+A list of strings describing the VMDK "create-type" subformats that are
+allowed. It is recommended to only include single-file-with-sparse-header
+variants to avoid potential host file exposure due to processing named
+extents. If this list is empty, then no form of VMDK image will be allowed.
+"""),
cfg.BoolOpt('packing_host_numa_cells_allocation_strategy',
default=True,
help="""
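
An operator wanting to restrict VMDK images to the safest subformat could,
for example, set the following; the group and value shown are illustrative:

.. code-block:: ini

    [compute]
    vmdk_allowed_types = streamOptimized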
diff --git a/nova/conf/workarounds.py b/nova/conf/workarounds.py
index 6c52eae8e5..2ec53282cd 100644
--- a/nova/conf/workarounds.py
+++ b/nova/conf/workarounds.py
@@ -410,6 +410,13 @@ with the destination host. When using QEMU >= 2.9 and libvirt >=
4.4.0, libvirt will do the correct thing with respect to checking CPU
compatibility on the destination host during live migration.
"""),
+ cfg.BoolOpt(
+ 'skip_hypervisor_version_check_on_lm',
+ default=False,
+ help="""
+When this is enabled, the check that the destination hypervisor version is
+at least as new as the source hypervisor version is skipped during live
+migration.
+"""),
]
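
To temporarily permit live migration to a host with an older hypervisor, for
example during a rolling upgrade, an operator could set the following:

.. code-block:: ini

    [workarounds]
    skip_hypervisor_version_check_on_lm = true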
diff --git a/nova/db/main/api.py b/nova/db/main/api.py
index 4c40be905e..39775d4f46 100644
--- a/nova/db/main/api.py
+++ b/nova/db/main/api.py
@@ -4176,6 +4176,12 @@ def _get_fk_stmts(metadata, conn, table, column, records):
fk_column = fk_table.c.id
for fk in fk_table.foreign_keys:
+ if table != fk.column.table:
+ # if the foreign key doesn't actually point to the table we're
+ # archiving entries from, then it's not relevant; trying to
+ # resolve this would result in a cartesian product
+ continue
+
# We need to find the records in the referring (child) table that
# correspond to the records in our (parent) table so we can archive
# them.
@@ -4225,6 +4231,7 @@ def _get_fk_stmts(metadata, conn, table, column, records):
# deque.
fk_delete = fk_table.delete().where(fk_column.in_(fk_records))
deletes.appendleft(fk_delete)
+
# Repeat for any possible nested child tables.
i, d = _get_fk_stmts(metadata, conn, fk_table, fk_column, fk_records)
inserts.extendleft(i)
diff --git a/nova/limit/placement.py b/nova/limit/placement.py
index 497986c4ab..eedf7d69e1 100644
--- a/nova/limit/placement.py
+++ b/nova/limit/placement.py
@@ -43,10 +43,8 @@ LEGACY_LIMITS = {
def _get_placement_usages(
context: 'nova.context.RequestContext', project_id: str
) -> ty.Dict[str, int]:
- global PLACEMENT_CLIENT
- if not PLACEMENT_CLIENT:
- PLACEMENT_CLIENT = report.SchedulerReportClient()
- return PLACEMENT_CLIENT.get_usages_counts_for_limits(context, project_id)
+ return report.report_client_singleton().get_usages_counts_for_limits(
+ context, project_id)
def _get_usage(
diff --git a/nova/network/neutron.py b/nova/network/neutron.py
index 2021bdb58f..faf455d9b8 100644
--- a/nova/network/neutron.py
+++ b/nova/network/neutron.py
@@ -223,13 +223,15 @@ def _get_auth_plugin(context, admin=False):
# support some services (metadata API) where an admin context is used
# without an auth token.
global _ADMIN_AUTH
+ user_auth = None
if admin or (context.is_admin and not context.auth_token):
if not _ADMIN_AUTH:
_ADMIN_AUTH = _load_auth_plugin(CONF)
- return _ADMIN_AUTH
+ user_auth = _ADMIN_AUTH
- if context.auth_token:
- return service_auth.get_auth_plugin(context)
+ if context.auth_token or user_auth:
+ # When user_auth = None, user_auth will be extracted from the context.
+ return service_auth.get_auth_plugin(context, user_auth=user_auth)
# We did not get a user token and we should not be using
# an admin token so log an error
@@ -684,7 +686,8 @@ class API:
for profile_key in ('pci_vendor_info', 'pci_slot',
constants.ALLOCATION, 'arq_uuid',
'physical_network', 'card_serial_number',
- 'vf_num', 'pf_mac_address'):
+ 'vf_num', 'pf_mac_address',
+ 'device_mac_address'):
if profile_key in port_profile:
del port_profile[profile_key]
port_req_body['port'][constants.BINDING_PROFILE] = port_profile
@@ -1307,6 +1310,10 @@ class API:
network=network, neutron=neutron,
bind_host_id=bind_host_id,
port_arq=port_arq)
+ # NOTE(gibi): Remove this once we are sure that the fix for
+ # bug 1942329 is always present in the deployed neutron. The
+ # _populate_neutron_extension_values() call above already
+ # populated this MAC to the binding profile instead.
self._populate_pci_mac_address(instance,
request.pci_request_id, port_req_body)
@@ -1622,6 +1629,18 @@ class API:
if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_VF:
dev_profile.update(
self._get_vf_pci_device_profile(pci_dev))
+
+ if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_PF:
+ # In general the MAC address information flows from the neutron
+ # port to the device in the backend. Except for direct-physical
+ # ports. In that case the MAC address flows from the physical
+ # device, the PF, to the neutron port. So when such a port is
+ # being bound to a host the port's MAC address needs to be
+ # updated. Nova needs to put the new MAC into the binding
+ # profile.
+ if pci_dev.mac_address:
+ dev_profile['device_mac_address'] = pci_dev.mac_address
+
return dev_profile
raise exception.PciDeviceNotFound(node_id=pci_dev.compute_node_id,
@@ -3366,6 +3385,25 @@ class API:
delegate_create=True,
)
+ def _log_error_if_vnic_type_changed(
+ self, port_id, old_vnic_type, new_vnic_type, instance
+ ):
+ if old_vnic_type and old_vnic_type != new_vnic_type:
+ LOG.error(
+ 'The vnic_type of the bound port %s has '
+ 'been changed in neutron from "%s" to '
+ '"%s". Changing vnic_type of a bound port '
+ 'is not supported by Nova. To avoid '
+ 'breaking the connectivity of the instance '
+ 'please change the port vnic_type back to '
+ '"%s".',
+ port_id,
+ old_vnic_type,
+ new_vnic_type,
+ old_vnic_type,
+ instance=instance
+ )
+
def _build_network_info_model(self, context, instance, networks=None,
port_ids=None, admin_client=None,
preexisting_port_ids=None,
@@ -3439,6 +3477,12 @@ class API:
preexisting_port_ids)
for index, vif in enumerate(nw_info):
if vif['id'] == refresh_vif_id:
+ self._log_error_if_vnic_type_changed(
+ vif['id'],
+ vif['vnic_type'],
+ refreshed_vif['vnic_type'],
+ instance,
+ )
# Update the existing entry.
nw_info[index] = refreshed_vif
LOG.debug('Updated VIF entry in instance network '
@@ -3488,6 +3532,7 @@ class API:
networks, port_ids = self._gather_port_ids_and_networks(
context, instance, networks, port_ids, client)
+ old_nw_info = instance.get_network_info()
nw_info = network_model.NetworkInfo()
for port_id in port_ids:
current_neutron_port = current_neutron_port_map.get(port_id)
@@ -3495,6 +3540,14 @@ class API:
vif = self._build_vif_model(
context, client, current_neutron_port, networks,
preexisting_port_ids)
+ for old_vif in old_nw_info:
+ if old_vif['id'] == port_id:
+ self._log_error_if_vnic_type_changed(
+ port_id,
+ old_vif['vnic_type'],
+ vif['vnic_type'],
+ instance,
+ )
nw_info.append(vif)
elif nw_info_refresh:
LOG.info('Port %s from network info_cache is no '
@@ -3664,11 +3717,10 @@ class API:
migration.get('status') == 'reverted')
return instance.migration_context.get_pci_mapping_for_migration(revert)
- def _get_port_pci_dev(self, context, instance, port):
+ def _get_port_pci_dev(self, instance, port):
"""Find the PCI device corresponding to the port.
Assumes the port is an SRIOV one.
- :param context: The request context.
:param instance: The instance to which the port is attached.
:param port: The Neutron port, as obtained from the Neutron API
JSON form.
@@ -3694,25 +3746,6 @@ class API:
return None
return device
- def _update_port_pci_binding_profile(self, pci_dev, binding_profile):
- """Update the binding profile dict with new PCI device data.
-
- :param pci_dev: The PciDevice object to update the profile with.
- :param binding_profile: The dict to update.
- """
- binding_profile.update({'pci_slot': pci_dev.address})
- if binding_profile.get('card_serial_number'):
- binding_profile.update({
- 'card_serial_number': pci_dev.card_serial_number})
- if binding_profile.get('pf_mac_address'):
- binding_profile.update({
- 'pf_mac_address': pci_utils.get_mac_by_pci_address(
- pci_dev.parent_addr)})
- if binding_profile.get('vf_num'):
- binding_profile.update({
- 'vf_num': pci_utils.get_vf_num_by_pci_address(
- pci_dev.address)})
-
def _update_port_binding_for_instance(
self, context, instance, host, migration=None,
provider_mappings=None):
@@ -3775,14 +3808,14 @@ class API:
raise exception.PortUpdateFailed(port_id=p['id'],
reason=_("Unable to correlate PCI slot %s") %
pci_slot)
- # NOTE(artom) If migration is None, this is an unshevle, and we
- # need to figure out the pci_slot from the InstancePCIRequest
- # and PciDevice objects.
+ # NOTE(artom) If migration is None, this is an unshelve, and we
+ # need to figure out the pci related binding information from
+ # the InstancePCIRequest and PciDevice objects.
else:
- pci_dev = self._get_port_pci_dev(context, instance, p)
+ pci_dev = self._get_port_pci_dev(instance, p)
if pci_dev:
- self._update_port_pci_binding_profile(pci_dev,
- binding_profile)
+ binding_profile.update(
+ self._get_pci_device_profile(pci_dev))
updates[constants.BINDING_PROFILE] = binding_profile
# NOTE(gibi): during live migration the conductor already sets the
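
The unshelve path above now reuses _get_pci_device_profile() and folds its result into the existing binding profile with dict.update(). A hedged sketch of the merge, with illustrative values borrowed from the functional tests later in this change:

    binding_profile = {'physical_network': 'physnet4'}
    device_profile = {
        'pci_vendor_info': '8086:1528',
        'pci_slot': '0000:82:00.0',
        # PFs now also carry their MAC for direct-physical ports
        'device_mac_address': 'b4:96:91:34:f4:aa',
    }
    binding_profile.update(device_profile)
    # the port update sent to neutron now includes the new slot and MAC
    print(binding_profile)
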
diff --git a/nova/objects/cell_mapping.py b/nova/objects/cell_mapping.py
index 595ec43e48..1355182420 100644
--- a/nova/objects/cell_mapping.py
+++ b/nova/objects/cell_mapping.py
@@ -279,11 +279,15 @@ class CellMappingList(base.ObjectListBase, base.NovaObject):
# SELECT DISTINCT cell_id FROM instance_mappings \
# WHERE project_id = $project_id;
cell_ids = context.session.query(
- api_db_models.InstanceMapping.cell_id).filter_by(
- project_id=project_id).distinct().subquery()
+ api_db_models.InstanceMapping.cell_id
+ ).filter_by(
+ project_id=project_id
+ ).distinct()
# SELECT cell_mappings WHERE cell_id IN ($cell_ids);
- return context.session.query(api_db_models.CellMapping).filter(
- api_db_models.CellMapping.id.in_(cell_ids)).all()
+ return context.session.query(
+ api_db_models.CellMapping).filter(
+ api_db_models.CellMapping.id.in_(cell_ids)
+ ).all()
@classmethod
def get_by_project_id(cls, context, project_id):
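
The query restructuring keeps the inner SELECT as a selectable passed straight to in_() instead of wrapping it in .subquery(). A toy core-level equivalent (not Nova's models, and assuming SQLAlchemy 1.4+):

    import sqlalchemy as sa

    engine = sa.create_engine('sqlite://')
    metadata = sa.MetaData()
    mappings = sa.Table(
        'mappings', metadata,
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('cell_id', sa.Integer),
        sa.Column('project_id', sa.String(32)))
    metadata.create_all(engine)

    # pass the SELECT itself to in_(); no explicit .subquery() needed
    cell_ids = sa.select(mappings.c.cell_id).where(
        mappings.c.project_id == 'demo').distinct()
    stmt = sa.select(mappings).where(mappings.c.id.in_(cell_ids))

    with engine.connect() as conn:
        print(conn.execute(stmt).all())
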
diff --git a/nova/objects/pci_device.py b/nova/objects/pci_device.py
index b0d5b75826..f30555849c 100644
--- a/nova/objects/pci_device.py
+++ b/nova/objects/pci_device.py
@@ -148,6 +148,12 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
reason='dev_type=%s not supported in version %s' % (
dev_type, target_version))
+ def __repr__(self):
+ return (
+ f'PciDevice(address={self.address}, '
+ f'compute_node_id={self.compute_node_id})'
+ )
+
def update_device(self, dev_dict):
"""Sync the content from device dictionary to device object.
@@ -175,6 +181,9 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
# NOTE(ralonsoh): list of parameters currently added to
# "extra_info" dict:
# - "capabilities": dict of (strings/list of strings)
+ # - "parent_ifname": the netdev name of the parent (PF)
+ # device of a VF
+ # - "mac_address": the MAC address of the PF
extra_info = self.extra_info
data = v if isinstance(v, str) else jsonutils.dumps(v)
extra_info.update({k: data})
@@ -566,6 +575,13 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
caps = jsonutils.loads(caps_json)
return caps.get('vpd', {}).get('card_serial_number')
+ @property
+ def mac_address(self):
+ """The MAC address of the PF physical device or None if the device is
+ not a PF or if the MAC is not available.
+ """
+ return self.extra_info.get('mac_address')
+
@base.NovaObjectRegistry.register
class PciDeviceList(base.ObjectListBase, base.NovaObject):
@@ -605,3 +621,6 @@ class PciDeviceList(base.ObjectListBase, base.NovaObject):
parent_addr)
return base.obj_make_list(context, cls(context), objects.PciDevice,
db_dev_list)
+
+ def __repr__(self):
+ return f"PciDeviceList(objects={[repr(obj) for obj in self.objects]})"
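
extra_info only stores strings, so the new mac_address lands there as-is while structured values are JSON-encoded first. A small sketch of that convention, with stdlib json standing in for oslo's jsonutils:

    import json

    extra_info = {}
    for k, v in {
        'mac_address': 'b4:96:91:34:f4:aa',
        'capabilities': {'vpd': {'card_serial_number': '42'}},
    }.items():
        extra_info[k] = v if isinstance(v, str) else json.dumps(v)

    # equivalent of the new PciDevice.mac_address property
    print(extra_info.get('mac_address'))          # b4:96:91:34:f4:aa
    # structured values round-trip back through json.loads()
    print(json.loads(extra_info['capabilities'])['vpd'])
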
diff --git a/nova/objects/request_spec.py b/nova/objects/request_spec.py
index 9ce77a4043..cc54293231 100644
--- a/nova/objects/request_spec.py
+++ b/nova/objects/request_spec.py
@@ -645,6 +645,7 @@ class RequestSpec(base.NovaObject):
except exception.InstanceGroupNotFound:
# NOTE(danms): Instance group may have been deleted
spec.instance_group = None
+ spec.scheduler_hints.pop('group', None)
if data_migrated:
spec.save()
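
Popping the hint mirrors clearing instance_group: a stale 'group' scheduler hint would otherwise refer to the deleted group on the next scheduling pass. A plain-dict sketch:

    scheduler_hints = {'group': 'deleted-group-uuid', 'same_host': ['x']}
    scheduler_hints.pop('group', None)   # no KeyError if already absent
    print(scheduler_hints)               # {'same_host': ['x']}
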
diff --git a/nova/quota.py b/nova/quota.py
index b9dd763012..eafad4cd23 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -1348,11 +1348,8 @@ def _instances_cores_ram_count_legacy(context, project_id, user_id=None):
def _cores_ram_count_placement(context, project_id, user_id=None):
- global PLACEMENT_CLIENT
- if not PLACEMENT_CLIENT:
- PLACEMENT_CLIENT = report.SchedulerReportClient()
- return PLACEMENT_CLIENT.get_usages_counts_for_quota(context, project_id,
- user_id=user_id)
+ return report.report_client_singleton().get_usages_counts_for_quota(
+ context, project_id, user_id=user_id)
def _instances_cores_ram_count_api_db_placement(context, project_id,
diff --git a/nova/scheduler/client/report.py b/nova/scheduler/client/report.py
index e4d0c8e3db..ff86527cf5 100644
--- a/nova/scheduler/client/report.py
+++ b/nova/scheduler/client/report.py
@@ -52,6 +52,7 @@ AGGREGATE_GENERATION_VERSION = '1.19'
NESTED_PROVIDER_API_VERSION = '1.14'
POST_ALLOCATIONS_API_VERSION = '1.13'
GET_USAGES_VERSION = '1.9'
+PLACEMENTCLIENT = None
AggInfo = collections.namedtuple('AggInfo', ['aggregates', 'generation'])
TraitInfo = collections.namedtuple('TraitInfo', ['traits', 'generation'])
@@ -67,6 +68,51 @@ def warn_limit(self, msg):
LOG.warning(msg)
+def report_client_singleton():
+ """Return a reference to the global placement client singleton.
+
+ This initializes the placement client once and returns a reference
+ to that singleton on subsequent calls. Errors are raised
+ (particularly ks_exc.*) but context-specific error messages are
+ logged for consistency.
+ """
+ # NOTE(danms): The report client maintains internal state in the
+ # form of the provider tree, which will be shared across all users
+ # of this global client. That is not a problem now, but in the
+ # future it may be beneficial to fix that. One idea would be to
+ # change the behavior of the client such that the static-config
+ # pieces of the actual keystone client are separate from the
+ # internal state, so that we can return a new object here with a
+ # context-specific local state object, but with the client bits
+ # shared.
+ global PLACEMENTCLIENT
+ if PLACEMENTCLIENT is None:
+ try:
+ PLACEMENTCLIENT = SchedulerReportClient()
+ except ks_exc.EndpointNotFound:
+ LOG.error('The placement API endpoint was not found.')
+ raise
+ except ks_exc.MissingAuthPlugin:
+ LOG.error('No authentication information found for placement API.')
+ raise
+ except ks_exc.Unauthorized:
+ LOG.error('Placement service credentials do not work.')
+ raise
+ except ks_exc.DiscoveryFailure:
+ LOG.error('Discovering suitable URL for placement API failed.')
+ raise
+ except (ks_exc.ConnectFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout):
+ LOG.error('Placement API service is not responding.')
+ raise
+ except Exception:
+ LOG.error('Failed to initialize placement client '
+ '(is keystone available?)')
+ raise
+ return PLACEMENTCLIENT
+
+
def safe_connect(f):
@functools.wraps(f)
def wrapper(self, *a, **k):
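
The helper is a straightforward lazy module-level singleton; a stripped-down sketch of the pattern, with the error handling elided and a plain object standing in for SchedulerReportClient:

    _CLIENT = None

    def client_singleton():
        global _CLIENT
        if _CLIENT is None:
            _CLIENT = object()   # stands in for SchedulerReportClient()
        return _CLIENT

    assert client_singleton() is client_singleton()  # one shared instance
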
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 03df615f6a..10b330653d 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -66,7 +66,7 @@ class SchedulerManager(manager.Manager):
self.host_manager = host_manager.HostManager()
self.servicegroup_api = servicegroup.API()
self.notifier = rpc.get_notifier('scheduler')
- self.placement_client = report.SchedulerReportClient()
+ self.placement_client = report.report_client_singleton()
super().__init__(service_name='scheduler', *args, **kwargs)
diff --git a/nova/scheduler/request_filter.py b/nova/scheduler/request_filter.py
index bd237b06ca..3f96b7a880 100644
--- a/nova/scheduler/request_filter.py
+++ b/nova/scheduler/request_filter.py
@@ -311,7 +311,7 @@ def routed_networks_filter(
# Get the clients we need
network_api = neutron.API()
- report_api = report.SchedulerReportClient()
+ report_api = report.report_client_singleton()
for requested_network in requested_networks:
network_id = None
diff --git a/nova/service_auth.py b/nova/service_auth.py
index f5ae0646d8..aa8fd8fa12 100644
--- a/nova/service_auth.py
+++ b/nova/service_auth.py
@@ -30,8 +30,10 @@ def reset_globals():
_SERVICE_AUTH = None
-def get_auth_plugin(context):
- user_auth = context.get_auth_plugin()
+def get_auth_plugin(context, user_auth=None):
+ # user_auth may be passed in when the RequestContext is anonymous, such as
+ # when get_admin_context() is used for API calls by nova-manage.
+ user_auth = user_auth or context.get_auth_plugin()
if CONF.service_user.send_service_user_token:
global _SERVICE_AUTH
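
The fallback keeps the old behaviour for token-bearing contexts while letting callers inject an admin auth plugin; a toy sketch of the precedence (names here are illustrative, not Nova's real objects):

    class FakeContext:
        def get_auth_plugin(self):
            return 'context-derived-auth'

    def get_auth_plugin(context, user_auth=None):
        # user_auth wins when supplied (e.g. the cached admin auth)
        return user_auth or context.get_auth_plugin()

    print(get_auth_plugin(FakeContext()))                     # from context
    print(get_auth_plugin(FakeContext(), user_auth='admin'))  # injected
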
diff --git a/nova/test.py b/nova/test.py
index a6449c01f0..4602f7d013 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -61,6 +61,7 @@ from nova import exception
from nova import objects
from nova.objects import base as objects_base
from nova import quota
+from nova.scheduler.client import report
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import matchers
from nova import utils
@@ -290,6 +291,9 @@ class TestCase(base.BaseTestCase):
# instead of only once initialized for test worker
wsgi_app.init_global_data.reset()
+ # Reset the placement client singleton
+ report.PLACEMENTCLIENT = None
+
def _setup_cells(self):
"""Setup a normal cellsv2 environment.
@@ -355,7 +359,7 @@ class TestCase(base.BaseTestCase):
self.useFixture(fixtures.MonkeyPatch(old, new))
@staticmethod
- def patch_exists(patched_path, result):
+ def patch_exists(patched_path, result, other=None):
"""Provide a static method version of patch_exists(), which if you
haven't already imported nova.test can be slightly easier to
use as a context manager within a test method via:
@@ -364,7 +368,7 @@ class TestCase(base.BaseTestCase):
with self.patch_exists(path, True):
...
"""
- return patch_exists(patched_path, result)
+ return patch_exists(patched_path, result, other)
@staticmethod
def patch_open(patched_path, read_data):
@@ -848,10 +852,12 @@ class ContainKeyValue(object):
@contextlib.contextmanager
-def patch_exists(patched_path, result):
+def patch_exists(patched_path, result, other=None):
"""Selectively patch os.path.exists() so that if it's called with
patched_path, return result. Calls with any other path are passed
- through to the real os.path.exists() function.
+ through to the real os.path.exists() function if other is not provided.
+    through to the real os.path.exists() function if other is not provided.
+    If other is provided, it will be the result of the call for any path
+    other than patched_path.
Either import and use as a decorator / context manager, or use the
nova.TestCase.patch_exists() static method as a context manager.
@@ -885,7 +891,10 @@ def patch_exists(patched_path, result):
def fake_exists(path):
if path == patched_path:
return result
- return real_exists(path)
+ elif other is not None:
+ return other
+ else:
+ return real_exists(path)
with mock.patch.object(os.path, "exists") as mock_exists:
mock_exists.side_effect = fake_exists
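
A usage sketch for the new other argument (assuming nova.test is importable): with other=False, every path except the patched one reports missing.

    import os.path
    from nova.test import patch_exists

    with patch_exists('/fake/path', True, other=False):
        assert os.path.exists('/fake/path')   # patched path -> True
        assert not os.path.exists('/etc')     # everything else -> other
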
diff --git a/nova/tests/fixtures/libvirt.py b/nova/tests/fixtures/libvirt.py
index 891e957200..5ccf01e40f 100644
--- a/nova/tests/fixtures/libvirt.py
+++ b/nova/tests/fixtures/libvirt.py
@@ -309,7 +309,7 @@ class FakePCIDevice(object):
self, dev_type, bus, slot, function, iommu_group, numa_node, *,
vf_ratio=None, multiple_gpu_types=False, generic_types=False,
parent=None, vend_id=None, vend_name=None, prod_id=None,
- prod_name=None, driver_name=None, vpd_fields=None
+ prod_name=None, driver_name=None, vpd_fields=None, mac_address=None,
):
"""Populate pci devices
@@ -331,6 +331,8 @@ class FakePCIDevice(object):
:param prod_id: (str) The product ID.
:param prod_name: (str) The product name.
:param driver_name: (str) The driver name.
+ :param mac_address: (str) The MAC of the device.
+            Used in the case of SRIOV PFs.
"""
self.dev_type = dev_type
@@ -349,6 +351,7 @@ class FakePCIDevice(object):
self.prod_id = prod_id
self.prod_name = prod_name
self.driver_name = driver_name
+ self.mac_address = mac_address
self.vpd_fields = vpd_fields
@@ -364,7 +367,9 @@ class FakePCIDevice(object):
assert not self.vf_ratio, 'vf_ratio does not apply for PCI devices'
if self.dev_type in ('PF', 'VF'):
- assert self.vf_ratio, 'require vf_ratio for PFs and VFs'
+ assert (
+ self.vf_ratio is not None
+ ), 'require vf_ratio for PFs and VFs'
if self.dev_type == 'VF':
assert self.parent, 'require parent for VFs'
@@ -497,6 +502,10 @@ class FakePCIDevice(object):
def XMLDesc(self, flags):
return self.pci_device
+ @property
+ def address(self):
+ return "0000:%02x:%02x.%1x" % (self.bus, self.slot, self.function)
+
# TODO(stephenfin): Remove all of these HostFooDevicesInfo objects in favour of
# a unified devices object
@@ -609,7 +618,7 @@ class HostPCIDevicesInfo(object):
self, dev_type, bus, slot, function, iommu_group, numa_node,
vf_ratio=None, multiple_gpu_types=False, generic_types=False,
parent=None, vend_id=None, vend_name=None, prod_id=None,
- prod_name=None, driver_name=None, vpd_fields=None,
+ prod_name=None, driver_name=None, vpd_fields=None, mac_address=None,
):
pci_dev_name = _get_libvirt_nodedev_name(bus, slot, function)
@@ -632,6 +641,7 @@ class HostPCIDevicesInfo(object):
prod_name=prod_name,
driver_name=driver_name,
vpd_fields=vpd_fields,
+ mac_address=mac_address,
)
self.devices[pci_dev_name] = dev
return dev
@@ -651,6 +661,13 @@ class HostPCIDevicesInfo(object):
return [dev for dev in self.devices
if self.devices[dev].is_capable_of_mdevs]
+ def get_pci_address_mac_mapping(self):
+ return {
+ device.address: device.mac_address
+            for device in self.devices.values()
+ if device.mac_address
+ }
+
class FakeMdevDevice(object):
template = """
@@ -2182,6 +2199,15 @@ class LibvirtFixture(fixtures.Fixture):
def __init__(self, stub_os_vif=True):
self.stub_os_vif = stub_os_vif
+ self.pci_address_to_mac_map = collections.defaultdict(
+ lambda: '52:54:00:1e:59:c6')
+
+ def update_sriov_mac_address_mapping(self, pci_address_to_mac_map):
+ self.pci_address_to_mac_map.update(pci_address_to_mac_map)
+
+ def fake_get_mac_by_pci_address(self, pci_addr, pf_interface=False):
+        return self.pci_address_to_mac_map[pci_addr]
def setUp(self):
super().setUp()
@@ -2194,31 +2220,39 @@ class LibvirtFixture(fixtures.Fixture):
self.useFixture(
fixtures.MockPatch('nova.virt.libvirt.utils.get_fs_info'))
- self.useFixture(
- fixtures.MockPatch('nova.compute.utils.get_machine_ips'))
+ self.mock_get_machine_ips = self.useFixture(
+ fixtures.MockPatch('nova.compute.utils.get_machine_ips')).mock
# libvirt driver needs to call out to the filesystem to get the
# parent_ifname for the SRIOV VFs.
- self.useFixture(fixtures.MockPatch(
- 'nova.pci.utils.get_ifname_by_pci_address',
- return_value='fake_pf_interface_name'))
+ self.mock_get_ifname_by_pci_address = self.useFixture(
+ fixtures.MockPatch(
+ "nova.pci.utils.get_ifname_by_pci_address",
+ return_value="fake_pf_interface_name",
+ )
+ ).mock
self.useFixture(fixtures.MockPatch(
'nova.pci.utils.get_mac_by_pci_address',
- return_value='52:54:00:1e:59:c6'))
+ side_effect=self.fake_get_mac_by_pci_address))
# libvirt calls out to sysfs to get the vfs ID during macvtap plug
- self.useFixture(fixtures.MockPatch(
- 'nova.pci.utils.get_vf_num_by_pci_address', return_value=1))
+ self.mock_get_vf_num_by_pci_address = self.useFixture(
+ fixtures.MockPatch(
+ 'nova.pci.utils.get_vf_num_by_pci_address', return_value=1
+ )
+ ).mock
# libvirt calls out to privsep to set the mac and vlan of a macvtap
- self.useFixture(fixtures.MockPatch(
- 'nova.privsep.linux_net.set_device_macaddr_and_vlan'))
+ self.mock_set_device_macaddr_and_vlan = self.useFixture(
+ fixtures.MockPatch(
+ 'nova.privsep.linux_net.set_device_macaddr_and_vlan')).mock
# libvirt calls out to privsep to set the port state during macvtap
# plug
- self.useFixture(fixtures.MockPatch(
- 'nova.privsep.linux_net.set_device_macaddr'))
+ self.mock_set_device_macaddr = self.useFixture(
+ fixtures.MockPatch(
+ 'nova.privsep.linux_net.set_device_macaddr')).mock
# Don't assume that the system running tests has a valid machine-id
self.useFixture(fixtures.MockPatch(
@@ -2233,8 +2267,8 @@ class LibvirtFixture(fixtures.Fixture):
# Ensure tests perform the same on all host architectures
fake_uname = os_uname(
'Linux', '', '5.4.0-0-generic', '', obj_fields.Architecture.X86_64)
- self.useFixture(
- fixtures.MockPatch('os.uname', return_value=fake_uname))
+ self.mock_uname = self.useFixture(
+ fixtures.MockPatch('os.uname', return_value=fake_uname)).mock
# ...and on all machine types
fake_loaders = [
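
The fixture's MAC lookup is a defaultdict, so any PCI address a test does not register explicitly still resolves to the historical canned value; a sketch:

    import collections

    mac_map = collections.defaultdict(lambda: '52:54:00:1e:59:c6')
    mac_map.update({'0000:82:00.0': 'b4:96:91:34:f4:aa'})  # from pci_info

    print(mac_map['0000:82:00.0'])   # test-provided PF MAC
    print(mac_map['0000:81:00.1'])   # falls back to the canned default
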
diff --git a/nova/tests/fixtures/nova.py b/nova/tests/fixtures/nova.py
index 810c6f62dd..f9e011dd67 100644
--- a/nova/tests/fixtures/nova.py
+++ b/nova/tests/fixtures/nova.py
@@ -904,6 +904,16 @@ class WarningsFixture(fixtures.Fixture):
message='Implicit coercion of SELECT and textual SELECT .*',
category=sqla_exc.SADeprecationWarning)
+        # Also enable general SQLAlchemy warnings to ensure we're not doing
+        # silly stuff. It's possible that we'll need to filter things out here
+        # with future SQLAlchemy versions, but that's a good thing.
+
+ warnings.filterwarnings(
+ 'error',
+ module='nova',
+ category=sqla_exc.SAWarning,
+ )
+
self.addCleanup(self._reset_warning_filters)
def _reset_warning_filters(self):
@@ -1032,9 +1042,15 @@ class OSAPIFixture(fixtures.Fixture):
self.api = client.TestOpenStackClient(
'fake', base_url, project_id=self.project_id,
roles=['reader', 'member'])
+ self.alternative_api = client.TestOpenStackClient(
+ 'fake', base_url, project_id=self.project_id,
+ roles=['reader', 'member'])
self.admin_api = client.TestOpenStackClient(
'admin', base_url, project_id=self.project_id,
roles=['reader', 'member', 'admin'])
+ self.alternative_admin_api = client.TestOpenStackClient(
+ 'admin', base_url, project_id=self.project_id,
+ roles=['reader', 'member', 'admin'])
self.reader_api = client.TestOpenStackClient(
'reader', base_url, project_id=self.project_id,
roles=['reader'])
@@ -1130,9 +1146,9 @@ class PoisonFunctions(fixtures.Fixture):
# Don't poison the function if it's already mocked
import nova.virt.libvirt.host
if not isinstance(nova.virt.libvirt.host.Host._init_events, mock.Mock):
- self.useFixture(fixtures.MockPatch(
+ self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.host.Host._init_events',
- side_effect=evloop))
+ evloop))
class IndirectionAPIFixture(fixtures.Fixture):
diff --git a/nova/tests/functional/api_sample_tests/test_remote_consoles.py b/nova/tests/functional/api_sample_tests/test_remote_consoles.py
index 986826bfee..e304402ee9 100644
--- a/nova/tests/functional/api_sample_tests/test_remote_consoles.py
+++ b/nova/tests/functional/api_sample_tests/test_remote_consoles.py
@@ -13,6 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
+from nova.compute import api as compute
+from nova import exception
from nova.tests.functional.api_sample_tests import test_servers
HTTP_RE = r'(https?://)([\w\d:#@%/;$()~_?\+-=\\.&](#!)?)*'
@@ -38,6 +42,22 @@ class ConsolesSampleJsonTests(test_servers.ServersSampleBase):
self._verify_response('get-vnc-console-post-resp', {'url': HTTP_RE},
response, 200)
+ @mock.patch.object(compute.API, 'get_vnc_console')
+ def test_get_vnc_console_instance_invalid_state(self,
+ mock_get_vnc_console):
+ uuid = self._post_server()
+
+ def fake_get_vnc_console(*args, **kwargs):
+ raise exception.InstanceInvalidState(
+ attr='fake_attr', state='fake_state', method='fake_method',
+ instance_uuid=uuid)
+
+ mock_get_vnc_console.side_effect = fake_get_vnc_console
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-vnc-console-post-req',
+ {'action': 'os-getVNCConsole'})
+ self.assertEqual(409, response.status_code)
+
def test_get_spice_console(self):
uuid = self._post_server()
response = self._do_post('servers/%s/action' % uuid,
diff --git a/nova/tests/functional/compute/test_resource_tracker.py b/nova/tests/functional/compute/test_resource_tracker.py
index 81b7dfb68c..758c15f371 100644
--- a/nova/tests/functional/compute/test_resource_tracker.py
+++ b/nova/tests/functional/compute/test_resource_tracker.py
@@ -29,7 +29,6 @@ from nova import conf
from nova import context
from nova import objects
from nova import test
-from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.virt import driver as virt_driver
@@ -694,15 +693,6 @@ class TestProviderConfig(integrated_helpers.ProviderUsageBaseTestCase):
        feature a vm cannot be spawned using a custom trait and then start a
compute service that provides that trait.
"""
-
- self.useFixture(nova_fixtures.NeutronFixture(self))
- self.useFixture(nova_fixtures.GlanceFixture(self))
-
- # Start nova services.
- self.api = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1')).admin_api
- self.api.microversion = 'latest'
- self.start_service('conductor')
# start nova-compute that will not have the additional trait.
self._start_compute("fake-host-1")
diff --git a/nova/tests/functional/integrated_helpers.py b/nova/tests/functional/integrated_helpers.py
index 028ef53d7e..bd6244546c 100644
--- a/nova/tests/functional/integrated_helpers.py
+++ b/nova/tests/functional/integrated_helpers.py
@@ -540,8 +540,9 @@ class InstanceHelperMixin:
self.api.post_server_action(
server['id'],
{'os-migrateLive': {'host': None, 'block_migration': 'auto'}})
- self._wait_for_state_change(server, server_expected_state)
+ server = self._wait_for_state_change(server, server_expected_state)
self._wait_for_migration_status(server, [migration_expected_state])
+ return server
_live_migrate_server = _live_migrate
diff --git a/nova/tests/functional/libvirt/base.py b/nova/tests/functional/libvirt/base.py
index 3d8aec8106..68c6e294c1 100644
--- a/nova/tests/functional/libvirt/base.py
+++ b/nova/tests/functional/libvirt/base.py
@@ -42,7 +42,7 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
super(ServersTestBase, self).setUp()
self.useFixture(nova_fixtures.LibvirtImageBackendFixture())
- self.useFixture(nova_fixtures.LibvirtFixture())
+ self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture())
self.useFixture(nova_fixtures.OSBrickFixture())
self.useFixture(fixtures.MockPatch(
@@ -51,12 +51,12 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
return_value={'total': 128, 'used': 44, 'free': 84}))
- self.useFixture(fixtures.MockPatch(
+ self.mock_is_valid_hostname = self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.driver.libvirt_utils.is_valid_hostname',
- return_value=True))
- self.useFixture(fixtures.MockPatch(
+ return_value=True)).mock
+ self.mock_file_open = self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.driver.libvirt_utils.file_open',
- side_effect=lambda *a, **k: io.BytesIO(b'')))
+ side_effect=lambda *a, **k: io.BytesIO(b''))).mock
self.useFixture(fixtures.MockPatch(
'nova.privsep.utils.supports_direct_io',
return_value=True))
@@ -114,7 +114,7 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
def start_compute(
self, hostname='compute1', host_info=None, pci_info=None,
mdev_info=None, vdpa_info=None, libvirt_version=None,
- qemu_version=None,
+ qemu_version=None, cell_name=None, connection=None
):
"""Start a compute service.
@@ -124,27 +124,53 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
:param host_info: A fakelibvirt.HostInfo object for the host. Defaults
to a HostInfo with 2 NUMA nodes, 2 cores per node, 2 threads per
core, and 16GB of RAM.
+ :param connection: A fake libvirt connection. You should not provide it
+            directly. However, it is used by restart_compute_service to
+            implement restart without losing the hypervisor state.
:returns: The hostname of the created service, which can be used to
            look up the created service and UUID of the associated resource
provider.
"""
+ if connection and (
+ host_info or
+ pci_info or
+ mdev_info or
+ vdpa_info or
+ libvirt_version or
+ qemu_version
+ ):
+ raise ValueError(
+ "Either an existing connection instance can be provided or a "
+ "list of parameters for a new connection"
+ )
def _start_compute(hostname, host_info):
- fake_connection = self._get_connection(
- host_info, pci_info, mdev_info, vdpa_info, libvirt_version,
- qemu_version, hostname,
- )
+ if connection:
+ fake_connection = connection
+ else:
+ fake_connection = self._get_connection(
+ host_info, pci_info, mdev_info, vdpa_info, libvirt_version,
+ qemu_version, hostname,
+ )
+
+ # If the compute is configured with PCI devices then we need to
+            # make sure that the stubs around sysfs have the MAC address
+ # information for the PCI PF devices
+ if pci_info:
+ self.libvirt.update_sriov_mac_address_mapping(
+ pci_info.get_pci_address_mac_mapping())
# This is fun. Firstly we need to do a global'ish mock so we can
# actually start the service.
- with mock.patch('nova.virt.libvirt.host.Host.get_connection',
- return_value=fake_connection):
- compute = self.start_service('compute', host=hostname)
- # Once that's done, we need to tweak the compute "service" to
- # make sure it returns unique objects. We do this inside the
- # mock context to avoid a small window between the end of the
- # context and the tweaking where get_connection would revert to
- # being an autospec mock.
- compute.driver._host.get_connection = lambda: fake_connection
+ orig_con = self.mock_conn.return_value
+ self.mock_conn.return_value = fake_connection
+ compute = self.start_service(
+ 'compute', host=hostname, cell_name=cell_name)
+ # Once that's done, we need to tweak the compute "service" to
+ # make sure it returns unique objects.
+ compute.driver._host.get_connection = lambda: fake_connection
+ # Then we revert the local mock tweaking so the next compute can
+ # get its own
+ self.mock_conn.return_value = orig_con
return compute
# ensure we haven't already registered services with these hostnames
@@ -159,6 +185,74 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
return hostname
+ def restart_compute_service(
+ self,
+ hostname,
+ host_info=None,
+ pci_info=None,
+ mdev_info=None,
+ vdpa_info=None,
+ libvirt_version=None,
+ qemu_version=None,
+ keep_hypervisor_state=True,
+ ):
+        """Stops the service and starts a new one to have a realistic restart
+
+ :param hostname: the hostname of the nova-compute service to be
+ restarted
+ :param keep_hypervisor_state: If True then we reuse the fake connection
+            from the existing driver. If False, a new connection will be
+            created based on the other parameters provided.
+ """
+ # We are intentionally not calling super() here. Nova's base test class
+        # defines starting and restarting the compute service with very
+        # different signatures, and those calls cannot be made aware of
+ # the intricacies of the libvirt fixture. So we simply hide that
+ # implementation.
+
+ if keep_hypervisor_state and (
+ host_info or
+ pci_info or
+ mdev_info or
+ vdpa_info or
+ libvirt_version or
+ qemu_version
+ ):
+ raise ValueError(
+ "Either keep_hypervisor_state=True or a list of libvirt "
+ "parameters can be provided but not both"
+ )
+
+ compute = self.computes.pop(hostname)
+ self.compute_rp_uuids.pop(hostname)
+
+ # NOTE(gibi): The service interface cannot be used to simulate a real
+ # service restart as the manager object will not be recreated after a
+        # service.stop() and service.start(), therefore the manager state will
+        # survive. For example, the resource tracker will not be recreated
+        # after a stop/start. The service.kill() call cannot help either, as
+        # it deletes the service from the DB, which is unrealistic and causes
+        # operations that refer to the killed host (e.g. evacuate) to fail.
+        # So this helper method stops the original service and then starts a
+        # brand new compute service for the same host and node. This way
+ # a new ComputeManager instance will be created and initialized during
+ # the service startup.
+ compute.stop()
+
+ # this service was running previously, so we have to make sure that
+ # we restart it in the same cell
+ cell_name = self.host_mappings[compute.host].cell_mapping.name
+
+ old_connection = compute.manager.driver._get_connection()
+
+ self.start_compute(
+ hostname, host_info, pci_info, mdev_info, vdpa_info,
+ libvirt_version, qemu_version, cell_name,
+ old_connection if keep_hypervisor_state else None
+ )
+
+ return self.computes[hostname]
+
class LibvirtMigrationMixin(object):
"""A simple mixin to facilliate successful libvirt live migrations
@@ -392,6 +486,22 @@ class LibvirtNeutronFixture(nova_fixtures.NeutronFixture):
'binding:vnic_type': 'remote-managed',
}
+ network_4_port_pf = {
+ 'id': 'c6f51315-9202-416f-9e2f-eb78b3ac36d9',
+ 'network_id': network_4['id'],
+ 'status': 'ACTIVE',
+ 'mac_address': 'b5:bc:2e:e7:51:01',
+ 'fixed_ips': [
+ {
+ 'ip_address': '192.168.4.8',
+ 'subnet_id': subnet_4['id']
+ }
+ ],
+ 'binding:vif_details': {'vlan': 42},
+ 'binding:vif_type': 'hostdev_physical',
+ 'binding:vnic_type': 'direct-physical',
+ }
+
def __init__(self, test):
super(LibvirtNeutronFixture, self).__init__(test)
self._networks = {
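
A hedged sketch of how a functional test is expected to drive the new restart helper (method body only; the test name is illustrative, and it leans on the ServersTestBase machinery and fakelibvirt fixture above):

    def test_restart_picks_up_new_flags(self):
        self.start_compute(hostname='compute1')
        self.flags(cpu_dedicated_set='0-3', group='compute')
        # the default keep_hypervisor_state=True reuses the fake connection
        self.restart_compute_service('compute1')
        # a changed inventory instead requires a fresh connection
        self.restart_compute_service(
            'compute1',
            pci_info=fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=2),
            keep_hypervisor_state=False)
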
diff --git a/nova/tests/functional/libvirt/test_device_bus_migration.py b/nova/tests/functional/libvirt/test_device_bus_migration.py
index 82a0d4556e..3852e31c68 100644
--- a/nova/tests/functional/libvirt/test_device_bus_migration.py
+++ b/nova/tests/functional/libvirt/test_device_bus_migration.py
@@ -51,7 +51,7 @@ class LibvirtDeviceBusMigration(base.ServersTestBase):
def _assert_stashed_image_properties_persist(self, server, properties):
# Assert the stashed properties persist across a host reboot
- self.restart_compute_service(self.compute)
+ self.restart_compute_service(self.compute_hostname)
self._assert_stashed_image_properties(server['id'], properties)
# Assert the stashed properties persist across a guest reboot
@@ -173,7 +173,7 @@ class LibvirtDeviceBusMigration(base.ServersTestBase):
self.flags(pointer_model='ps2mouse')
# Restart compute to pick up ps2 setting, which means the guest will
# not get a prescribed pointer device
- self.restart_compute_service(self.compute)
+ self.restart_compute_service(self.compute_hostname)
# Create a server with default image properties
default_image_properties1 = {
@@ -187,7 +187,7 @@ class LibvirtDeviceBusMigration(base.ServersTestBase):
# Assert the defaults persist across a host flag change
self.flags(pointer_model='usbtablet')
# Restart compute to pick up usb setting
- self.restart_compute_service(self.compute)
+ self.restart_compute_service(self.compute_hostname)
self._assert_stashed_image_properties(
server1['id'], default_image_properties1)
@@ -216,7 +216,7 @@ class LibvirtDeviceBusMigration(base.ServersTestBase):
# https://bugs.launchpad.net/nova/+bug/1866106
self.flags(pointer_model=None)
# Restart compute to pick up None setting
- self.restart_compute_service(self.compute)
+ self.restart_compute_service(self.compute_hostname)
self._assert_stashed_image_properties(
server1['id'], default_image_properties1)
self._assert_stashed_image_properties(
diff --git a/nova/tests/functional/libvirt/test_numa_live_migration.py b/nova/tests/functional/libvirt/test_numa_live_migration.py
index 2f3897d6b2..0e504d2df2 100644
--- a/nova/tests/functional/libvirt/test_numa_live_migration.py
+++ b/nova/tests/functional/libvirt/test_numa_live_migration.py
@@ -206,10 +206,8 @@ class NUMALiveMigrationPositiveTests(NUMALiveMigrationPositiveBase):
# Increase cpu_dedicated_set to 0-3, expecting the live migrated server
# to end up on 2,3.
self.flags(cpu_dedicated_set='0-3', group='compute')
- self.computes['host_a'] = self.restart_compute_service(
- self.computes['host_a'])
- self.computes['host_b'] = self.restart_compute_service(
- self.computes['host_b'])
+ self.restart_compute_service('host_a')
+ self.restart_compute_service('host_b')
# Live migrate, RPC-pinning the destination host if asked
if pin_dest:
@@ -333,10 +331,8 @@ class NUMALiveMigrationRollbackTests(NUMALiveMigrationPositiveBase):
# Increase cpu_dedicated_set to 0-3, expecting the live migrated server
# to end up on 2,3.
self.flags(cpu_dedicated_set='0-3', group='compute')
- self.computes['host_a'] = self.restart_compute_service(
- self.computes['host_a'])
- self.computes['host_b'] = self.restart_compute_service(
- self.computes['host_b'])
+ self.restart_compute_service('host_a')
+ self.restart_compute_service('host_b')
# Live migrate, RPC-pinning the destination host if asked. This is a
# rollback test, so server_a is expected to remain on host_a.
diff --git a/nova/tests/functional/libvirt/test_numa_servers.py b/nova/tests/functional/libvirt/test_numa_servers.py
index fd09a11e20..8fd9729404 100644
--- a/nova/tests/functional/libvirt/test_numa_servers.py
+++ b/nova/tests/functional/libvirt/test_numa_servers.py
@@ -1187,10 +1187,8 @@ class ReshapeForPCPUsTest(NUMAServersTestBase):
self.flags(cpu_dedicated_set='0-7', group='compute')
self.flags(vcpu_pin_set=None)
- computes = {}
- for host, compute in self.computes.items():
- computes[host] = self.restart_compute_service(compute)
- self.computes = computes
+ for host in list(self.computes.keys()):
+ self.restart_compute_service(host)
# verify that the inventory, usages and allocation are correct after
# the reshape
diff --git a/nova/tests/functional/libvirt/test_pci_sriov_servers.py b/nova/tests/functional/libvirt/test_pci_sriov_servers.py
index c98a7534d1..6e5165134b 100644
--- a/nova/tests/functional/libvirt/test_pci_sriov_servers.py
+++ b/nova/tests/functional/libvirt/test_pci_sriov_servers.py
@@ -28,6 +28,7 @@ from oslo_utils import units
import nova
from nova import context
+from nova import exception
from nova.network import constants
from nova import objects
from nova.objects import fields
@@ -366,31 +367,66 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
expect_fail=False):
# The purpose here is to force an observable PCI slot update when
# moving from source to dest. This is accomplished by having a single
- # PCI device on the source, 2 PCI devices on the test, and relying on
- # the fact that our fake HostPCIDevicesInfo creates predictable PCI
- # addresses. The PCI device on source and the first PCI device on dest
- # will have identical PCI addresses. By sticking a "placeholder"
- # instance on that first PCI device on the dest, the incoming instance
- # from source will be forced to consume the second dest PCI device,
- # with a different PCI address.
+ # PCI VF device on the source, 2 PCI VF devices on the dest, and
+ # relying on the fact that our fake HostPCIDevicesInfo creates
+ # predictable PCI addresses. The PCI VF device on source and the first
+ # PCI VF device on dest will have identical PCI addresses. By sticking
+ # a "placeholder" instance on that first PCI VF device on the dest, the
+ # incoming instance from source will be forced to consume the second
+ # dest PCI VF device, with a different PCI address.
+        # We want to test server operations with SRIOV VFs and SRIOV PFs so
+        # the config of each compute host also has one extra PCI PF device
+        # without any VF children. But the two computes have different PCI PF
+        # addresses and MACs so that the test can observe the slot update as
+        # well as the MAC update during migration and after revert.
+ source_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)
+ # add an extra PF without VF to be used by direct-physical ports
+ source_pci_info.add_device(
+ dev_type='PF',
+            bus=0x82,  # the HostPCIDevicesInfo uses 0x81 by default
+ slot=0x0,
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:aa',
+ )
self.start_compute(
hostname='source',
- pci_info=fakelibvirt.HostPCIDevicesInfo(
- num_pfs=1, num_vfs=1))
+ pci_info=source_pci_info)
+
+ dest_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=2)
+ # add an extra PF without VF to be used by direct-physical ports
+ dest_pci_info.add_device(
+ dev_type='PF',
+            bus=0x82,  # the HostPCIDevicesInfo uses 0x81 by default
+ slot=0x6, # make it different from the source host
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:bb',
+ )
self.start_compute(
hostname='dest',
- pci_info=fakelibvirt.HostPCIDevicesInfo(
- num_pfs=1, num_vfs=2))
+ pci_info=dest_pci_info)
source_port = self.neutron.create_port(
{'port': self.neutron.network_4_port_1})
+ source_pf_port = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_pf})
dest_port1 = self.neutron.create_port(
{'port': self.neutron.network_4_port_2})
dest_port2 = self.neutron.create_port(
{'port': self.neutron.network_4_port_3})
source_server = self._create_server(
- networks=[{'port': source_port['port']['id']}], host='source')
+ networks=[
+ {'port': source_port['port']['id']},
+ {'port': source_pf_port['port']['id']}
+ ],
+ host='source',
+ )
dest_server1 = self._create_server(
networks=[{'port': dest_port1['port']['id']}], host='dest')
dest_server2 = self._create_server(
@@ -398,6 +434,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# Refresh the ports.
source_port = self.neutron.show_port(source_port['port']['id'])
+ source_pf_port = self.neutron.show_port(source_pf_port['port']['id'])
dest_port1 = self.neutron.show_port(dest_port1['port']['id'])
dest_port2 = self.neutron.show_port(dest_port2['port']['id'])
@@ -413,11 +450,24 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
same_slot_port = dest_port2
self._delete_server(dest_server1)
- # Before moving, explictly assert that the servers on source and dest
+ # Before moving, explicitly assert that the servers on source and dest
# have the same pci_slot in their port's binding profile
self.assertEqual(source_port['port']['binding:profile']['pci_slot'],
same_slot_port['port']['binding:profile']['pci_slot'])
+ # Assert that the direct-physical port got the pci_slot information
+ # according to the source host PF PCI device.
+ self.assertEqual(
+ '0000:82:00.0', # which is in sync with the source host pci_info
+ source_pf_port['port']['binding:profile']['pci_slot']
+ )
+ # Assert that the direct-physical port is updated with the MAC address
+ # of the PF device from the source host
+ self.assertEqual(
+ 'b4:96:91:34:f4:aa',
+ source_pf_port['port']['binding:profile']['device_mac_address']
+ )
+
# Before moving, assert that the servers on source and dest have the
# same PCI source address in their XML for their SRIOV nic.
source_conn = self.computes['source'].driver._host.get_connection()
@@ -434,14 +484,28 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
move_operation(source_server)
# Refresh the ports again, keeping in mind the source_port is now bound
- # on the dest after unshelving.
+ # on the dest after the move.
source_port = self.neutron.show_port(source_port['port']['id'])
same_slot_port = self.neutron.show_port(same_slot_port['port']['id'])
+ source_pf_port = self.neutron.show_port(source_pf_port['port']['id'])
self.assertNotEqual(
source_port['port']['binding:profile']['pci_slot'],
same_slot_port['port']['binding:profile']['pci_slot'])
+ # Assert that the direct-physical port got the pci_slot information
+ # according to the dest host PF PCI device.
+ self.assertEqual(
+ '0000:82:06.0', # which is in sync with the dest host pci_info
+ source_pf_port['port']['binding:profile']['pci_slot']
+ )
+ # Assert that the direct-physical port is updated with the MAC address
+ # of the PF device from the dest host
+ self.assertEqual(
+ 'b4:96:91:34:f4:bb',
+ source_pf_port['port']['binding:profile']['device_mac_address']
+ )
+
conn = self.computes['dest'].driver._host.get_connection()
vms = [vm._def for vm in conn._vms.values()]
self.assertEqual(2, len(vms))
@@ -469,6 +533,169 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
self._confirm_resize(source_server)
self._test_move_operation_with_neutron(move_operation)
+    def test_cold_migrate_and_revert_server_with_neutron(self):
+ # The purpose here is to force an observable PCI slot update when
+        # moving from source to dest and then from dest to source after the
+ # revert. This is accomplished by having a single
+ # PCI VF device on the source, 2 PCI VF devices on the dest, and
+ # relying on the fact that our fake HostPCIDevicesInfo creates
+ # predictable PCI addresses. The PCI VF device on source and the first
+ # PCI VF device on dest will have identical PCI addresses. By sticking
+ # a "placeholder" instance on that first PCI VF device on the dest, the
+ # incoming instance from source will be forced to consume the second
+ # dest PCI VF device, with a different PCI address.
+        # We want to test server operations with SRIOV VFs and SRIOV PFs so
+        # the config of each compute host also has one extra PCI PF device
+        # without any VF children. But the two computes have different PCI PF
+        # addresses and MACs so that the test can observe the slot update as
+        # well as the MAC update during migration and after revert.
+ source_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)
+ # add an extra PF without VF to be used by direct-physical ports
+ source_pci_info.add_device(
+ dev_type='PF',
+            bus=0x82,  # the HostPCIDevicesInfo uses 0x81 by default
+ slot=0x0,
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:aa',
+ )
+ self.start_compute(
+ hostname='source',
+ pci_info=source_pci_info)
+ dest_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=2)
+ # add an extra PF without VF to be used by direct-physical ports
+ dest_pci_info.add_device(
+ dev_type='PF',
+            bus=0x82,  # the HostPCIDevicesInfo uses 0x81 by default
+ slot=0x6, # make it different from the source host
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:bb',
+ )
+ self.start_compute(
+ hostname='dest',
+ pci_info=dest_pci_info)
+ source_port = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_1})
+ source_pf_port = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_pf})
+ dest_port1 = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_2})
+ dest_port2 = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_3})
+ source_server = self._create_server(
+ networks=[
+ {'port': source_port['port']['id']},
+ {'port': source_pf_port['port']['id']}
+ ],
+ host='source',
+ )
+ dest_server1 = self._create_server(
+ networks=[{'port': dest_port1['port']['id']}], host='dest')
+ dest_server2 = self._create_server(
+ networks=[{'port': dest_port2['port']['id']}], host='dest')
+ # Refresh the ports.
+ source_port = self.neutron.show_port(source_port['port']['id'])
+ source_pf_port = self.neutron.show_port(source_pf_port['port']['id'])
+ dest_port1 = self.neutron.show_port(dest_port1['port']['id'])
+ dest_port2 = self.neutron.show_port(dest_port2['port']['id'])
+ # Find the server on the dest compute that's using the same pci_slot as
+ # the server on the source compute, and delete the other one to make
+ # room for the incoming server from the source.
+ source_pci_slot = source_port['port']['binding:profile']['pci_slot']
+ dest_pci_slot1 = dest_port1['port']['binding:profile']['pci_slot']
+ if dest_pci_slot1 == source_pci_slot:
+ same_slot_port = dest_port1
+ self._delete_server(dest_server2)
+ else:
+ same_slot_port = dest_port2
+ self._delete_server(dest_server1)
+ # Before moving, explicitly assert that the servers on source and dest
+ # have the same pci_slot in their port's binding profile
+ self.assertEqual(source_port['port']['binding:profile']['pci_slot'],
+ same_slot_port['port']['binding:profile']['pci_slot'])
+ # Assert that the direct-physical port got the pci_slot information
+ # according to the source host PF PCI device.
+ self.assertEqual(
+ '0000:82:00.0', # which is in sync with the source host pci_info
+ source_pf_port['port']['binding:profile']['pci_slot']
+ )
+ # Assert that the direct-physical port is updated with the MAC address
+ # of the PF device from the source host
+ self.assertEqual(
+ 'b4:96:91:34:f4:aa',
+ source_pf_port['port']['binding:profile']['device_mac_address']
+ )
+ # Before moving, assert that the servers on source and dest have the
+ # same PCI source address in their XML for their SRIOV nic.
+ source_conn = self.computes['source'].driver._host.get_connection()
+        dest_conn = self.computes['dest'].driver._host.get_connection()
+ source_vms = [vm._def for vm in source_conn._vms.values()]
+ dest_vms = [vm._def for vm in dest_conn._vms.values()]
+ self.assertEqual(1, len(source_vms))
+ self.assertEqual(1, len(dest_vms))
+ self.assertEqual(1, len(source_vms[0]['devices']['nics']))
+ self.assertEqual(1, len(dest_vms[0]['devices']['nics']))
+ self.assertEqual(source_vms[0]['devices']['nics'][0]['source'],
+ dest_vms[0]['devices']['nics'][0]['source'])
+
+ # TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
+ # probably be less...dumb
+ with mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}'):
+ self._migrate_server(source_server)
+
+ # Refresh the ports again, keeping in mind the ports are now bound
+ # on the dest after migrating.
+ source_port = self.neutron.show_port(source_port['port']['id'])
+ same_slot_port = self.neutron.show_port(same_slot_port['port']['id'])
+ source_pf_port = self.neutron.show_port(source_pf_port['port']['id'])
+ self.assertNotEqual(
+ source_port['port']['binding:profile']['pci_slot'],
+ same_slot_port['port']['binding:profile']['pci_slot'])
+ # Assert that the direct-physical port got the pci_slot information
+ # according to the dest host PF PCI device.
+ self.assertEqual(
+ '0000:82:06.0', # which is in sync with the dest host pci_info
+ source_pf_port['port']['binding:profile']['pci_slot']
+ )
+ # Assert that the direct-physical port is updated with the MAC address
+ # of the PF device from the dest host
+ self.assertEqual(
+ 'b4:96:91:34:f4:bb',
+ source_pf_port['port']['binding:profile']['device_mac_address']
+ )
+ conn = self.computes['dest'].driver._host.get_connection()
+ vms = [vm._def for vm in conn._vms.values()]
+ self.assertEqual(2, len(vms))
+ for vm in vms:
+ self.assertEqual(1, len(vm['devices']['nics']))
+ self.assertNotEqual(vms[0]['devices']['nics'][0]['source'],
+ vms[1]['devices']['nics'][0]['source'])
+
+ self._revert_resize(source_server)
+
+ # Refresh the ports again, keeping in mind the ports are now bound
+ # on the source as the migration is reverted
+ source_pf_port = self.neutron.show_port(source_pf_port['port']['id'])
+
+ # Assert that the direct-physical port got the pci_slot information
+ # according to the source host PF PCI device.
+ self.assertEqual(
+ '0000:82:00.0', # which is in sync with the source host pci_info
+ source_pf_port['port']['binding:profile']['pci_slot']
+ )
+ # Assert that the direct-physical port is updated with the MAC address
+ # of the PF device from the source host
+ self.assertEqual(
+ 'b4:96:91:34:f4:aa',
+ source_pf_port['port']['binding:profile']['device_mac_address']
+ )
+
def test_evacuate_server_with_neutron(self):
def move_operation(source_server):
# Down the source compute to enable the evacuation
@@ -486,17 +713,44 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
"""
# start two compute services with differing PCI device inventory
- self.start_compute(
- hostname='test_compute0',
- pci_info=fakelibvirt.HostPCIDevicesInfo(
- num_pfs=2, num_vfs=8, numa_node=0))
- self.start_compute(
- hostname='test_compute1',
- pci_info=fakelibvirt.HostPCIDevicesInfo(
- num_pfs=1, num_vfs=2, numa_node=1))
+ source_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pfs=2, num_vfs=8, numa_node=0)
+ # add an extra PF without VF to be used by direct-physical ports
+ source_pci_info.add_device(
+ dev_type='PF',
+            bus=0x82,  # the HostPCIDevicesInfo uses 0x81 by default
+ slot=0x0,
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:aa',
+ )
+ self.start_compute(hostname='test_compute0', pci_info=source_pci_info)
- # create the port
- self.neutron.create_port({'port': self.neutron.network_4_port_1})
+ dest_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pfs=1, num_vfs=2, numa_node=1)
+ # add an extra PF without VF to be used by direct-physical ports
+ dest_pci_info.add_device(
+ dev_type='PF',
+            bus=0x82,  # the HostPCIDevicesInfo uses 0x81 by default
+ slot=0x6, # make it different from the source host
+ function=0,
+ iommu_group=42,
+ # numa node needs to be aligned with the other pci devices in this
+ # host as the instance needs to fit into a single host numa node
+ numa_node=1,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:bb',
+ )
+
+ self.start_compute(hostname='test_compute1', pci_info=dest_pci_info)
+
+ # create the ports
+ port = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_1})['port']
+ pf_port = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_pf})['port']
# create a server using the VF via neutron
extra_spec = {'hw:cpu_policy': 'dedicated'}
@@ -504,7 +758,8 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
server = self._create_server(
flavor_id=flavor_id,
networks=[
- {'port': base.LibvirtNeutronFixture.network_4_port_1['id']},
+ {'port': port['id']},
+ {'port': pf_port['id']},
],
host='test_compute0',
)
@@ -512,8 +767,8 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# our source host should have marked two PCI devices as used, the VF
# and the parent PF, while the future destination is currently unused
self.assertEqual('test_compute0', server['OS-EXT-SRV-ATTR:host'])
- self.assertPCIDeviceCounts('test_compute0', total=10, free=8)
- self.assertPCIDeviceCounts('test_compute1', total=3, free=3)
+ self.assertPCIDeviceCounts('test_compute0', total=11, free=8)
+ self.assertPCIDeviceCounts('test_compute1', total=4, free=4)
# the instance should be on host NUMA node 0, since that's where our
# PCI devices are
@@ -544,13 +799,26 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
port['binding:profile'],
)
+ # ensure the binding details sent to "neutron" are correct
+        pf_port = self.neutron.show_port(pf_port['id'])['port']
+ self.assertIn('binding:profile', pf_port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '8086:1528',
+ 'pci_slot': '0000:82:00.0',
+ 'physical_network': 'physnet4',
+ 'device_mac_address': 'b4:96:91:34:f4:aa',
+ },
+ pf_port['binding:profile'],
+ )
+
# now live migrate that server
self._live_migrate(server, 'completed')
# we should now have transitioned our usage to the destination, freeing
# up the source in the process
- self.assertPCIDeviceCounts('test_compute0', total=10, free=10)
- self.assertPCIDeviceCounts('test_compute1', total=3, free=1)
+ self.assertPCIDeviceCounts('test_compute0', total=11, free=11)
+ self.assertPCIDeviceCounts('test_compute1', total=4, free=1)
# the instance should now be on host NUMA node 1, since that's where
# our PCI devices are for this second host
@@ -577,6 +845,18 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
},
port['binding:profile'],
)
+ # ensure the binding details sent to "neutron" are correct
+        pf_port = self.neutron.show_port(pf_port['id'])['port']
+ self.assertIn('binding:profile', pf_port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '8086:1528',
+ 'pci_slot': '0000:82:06.0',
+ 'physical_network': 'physnet4',
+ 'device_mac_address': 'b4:96:91:34:f4:bb',
+ },
+ pf_port['binding:profile'],
+ )
def test_get_server_diagnostics_server_with_VF(self):
"""Ensure server disagnostics include info on VF-type PCI devices."""
@@ -635,11 +915,8 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# Disable SRIOV capabilties in PF and delete the VFs
self._disable_sriov_in_pf(pci_info_no_sriov)
- fake_connection = self._get_connection(pci_info=pci_info_no_sriov,
- hostname='test_compute0')
- self.mock_conn.return_value = fake_connection
-
- self.compute = self.start_service('compute', host='test_compute0')
+ self.start_compute('test_compute0', pci_info=pci_info_no_sriov)
+ self.compute = self.computes['test_compute0']
ctxt = context.get_admin_context()
pci_devices = objects.PciDeviceList.get_by_compute_node(
@@ -651,13 +928,9 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
self.assertEqual(1, len(pci_devices))
self.assertEqual('type-PCI', pci_devices[0].dev_type)
- # Update connection with original pci info with sriov PFs
- fake_connection = self._get_connection(pci_info=pci_info,
- hostname='test_compute0')
- self.mock_conn.return_value = fake_connection
-
- # Restart the compute service
- self.restart_compute_service(self.compute)
+ # Restart the compute service with sriov PFs
+ self.restart_compute_service(
+ self.compute.host, pci_info=pci_info, keep_hypervisor_state=False)
# Verify if PCI devices are of type type-PF or type-VF
pci_devices = objects.PciDeviceList.get_by_compute_node(
@@ -679,6 +952,88 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
],
)
+ def test_change_bound_port_vnic_type_kills_compute_at_restart(self):
+ """Create a server with a direct port and change the vnic_type of the
+ bound port to macvtap. Then restart the compute service.
+
+        As the vnic_type is changed on the port but the vif_type is hwveb
+        instead of macvtap, the vif plug logic will try to look up the netdev
+        of the parent VF. However, that VF is consumed by the instance so the
+        netdev does not exist. This causes the compute service to fail with
+        an exception during startup.
+ """
+ pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=2)
+ self.start_compute(pci_info=pci_info)
+
+ # create a direct port
+ port = self.neutron.network_4_port_1
+ self.neutron.create_port({'port': port})
+
+ # create a server using the VF via neutron
+ server = self._create_server(networks=[{'port': port['id']}])
+
+ # update the vnic_type of the port in neutron
+ port = copy.deepcopy(port)
+ port['binding:vnic_type'] = 'macvtap'
+ self.neutron.update_port(port['id'], {"port": port})
+
+ compute = self.computes['compute1']
+
+ # Force an update on the instance info cache to ensure nova gets the
+ # information about the updated port
+ with context.target_cell(
+ context.get_admin_context(),
+ self.host_mappings['compute1'].cell_mapping
+ ) as cctxt:
+ compute.manager._heal_instance_info_cache(cctxt)
+ self.assertIn(
+ 'The vnic_type of the bound port %s has been changed in '
+ 'neutron from "direct" to "macvtap". Changing vnic_type of a '
+ 'bound port is not supported by Nova. To avoid breaking the '
+ 'connectivity of the instance please change the port '
+ 'vnic_type back to "direct".' % port['id'],
+ self.stdlog.logger.output,
+ )
+
+ def fake_get_ifname_by_pci_address(pci_addr: str, pf_interface=False):
+ # we want to fail the netdev lookup only if the pci_address is
+ # already consumed by our instance. So we look into the instance
+            # definition to see if the device is attached to it as a VF
+ conn = compute.manager.driver._host.get_connection()
+ dom = conn.lookupByUUIDString(server['id'])
+ dev = dom._def['devices']['nics'][0]
+ lookup_addr = pci_addr.replace(':', '_').replace('.', '_')
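+            # fakelibvirt names hostdev sources 'pci_<address>' with ':' and
+            # '.' replaced by '_', so convert the address to that form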
+ if (
+ dev['type'] == 'hostdev' and
+ dev['source'] == 'pci_' + lookup_addr
+ ):
+ # nova tried to look up the netdev of an already consumed VF.
+ # So we have to fail
+ raise exception.PciDeviceNotFoundById(id=pci_addr)
+
+ # We need to simulate the actual failure manually as in our functional
+        # environment all the PCI lookups are mocked. In reality nova tries to
+ # look up the netdev of the pci device on the host used by the port as
+ # the parent of the macvtap. However, as the originally direct port is
+ # bound to the instance, the VF pci device is already consumed by the
+ # instance and therefore there is no netdev for the VF.
+ with mock.patch(
+ 'nova.pci.utils.get_ifname_by_pci_address',
+ side_effect=fake_get_ifname_by_pci_address,
+ ):
+ # Nova cannot prevent the vnic_type change on a bound port. Neutron
+ # should prevent that instead. But the nova-compute should still
+        # be able to start up and only log an ERROR for this instance in an
+        # inconsistent state.
+ self.restart_compute_service('compute1')
+
+ self.assertIn(
+ 'Virtual interface plugging failed for instance. Probably the '
+ 'vnic_type of the bound port has been changed. Nova does not '
+ 'support such change.',
+ self.stdlog.logger.output,
+ )
+
class SRIOVAttachDetachTest(_PCIServersTestBase):
# no need for aliases as these test will request SRIOV via neutron
@@ -742,10 +1097,9 @@ class SRIOVAttachDetachTest(_PCIServersTestBase):
host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
cpu_cores=2, cpu_threads=2)
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)
- fake_connection = self._get_connection(host_info, pci_info)
- self.mock_conn.return_value = fake_connection
-
- self.compute = self.start_service('compute', host='test_compute0')
+ self.start_compute(
+ 'test_compute0', host_info=host_info, pci_info=pci_info)
+ self.compute = self.computes['test_compute0']
# Create server with a port
server = self._create_server(networks=[{'port': first_port_id}])
@@ -834,7 +1188,7 @@ class VDPAServersTest(_PCIServersTestBase):
# fixture already stubbed.
self.neutron = self.useFixture(base.LibvirtNeutronFixture(self))
- def start_compute(self):
+ def start_vdpa_compute(self, hostname='compute-0'):
vf_ratio = self.NUM_VFS // self.NUM_PFS
pci_info = fakelibvirt.HostPCIDevicesInfo(
@@ -872,7 +1226,7 @@ class VDPAServersTest(_PCIServersTestBase):
driver_name='mlx5_core')
vdpa_info.add_device(f'vdpa_vdpa{idx}', idx, vf)
- return super().start_compute(
+ return super().start_compute(hostname=hostname,
pci_info=pci_info, vdpa_info=vdpa_info,
libvirt_version=self.FAKE_LIBVIRT_VERSION,
qemu_version=self.FAKE_QEMU_VERSION)
@@ -927,7 +1281,7 @@ class VDPAServersTest(_PCIServersTestBase):
fake_create,
)
- hostname = self.start_compute()
+ hostname = self.start_vdpa_compute()
num_pci = self.NUM_PFS + self.NUM_VFS
# both the PF and VF with vDPA capabilities (dev_type=vdpa) should have
@@ -960,12 +1314,16 @@ class VDPAServersTest(_PCIServersTestBase):
port['binding:profile'],
)
- def _test_common(self, op, *args, **kwargs):
- self.start_compute()
-
+ def _create_port_and_server(self):
# create the port and a server, with the port attached to the server
vdpa_port = self.create_vdpa_port()
server = self._create_server(networks=[{'port': vdpa_port['id']}])
+ return vdpa_port, server
+
+ def _test_common(self, op, *args, **kwargs):
+ self.start_vdpa_compute()
+
+ vdpa_port, server = self._create_port_and_server()
# attempt the unsupported action and ensure it fails
ex = self.assertRaises(
@@ -976,13 +1334,11 @@ class VDPAServersTest(_PCIServersTestBase):
ex.response.text)
def test_attach_interface(self):
- self.start_compute()
-
+ self.start_vdpa_compute()
# create the port and a server, but don't attach the port to the server
# yet
vdpa_port = self.create_vdpa_port()
server = self._create_server(networks='none')
-
# attempt to attach the port to the server
ex = self.assertRaises(
client.OpenStackApiException,
@@ -994,21 +1350,282 @@ class VDPAServersTest(_PCIServersTestBase):
def test_detach_interface(self):
self._test_common(self._detach_interface, uuids.vdpa_port)
- def test_shelve(self):
- self._test_common(self._shelve_server)
+ def test_shelve_offload(self):
+ hostname = self.start_vdpa_compute()
+ vdpa_port, server = self._create_port_and_server()
+ # assert the port is bound to the vm and the compute host
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(server['id'], port['device_id'])
+ self.assertEqual(hostname, port['binding:host_id'])
+ num_pci = self.NUM_PFS + self.NUM_VFS
+        # -2 as we claim the vdpa device, which makes the parent PF
+        # unavailable
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
+ server = self._shelve_server(server)
+        # now that the vm is shelve offloaded the port should not be bound
+ # to any host but should still be owned by the vm
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(server['id'], port['device_id'])
+ # FIXME(sean-k-mooney): we should be unbinding the port from
+ # the host when we shelve offload but we don't today.
+        # This is unrelated to vdpa ports and is a general issue.
+ self.assertEqual(hostname, port['binding:host_id'])
+ self.assertIn('binding:profile', port)
+ self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ self.assertIsNone(server['OS-EXT-SRV-ATTR:host'])
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
- def test_suspend(self):
- self._test_common(self._suspend_server)
+ def test_unshelve_to_same_host(self):
+ hostname = self.start_vdpa_compute()
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
+
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ hostname, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(hostname, port['binding:host_id'])
+
+ server = self._shelve_server(server)
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
+ self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ # FIXME(sean-k-mooney): shelve offload should unbind the port
+ # self.assertEqual('', port['binding:host_id'])
+ self.assertEqual(hostname, port['binding:host_id'])
+
+ server = self._unshelve_server(server)
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ hostname, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(hostname, port['binding:host_id'])
+
+ def test_unshelve_to_different_host(self):
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(source, port['binding:host_id'])
+
+ server = self._shelve_server(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ # FIXME(sean-k-mooney): shelve should unbind the port
+ # self.assertEqual('', port['binding:host_id'])
+ self.assertEqual(source, port['binding:host_id'])
+
+ # force the unshelve to the other host
+ self.api.put_service(
+ self.computes['source'].service_ref.uuid, {'status': 'disabled'})
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+ server = self._unshelve_server(server)
+ # the dest devices should be claimed
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ # and the source host devices should still be free
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(dest, port['binding:host_id'])
def test_evacute(self):
- self._test_common(self._evacuate_server)
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
- def test_resize(self):
- flavor_id = self._create_flavor()
- self._test_common(self._resize_server, flavor_id)
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(source, port['binding:host_id'])
+
+ # stop the source compute and enable the dest
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ self.computes['source'].stop()
+ # Down the source compute to enable the evacuation
+ self.api.put_service(
+ self.computes['source'].service_ref.uuid, {'forced_down': True})
+
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+ server = self._evacuate_server(server)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(dest, port['binding:host_id'])
+
+ # as the source compute is offline the pci claims will not be cleaned
+ # up on the source compute.
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ # but if you fix/restart the source node the allocations for evacuated
+ # instances should be released.
+ self.restart_compute_service(source)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+
+ def test_resize_same_host(self):
+ self.flags(allow_resize_to_same_host=True)
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ source = self.start_vdpa_compute()
+ vdpa_port, server = self._create_port_and_server()
+        # before the resize the vm uses 1 VF, which also marks the parent
+        # PF as unavailable, so we assert 2 devices are in use.
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ flavor_id = self._create_flavor(name='new-flavor')
+ self.assertNotEqual(server['flavor']['original_name'], 'new-flavor')
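+        # stub out the disk handling; migrate_disk_and_power_off returns a
+        # JSON-encoded disk_info string, so an empty '{}' is enough here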
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ server = self._resize_server(server, flavor_id)
+ self.assertEqual(
+ server['flavor']['original_name'], 'new-flavor')
+        # in resize verify the VF claims are doubled even for a same-host
+        # resize, so assert that 3 devices are in use: 1 PF and 2 VFs.
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 3)
+ server = self._confirm_resize(server)
+        # but once we confirm it should be reduced back to 1 PF and 1 VF
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+        # assert the hostname has not changed as part of the resize.
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ def test_resize_different_host(self):
+ self.flags(allow_resize_to_same_host=False)
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ flavor_id = self._create_flavor(name='new-flavor')
+ self.assertNotEqual(server['flavor']['original_name'], 'new-flavor')
+ # disable the source compute and enable the dest
+ self.api.put_service(
+ self.computes['source'].service_ref.uuid, {'status': 'disabled'})
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ server = self._resize_server(server, flavor_id)
+ self.assertEqual(
+ server['flavor']['original_name'], 'new-flavor')
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ server = self._confirm_resize(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ def test_resize_revert(self):
+ self.flags(allow_resize_to_same_host=False)
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ flavor_id = self._create_flavor(name='new-flavor')
+ self.assertNotEqual(server['flavor']['original_name'], 'new-flavor')
+ # disable the source compute and enable the dest
+ self.api.put_service(
+ self.computes['source'].service_ref.uuid, {'status': 'disabled'})
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ server = self._resize_server(server, flavor_id)
+ self.assertEqual(
+ server['flavor']['original_name'], 'new-flavor')
+ # in resize verify both the dest and source pci claims should be
+ # present.
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ server = self._revert_resize(server)
+ # but once we revert the dest claims should be freed.
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
def test_cold_migrate(self):
- self._test_common(self._migrate_server)
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+        # enable the dest; we do not need to disable the source since cold
+        # migrate won't happen to the same host with the libvirt driver
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ server = self._migrate_server(server)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ server = self._confirm_resize(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ def test_suspend(self):
+ self._test_common(self._suspend_server)
class PCIServersTest(_PCIServersTestBase):
diff --git a/nova/tests/functional/libvirt/test_reshape.py b/nova/tests/functional/libvirt/test_reshape.py
index 5c73ffbf5f..d0102f1247 100644
--- a/nova/tests/functional/libvirt/test_reshape.py
+++ b/nova/tests/functional/libvirt/test_reshape.py
@@ -30,17 +30,7 @@ LOG = logging.getLogger(__name__)
class VGPUReshapeTests(base.ServersTestBase):
- @mock.patch('nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
- return_value={'total': 128,
- 'used': 44,
- 'free': 84})
- @mock.patch('nova.virt.libvirt.driver.libvirt_utils.is_valid_hostname',
- return_value=True)
- @mock.patch('nova.virt.libvirt.driver.libvirt_utils.file_open',
- side_effect=[io.BytesIO(b''), io.BytesIO(b''),
- io.BytesIO(b'')])
- def test_create_servers_with_vgpu(
- self, mock_file_open, mock_valid_hostname, mock_get_fs_info):
+ def test_create_servers_with_vgpu(self):
"""Verify that vgpu reshape works with libvirt driver
1) create two servers with an old tree where the VGPU resource is on
@@ -49,7 +39,8 @@ class VGPUReshapeTests(base.ServersTestBase):
3) check that the allocations of the servers are still valid
4) create another server now against the new tree
"""
-
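+        # mock_file_open is provided by the base test class; give it the
+        # three empty files the driver will read during the test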
+ self.mock_file_open.side_effect = [
+ io.BytesIO(b''), io.BytesIO(b''), io.BytesIO(b'')]
# NOTE(gibi): We cannot simply ask the virt driver to create an old
# RP tree with vgpu on the root RP as that code path does not exist
# any more. So we have to hack a "bit". We will create a compute
@@ -81,11 +72,11 @@ class VGPUReshapeTests(base.ServersTestBase):
# ignore the content of the above HostMdevDeviceInfo
self.flags(enabled_mdev_types='', group='devices')
- hostname = self.start_compute(
+ self.hostname = self.start_compute(
hostname='compute1',
mdev_info=fakelibvirt.HostMdevDevicesInfo(devices=mdevs),
)
- self.compute = self.computes[hostname]
+ self.compute = self.computes[self.hostname]
# create the VGPU resource in placement manually
compute_rp_uuid = self.placement.get(
@@ -167,7 +158,7 @@ class VGPUReshapeTests(base.ServersTestBase):
allocations[compute_rp_uuid]['resources'])
# restart compute which will trigger a reshape
- self.compute = self.restart_compute_service(self.compute)
+ self.compute = self.restart_compute_service(self.hostname)
# verify that the inventory, usages and allocation are correct after
# the reshape
diff --git a/nova/tests/functional/libvirt/test_vgpu.py b/nova/tests/functional/libvirt/test_vgpu.py
index f25ce44221..686582120a 100644
--- a/nova/tests/functional/libvirt/test_vgpu.py
+++ b/nova/tests/functional/libvirt/test_vgpu.py
@@ -49,11 +49,11 @@ class VGPUTestBase(base.ServersTestBase):
def setUp(self):
super(VGPUTestBase, self).setUp()
- self.useFixture(fixtures.MockPatch(
- 'nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
- return_value={'total': 128,
- 'used': 44,
- 'free': 84}))
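+        # _get_local_gb_info is already mocked at class level by the base
+        # fixture, so it is enough to override its return value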
+ libvirt_driver.LibvirtDriver._get_local_gb_info.return_value = {
+ 'total': 128,
+ 'used': 44,
+ 'free': 84,
+ }
self.useFixture(fixtures.MockPatch(
'nova.privsep.libvirt.create_mdev',
side_effect=self._create_mdev))
@@ -113,8 +113,8 @@ class VGPUTestBase(base.ServersTestBase):
parent=libvirt_parent)})
return uuid
- def start_compute(self, hostname):
- hostname = super().start_compute(
+ def start_compute_with_vgpu(self, hostname):
+ hostname = self.start_compute(
pci_info=fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
),
@@ -197,7 +197,7 @@ class VGPUTests(VGPUTestBase):
enabled_mdev_types=fakelibvirt.NVIDIA_11_VGPU_TYPE,
group='devices')
- self.compute1 = self.start_compute('host1')
+ self.compute1 = self.start_compute_with_vgpu('host1')
def assert_vgpu_usage_for_compute(self, compute, expected):
self.assert_mdev_usage(compute, expected_amount=expected)
@@ -211,7 +211,7 @@ class VGPUTests(VGPUTestBase):
def test_resize_servers_with_vgpu(self):
# Add another compute for the sake of resizing
- self.compute2 = self.start_compute('host2')
+ self.compute2 = self.start_compute_with_vgpu('host2')
server = self._create_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=self.flavor, host=self.compute1.host,
@@ -337,7 +337,7 @@ class VGPUMultipleTypesTests(VGPUTestBase):
# Prepare traits for later on
self._create_trait('CUSTOM_NVIDIA_11')
self._create_trait('CUSTOM_NVIDIA_12')
- self.compute1 = self.start_compute('host1')
+ self.compute1 = self.start_compute_with_vgpu('host1')
def test_create_servers_with_vgpu(self):
self._create_server(
@@ -369,13 +369,12 @@ class VGPUMultipleTypesTests(VGPUTestBase):
def test_create_servers_with_specific_type(self):
# Regenerate the PCI addresses so both pGPUs now support nvidia-12
- connection = self.computes[
- self.compute1.host].driver._host.get_connection()
- connection.pci_info = fakelibvirt.HostPCIDevicesInfo(
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
multiple_gpu_types=True)
# Make a restart to update the Resource Providers
- self.compute1 = self.restart_compute_service(self.compute1)
+ self.compute1 = self.restart_compute_service(
+ self.compute1.host, pci_info=pci_info, keep_hypervisor_state=False)
pgpu1_rp_uuid = self._get_provider_uuid_by_name(
self.compute1.host + '_' + fakelibvirt.MDEVCAP_DEV1_PCI_ADDR)
pgpu2_rp_uuid = self._get_provider_uuid_by_name(
@@ -451,7 +450,7 @@ class DifferentMdevClassesTests(VGPUTestBase):
group='mdev_nvidia-12')
self.flags(mdev_class='CUSTOM_NOTVGPU', group='mdev_mlx5_core')
- self.compute1 = self.start_compute('host1')
+ self.compute1 = self.start_compute_with_vgpu('host1')
# Regenerate the PCI addresses so they can support both mlx5 and
# nvidia-12 types
connection = self.computes[
@@ -460,7 +459,7 @@ class DifferentMdevClassesTests(VGPUTestBase):
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
generic_types=True)
# Make a restart to update the Resource Providers
- self.compute1 = self.restart_compute_service(self.compute1)
+ self.compute1 = self.restart_compute_service('host1')
def test_create_servers_with_different_mdev_classes(self):
physdev1_rp_uuid = self._get_provider_uuid_by_name(
@@ -498,7 +497,7 @@ class DifferentMdevClassesTests(VGPUTestBase):
def test_resize_servers_with_mlx5(self):
# Add another compute for the sake of resizing
- self.compute2 = self.start_compute('host2')
+ self.compute2 = self.start_compute_with_vgpu('host2')
# Regenerate the PCI addresses so they can support both mlx5 and
# nvidia-12 types
connection = self.computes[
@@ -507,7 +506,7 @@ class DifferentMdevClassesTests(VGPUTestBase):
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
generic_types=True)
# Make a restart to update the Resource Providers
- self.compute2 = self.restart_compute_service(self.compute2)
+ self.compute2 = self.restart_compute_service('host2')
# Use the new flavor for booting
server = self._create_server(
diff --git a/nova/tests/functional/libvirt/test_vtpm.py b/nova/tests/functional/libvirt/test_vtpm.py
index c07c38f02d..4e9c705052 100644
--- a/nova/tests/functional/libvirt/test_vtpm.py
+++ b/nova/tests/functional/libvirt/test_vtpm.py
@@ -128,7 +128,7 @@ class VTPMServersTest(base.ServersTestBase):
# the presence of users on the host, none of which makes sense here
_p = mock.patch(
'nova.virt.libvirt.driver.LibvirtDriver._check_vtpm_support')
- self.mock_conn = _p.start()
+ _p.start()
self.addCleanup(_p.stop)
self.key_mgr = crypto._get_key_manager()
diff --git a/nova/tests/functional/regressions/test_bug_1628606.py b/nova/tests/functional/regressions/test_bug_1628606.py
new file mode 100644
index 0000000000..0fccd78cce
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1628606.py
@@ -0,0 +1,60 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional.api import client
+from nova.tests.functional import fixtures as func_fixtures
+from nova.tests.functional import integrated_helpers
+from unittest import mock
+
+
+class PostLiveMigrationFail(
+ test.TestCase, integrated_helpers.InstanceHelperMixin):
+ """Regression test for bug 1628606
+ """
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
+ self.useFixture(func_fixtures.PlacementFixture())
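+        # HostNameWeigherFixture gives a predictable scheduling order so the
+        # server is created on host1 (the source)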
+ self.useFixture(nova_fixtures.HostNameWeigherFixture())
+
+ self.start_service('conductor')
+ self.start_service('scheduler')
+
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ api_version='v2.1'))
+
+ self.api = api_fixture.admin_api
+ self.api.microversion = 'latest'
+
+ self.src = self._start_compute(host='host1')
+ self.dest = self._start_compute(host='host2')
+
+ @mock.patch(
+ 'nova.compute.manager.ComputeManager'
+ '._post_live_migration_remove_source_vol_connections')
+ def test_post_live_migration(self, mock_migration):
+ server = self._create_server(networks=[])
+ self.assertEqual(self.src.host, server['OS-EXT-SRV-ATTR:host'])
+
+ error = client.OpenStackApiException(
+ "Failed to remove source vol connection post live migration")
+ mock_migration.side_effect = error
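+        # failing this step aborts post-live-migration handling on the source
+        # while the instance is already running on the destination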
+
+ server = self._live_migrate(
+ server, migration_expected_state='error',
+ server_expected_state='ERROR')
+
+ self.assertEqual(self.dest.host, server['OS-EXT-SRV-ATTR:host'])
diff --git a/nova/tests/functional/regressions/test_bug_1781286.py b/nova/tests/functional/regressions/test_bug_1781286.py
index 7b2d603092..bb47eb0ea8 100644
--- a/nova/tests/functional/regressions/test_bug_1781286.py
+++ b/nova/tests/functional/regressions/test_bug_1781286.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import fixtures
import mock
from oslo_db import exception as oslo_db_exc
@@ -67,11 +66,11 @@ class RescheduleBuildAvailabilityZoneUpCall(
def wrap_bari(*args, **kwargs):
# Poison the AZ query to blow up as if the cell conductor does not
# have access to the API DB.
- self.useFixture(
- fixtures.MockPatch(
- 'nova.objects.AggregateList.get_by_host',
- side_effect=oslo_db_exc.CantStartEngineError))
- return original_bari(*args, **kwargs)
+ with mock.patch(
+ 'nova.objects.AggregateList.get_by_host',
+ side_effect=oslo_db_exc.CantStartEngineError
+ ):
+ return original_bari(*args, **kwargs)
self.stub_out('nova.compute.manager.ComputeManager.'
'build_and_run_instance', wrap_bari)
@@ -81,10 +80,6 @@ class RescheduleBuildAvailabilityZoneUpCall(
# compute service we have to wait for the notification that the build
# is complete and then stop the mock so we can use the API again.
self.notifier.wait_for_versioned_notifications('instance.create.end')
- # Note that we use stopall here because we actually called
- # build_and_run_instance twice so we have more than one instance of
- # the mock that needs to be stopped.
- mock.patch.stopall()
server = self._wait_for_state_change(server, 'ACTIVE')
# We should have rescheduled and the instance AZ should be set from the
# Selection object. Since neither compute host is in an AZ, the server
@@ -128,19 +123,20 @@ class RescheduleMigrateAvailabilityZoneUpCall(
self.rescheduled = None
def wrap_prep_resize(_self, *args, **kwargs):
- # Poison the AZ query to blow up as if the cell conductor does not
- # have access to the API DB.
- self.agg_mock = self.useFixture(
- fixtures.MockPatch(
- 'nova.objects.AggregateList.get_by_host',
- side_effect=oslo_db_exc.CantStartEngineError)).mock
if self.rescheduled is None:
# Track the first host that we rescheduled from.
self.rescheduled = _self.host
# Trigger a reschedule.
raise exception.ComputeResourcesUnavailable(
reason='test_migrate_reschedule_blocked_az_up_call')
- return original_prep_resize(_self, *args, **kwargs)
+ # Poison the AZ query to blow up as if the cell conductor does not
+ # have access to the API DB.
+ with mock.patch(
+ 'nova.objects.AggregateList.get_by_host',
+ side_effect=oslo_db_exc.CantStartEngineError,
+ ) as agg_mock:
+ self.agg_mock = agg_mock
+ return original_prep_resize(_self, *args, **kwargs)
self.stub_out('nova.compute.manager.ComputeManager._prep_resize',
wrap_prep_resize)
diff --git a/nova/tests/functional/regressions/test_bug_1888395.py b/nova/tests/functional/regressions/test_bug_1888395.py
index e582ad3e85..c50b78e2f6 100644
--- a/nova/tests/functional/regressions/test_bug_1888395.py
+++ b/nova/tests/functional/regressions/test_bug_1888395.py
@@ -23,14 +23,8 @@ from nova.tests.fixtures import libvirt as fakelibvirt
from nova.tests.functional.libvirt import base as libvirt_base
-class TestLiveMigrationWithoutMultiplePortBindings(
+class TestLiveMigrationWithoutMultiplePortBindingsBase(
libvirt_base.ServersTestBase):
- """Regression test for bug 1888395.
-
- This regression test asserts that Live migration works when
- neutron does not support the binding-extended api extension
- and the legacy single port binding workflow is used.
- """
ADMIN_API = True
microversion = 'latest'
@@ -72,6 +66,16 @@ class TestLiveMigrationWithoutMultiplePortBindings(
'nova.tests.fixtures.libvirt.Domain.migrateToURI3',
self._migrate_stub))
+
+class TestLiveMigrationWithoutMultiplePortBindings(
+ TestLiveMigrationWithoutMultiplePortBindingsBase):
+ """Regression test for bug 1888395.
+
+ This regression test asserts that Live migration works when
+ neutron does not support the binding-extended api extension
+ and the legacy single port binding workflow is used.
+ """
+
def _migrate_stub(self, domain, destination, params, flags):
"""Stub out migrateToURI3."""
@@ -124,3 +128,25 @@ class TestLiveMigrationWithoutMultiplePortBindings(
server, {'OS-EXT-SRV-ATTR:host': 'end_host', 'status': 'ACTIVE'})
msg = "NotImplementedError: Cannot load 'vif_type' in the base class"
self.assertNotIn(msg, self.stdlog.logger.output)
+
+
+class TestLiveMigrationRollbackWithoutMultiplePortBindings(
+ TestLiveMigrationWithoutMultiplePortBindingsBase):
+
+ def _migrate_stub(self, domain, destination, params, flags):
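+        # fail the migration job on the fakelibvirt domain so nova exercises
+        # its rollback path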
+ source = self.computes['start_host']
+ conn = source.driver._host.get_connection()
+ dom = conn.lookupByUUIDString(self.server['id'])
+ dom.fail_job()
+
+ def test_live_migration_rollback(self):
+ self.server = self._create_server(
+ host='start_host',
+ networks=[{'port': self.neutron.port_1['id']}])
+
+ self.assertFalse(
+ self.neutron_api.has_port_binding_extension(self.ctxt))
+ # NOTE(artom) The live migration will still fail (we fail it in
+ # _migrate_stub()), but the server should correctly rollback to ACTIVE.
+ self._live_migrate(self.server, migration_expected_state='failed',
+ server_expected_state='ACTIVE')
diff --git a/nova/tests/functional/regressions/test_bug_1890244.py b/nova/tests/functional/regressions/test_bug_1890244.py
new file mode 100644
index 0000000000..bf969eebe7
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1890244.py
@@ -0,0 +1,96 @@
+# Copyright 2017 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from nova import context
+from nova import objects
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional import fixtures as func_fixtures
+from nova.tests.functional import integrated_helpers
+
+
+class IgnoreDeletedServerGroupsTest(
+ test.TestCase, integrated_helpers.InstanceHelperMixin,
+):
+ """Regression test for bug 1890244
+
+    If instances are created as members of server groups it
+    should be possible to evacuate them if the server groups are
+    deleted prior to the host failure.
+ """
+
+ def setUp(self):
+ super().setUp()
+ # Stub out external dependencies.
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.useFixture(nova_fixtures.GlanceFixture(self))
+ self.useFixture(func_fixtures.PlacementFixture())
+ # Start nova controller services.
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ api_version='v2.1'))
+ self.api = api_fixture.admin_api
+ self.start_service('conductor')
+ # Use a custom weigher to make sure that we have a predictable
+ # scheduling sort order.
+ self.useFixture(nova_fixtures.HostNameWeigherFixture())
+ self.start_service('scheduler')
+ # Start two computes, one where the server will be created and another
+ # where we'll evacuate it to.
+ self.src = self._start_compute('host1')
+ self.dest = self._start_compute('host2')
+ self.notifier = self.useFixture(
+ nova_fixtures.NotificationFixture(self)
+ )
+
+ def test_evacuate_after_group_delete(self):
+ # Create an anti-affinity group for the server.
+ body = {
+ 'server_group': {
+ 'name': 'test-group',
+ 'policies': ['anti-affinity']
+ }
+ }
+ group_id = self.api.api_post(
+ '/os-server-groups', body).body['server_group']['id']
+
+ # Create a server in the group which should land on host1 due to our
+ # custom weigher.
+ body = {'server': self._build_server()}
+ body['os:scheduler_hints'] = {'group': group_id}
+ server = self.api.post_server(body)
+ server = self._wait_for_state_change(server, 'ACTIVE')
+ self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host'])
+
+ # Down the source compute to enable the evacuation
+ self.api.microversion = '2.11' # Cap for the force-down call.
+ self.api.force_down_service('host1', 'nova-compute', True)
+ self.api.microversion = 'latest'
+ self.src.stop()
+
+ # assert the server currently has a server group
+ reqspec = objects.RequestSpec.get_by_instance_uuid(
+ context.get_admin_context(), server['id'])
+ self.assertIsNotNone(reqspec.instance_group)
+ self.assertIn('group', reqspec.scheduler_hints)
+ # then delete it so that we need to clean it up on evac
+ self.api.api_delete(f'/os-server-groups/{group_id}')
+
+ # Initiate evacuation
+ server = self._evacuate_server(
+ server, expected_host='host2', expected_migration_status='done'
+ )
+ reqspec = objects.RequestSpec.get_by_instance_uuid(
+ context.get_admin_context(), server['id'])
+ self.assertIsNone(reqspec.instance_group)
+ self.assertNotIn('group', reqspec.scheduler_hints)
diff --git a/nova/tests/functional/regressions/test_bug_1896463.py b/nova/tests/functional/regressions/test_bug_1896463.py
index 6663ebe8cd..dc74791e0e 100644
--- a/nova/tests/functional/regressions/test_bug_1896463.py
+++ b/nova/tests/functional/regressions/test_bug_1896463.py
@@ -51,14 +51,6 @@ class TestEvacuateResourceTrackerRace(
self.api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
- self.useFixture(fixtures.MockPatch(
- 'nova.pci.utils.get_mac_by_pci_address',
- return_value='52:54:00:1e:59:c6'))
-
- self.useFixture(fixtures.MockPatch(
- 'nova.pci.utils.get_vf_num_by_pci_address',
- return_value=1))
-
self.admin_api = self.api_fixture.admin_api
self.admin_api.microversion = 'latest'
self.api = self.admin_api
diff --git a/nova/tests/functional/regressions/test_bug_1951656.py b/nova/tests/functional/regressions/test_bug_1951656.py
new file mode 100644
index 0000000000..d705ff6fe3
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1951656.py
@@ -0,0 +1,73 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import uuidutils
+
+
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import test_vgpu
+from nova.virt.libvirt import utils as libvirt_utils
+
+
+class VGPUTestsLibvirt7_7(test_vgpu.VGPUTestBase):
+
+ def _create_mdev(self, physical_device, mdev_type, uuid=None):
+ # We need to fake the newly created sysfs object by adding a new
+ # FakeMdevDevice in the existing persisted Connection object so
+ # when asking to get the existing mdevs, we would see it.
+ if not uuid:
+ uuid = uuidutils.generate_uuid()
+ mdev_name = libvirt_utils.mdev_uuid2name(uuid)
+ libvirt_parent = self.pci2libvirt_address(physical_device)
+
+ # Libvirt 7.7 now creates mdevs with a parent_addr suffix.
+ new_mdev_name = '_'.join([mdev_name, libvirt_parent])
+
+        # Here, we get the right compute thanks to self._current_host that
+        # was modified just before
+ connection = self.computes[
+ self._current_host].driver._host.get_connection()
+ connection.mdev_info.devices.update(
+ {mdev_name: fakelibvirt.FakeMdevDevice(dev_name=new_mdev_name,
+ type_id=mdev_type,
+ parent=libvirt_parent)})
+ return uuid
+
+ def setUp(self):
+ super(VGPUTestsLibvirt7_7, self).setUp()
+ extra_spec = {"resources:VGPU": "1"}
+ self.flavor = self._create_flavor(extra_spec=extra_spec)
+
+ # Start compute1 supporting only nvidia-11
+ self.flags(
+ enabled_mdev_types=fakelibvirt.NVIDIA_11_VGPU_TYPE,
+ group='devices')
+
+ self.compute1 = self.start_compute_with_vgpu('host1')
+
+ def test_create_servers_with_vgpu(self):
+
+ # Create a single instance against a specific compute node.
+ self._create_server(
+ image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ flavor_id=self.flavor, host=self.compute1.host,
+ networks='auto', expected_state='ACTIVE')
+
+ self.assert_mdev_usage(self.compute1, expected_amount=1)
+
+ self._create_server(
+ image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ flavor_id=self.flavor, host=self.compute1.host,
+ networks='auto', expected_state='ACTIVE')
+
+ self.assert_mdev_usage(self.compute1, expected_amount=2)
diff --git a/nova/tests/functional/test_aggregates.py b/nova/tests/functional/test_aggregates.py
index 8dfb345578..1ffa3ada92 100644
--- a/nova/tests/functional/test_aggregates.py
+++ b/nova/tests/functional/test_aggregates.py
@@ -935,11 +935,11 @@ class TestAggregateMultiTenancyIsolationFilter(
# Start nova services.
self.start_service('conductor')
- self.admin_api = self.useFixture(
- nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
- self.api = self.useFixture(
- nova_fixtures.OSAPIFixture(api_version='v2.1',
- project_id=uuids.non_admin)).api
+ api_fixture = self.useFixture(
+ nova_fixtures.OSAPIFixture(api_version='v2.1'))
+ self.admin_api = api_fixture.admin_api
+ self.api = api_fixture.api
+ self.api.project_id = uuids.non_admin
# Add the AggregateMultiTenancyIsolation to the list of enabled
# filters since it is not enabled by default.
enabled_filters = CONF.filter_scheduler.enabled_filters
@@ -1037,15 +1037,15 @@ class AggregateMultiTenancyIsolationColdMigrateTest(
self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
- # Intentionally keep these separate since we want to create the
- # server with the non-admin user in a different project.
- admin_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+        # Intentionally define different project ids for the two clients since
+ # we want to create the server with the non-admin user in a different
+ # project.
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1', project_id=uuids.admin_project))
- self.admin_api = admin_api_fixture.admin_api
+ self.admin_api = api_fixture.admin_api
self.admin_api.microversion = 'latest'
- user_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1', project_id=uuids.user_project))
- self.api = user_api_fixture.api
+ self.api = api_fixture.api
+ self.api.project_id = uuids.user_project
self.api.microversion = 'latest'
self.start_service('conductor')
diff --git a/nova/tests/functional/test_images.py b/nova/tests/functional/test_images.py
index 340e883da9..e7e9f2a6c9 100644
--- a/nova/tests/functional/test_images.py
+++ b/nova/tests/functional/test_images.py
@@ -12,7 +12,6 @@
from oslo_utils.fixture import uuidsentinel as uuids
-from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
@@ -70,10 +69,9 @@ class ImagesTest(integrated_helpers._IntegratedTestBase):
server = self.api.post_server({"server": server})
server = self._wait_for_state_change(server, 'ACTIVE')
- # Create an admin API fixture with a unique project ID.
- admin_api = self.useFixture(
- nova_fixtures.OSAPIFixture(
- project_id=uuids.admin_project)).admin_api
+ # use an admin API with a unique project ID.
+ admin_api = self.api_fixture.alternative_admin_api
+ admin_api.project_id = uuids.admin_project
# Create a snapshot of the server using the admin project.
name = 'admin-created-snapshot'
diff --git a/nova/tests/functional/test_server_group.py b/nova/tests/functional/test_server_group.py
index 08e47b3971..a64a04b2c9 100644
--- a/nova/tests/functional/test_server_group.py
+++ b/nova/tests/functional/test_server_group.py
@@ -64,12 +64,12 @@ class ServerGroupTestBase(test.TestCase,
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ self.api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
- self.api = api_fixture.api
+ self.api = self.api_fixture.api
self.api.microversion = self.microversion
- self.admin_api = api_fixture.admin_api
+ self.admin_api = self.api_fixture.admin_api
self.admin_api.microversion = self.microversion
self.start_service('conductor')
@@ -174,13 +174,8 @@ class ServerGroupTestV21(ServerGroupTestBase):
# Create an API using project 'openstack1'.
# This is a non-admin API.
- #
- # NOTE(sdague): this is actually very much *not* how this
- # fixture should be used. This actually spawns a whole
- # additional API server. Should be addressed in the future.
- api_openstack1 = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version=self.api_major_version,
- project_id=PROJECT_ID_ALT)).api
+ api_openstack1 = self.api_fixture.alternative_api
+ api_openstack1.project_id = PROJECT_ID_ALT
api_openstack1.microversion = self.microversion
# Create a server group in project 'openstack'
diff --git a/nova/tests/functional/test_server_rescue.py b/nova/tests/functional/test_server_rescue.py
index fa96c10344..8f5b912943 100644
--- a/nova/tests/functional/test_server_rescue.py
+++ b/nova/tests/functional/test_server_rescue.py
@@ -10,6 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
+
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
@@ -23,7 +27,37 @@ class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase):
self.useFixture(nova_fixtures.CinderFixture(self))
self._start_compute(host='host1')
- def _create_bfv_server(self):
+ def _create_image(self, metadata=None):
+ image = {
+ 'id': uuids.stable_rescue_image,
+ 'name': 'fake-image-rescue-property',
+ 'created_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'updated_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'raw',
+ 'disk_format': 'raw',
+ 'size': '25165824',
+ 'min_ram': 0,
+ 'min_disk': 0,
+ 'protected': False,
+ 'visibility': 'public',
+ 'tags': ['tag1', 'tag2'],
+ 'properties': {
+ 'kernel_id': 'nokernel',
+ 'ramdisk_id': 'nokernel',
+ 'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi',
+ },
+ }
+ if metadata:
+ image['properties'].update(metadata)
+ return self.glance.create(None, image)
+
+ def _create_bfv_server(self, metadata=None):
+ image = self._create_image(metadata=metadata)
server_request = self._build_server(networks=[])
server_request.pop('imageRef')
server_request['block_device_mapping_v2'] = [{
@@ -33,7 +67,7 @@ class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase):
'destination_type': 'volume'}]
server = self.api.post_server({'server': server_request})
self._wait_for_state_change(server, 'ACTIVE')
- return server
+ return server, image
class DisallowBFVRescuev286(BFVRescue):
@@ -43,10 +77,10 @@ class DisallowBFVRescuev286(BFVRescue):
microversion = '2.86'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Cannot rescue a volume-backed instance',
ex.response.text)
@@ -60,10 +94,10 @@ class DisallowBFVRescuev286WithTrait(BFVRescue):
microversion = '2.86'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Cannot rescue a volume-backed instance',
ex.response.text)
@@ -77,10 +111,10 @@ class DisallowBFVRescuev287WithoutTrait(BFVRescue):
microversion = '2.87'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Host unable to rescue a volume-backed instance',
ex.response.text)
@@ -94,7 +128,41 @@ class AllowBFVRescuev287WithTrait(BFVRescue):
microversion = '2.87'
def test_bfv_rescue_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
self.api.post_server_action(server['id'], {'rescue': {
+ 'rescue_image_ref': image['id']}})
+ self._wait_for_state_change(server, 'RESCUE')
+
+
+class DisallowBFVRescuev287WithoutRescueImageProperties(BFVRescue):
+ """Asserts that BFV rescue requests fail with microversion 2.87 (or later)
+ when the required hw_rescue_device and hw_rescue_bus image properties
+ are not set on the image.
+ """
+ compute_driver = 'fake.MediumFakeDriver'
+ microversion = '2.87'
+
+ def test_bfv_rescue_failed(self):
+ server, image = self._create_bfv_server()
+ # try rescue without hw_rescue_device and hw_rescue_bus properties set
+ ex = self.assertRaises(client.OpenStackApiException,
+ self.api.post_server_action, server['id'], {'rescue': {
'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ self.assertEqual(400, ex.response.status_code)
+ self.assertIn('Cannot rescue a volume-backed instance',
+ ex.response.text)
+
+
+class AllowBFVRescuev287WithRescueImageProperties(BFVRescue):
+ """Asserts that BFV rescue requests pass with microversion 2.87 (or later)
+ when the required hw_rescue_device and hw_rescue_bus image properties
+ are set on the image.
+ """
+ compute_driver = 'fake.RescueBFVDriver'
+ microversion = '2.87'
+
+ def test_bfv_rescue_done(self):
+ server, image = self._create_bfv_server()
+ self.api.post_server_action(server['id'], {'rescue': {
+ 'rescue_image_ref': image['id']}})
self._wait_for_state_change(server, 'RESCUE')
diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py
index e77d4bf1ea..440195cd19 100644
--- a/nova/tests/functional/test_servers.py
+++ b/nova/tests/functional/test_servers.py
@@ -1253,9 +1253,7 @@ class ServerTestV269(integrated_helpers._IntegratedTestBase):
def test_get_servers_detail_filters(self):
# We get the results only from the up cells, this ignoring the down
# cells if list_records_by_skipping_down_cells config option is True.
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1'))
- self.admin_api = api_fixture.admin_api
+ self.admin_api = self.api_fixture.admin_api
self.admin_api.microversion = '2.69'
servers = self.admin_api.get_servers(
search_opts={'hostname': "cell3-inst0"})
@@ -1263,9 +1261,7 @@ class ServerTestV269(integrated_helpers._IntegratedTestBase):
self.assertEqual(self.up_cell_insts[2], servers[0]['id'])
def test_get_servers_detail_all_tenants_with_down_cells(self):
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1'))
- self.admin_api = api_fixture.admin_api
+ self.admin_api = self.api_fixture.admin_api
self.admin_api.microversion = '2.69'
servers = self.admin_api.get_servers(search_opts={'all_tenants': True})
# 4 servers from the up cells and 4 servers from the down cells
@@ -1523,10 +1519,8 @@ class ServersTestV280(integrated_helpers._IntegratedTestBase):
def setUp(self):
super(ServersTestV280, self).setUp()
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1'))
- self.api = api_fixture.api
- self.admin_api = api_fixture.admin_api
+ self.api = self.api_fixture.api
+ self.admin_api = self.api_fixture.admin_api
self.api.microversion = '2.80'
self.admin_api.microversion = '2.80'
@@ -1585,9 +1579,8 @@ class ServersTestV280(integrated_helpers._IntegratedTestBase):
project_id_1 = '4906260553374bf0a5d566543b320516'
project_id_2 = 'c850298c1b6b4796a8f197ac310b2469'
- new_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version=self.api_major_version, project_id=project_id_1))
- new_admin_api = new_api_fixture.admin_api
+ new_admin_api = self.api_fixture.alternative_admin_api
+ new_admin_api.project_id = project_id_1
new_admin_api.microversion = '2.80'
post = {
diff --git a/nova/tests/unit/api/openstack/compute/test_create_backup.py b/nova/tests/unit/api/openstack/compute/test_create_backup.py
index f7280a5a37..70978d11de 100644
--- a/nova/tests/unit/api/openstack/compute/test_create_backup.py
+++ b/nova/tests/unit/api/openstack/compute/test_create_backup.py
@@ -40,10 +40,6 @@ class CreateBackupTestsV21(admin_only_action_common.CommonMixin,
self.controller = getattr(self.create_backup, self.controller_name)()
self.compute_api = self.controller.compute_api
- patch_get = mock.patch.object(self.compute_api, 'get')
- self.mock_get = patch_get.start()
- self.addCleanup(patch_get.stop)
-
@mock.patch.object(common, 'check_img_metadata_properties_quota')
@mock.patch.object(api.API, 'backup')
def test_create_backup_with_metadata(self, mock_backup, mock_check_image):
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_access.py b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
index 8c25a2efc2..1c5c34e758 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_access.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
@@ -353,14 +353,37 @@ class FlavorAccessTestV21(test.NoDBTestCase):
mock_verify.assert_called_once_with(
req.environ['nova.context'], 'proj2')
+ @mock.patch('nova.objects.Flavor.remove_access')
@mock.patch('nova.api.openstack.identity.verify_project_id',
side_effect=exc.HTTPBadRequest(
explanation="Project ID proj2 is not a valid project."))
- def test_remove_tenant_access_with_invalid_tenant(self, mock_verify):
+ def test_remove_tenant_access_with_invalid_tenant(self,
+ mock_verify,
+ mock_remove_access):
"""Tests the case that the tenant does not exist in Keystone."""
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
body = {'removeTenantAccess': {'tenant': 'proj2'}}
+
+ self.flavor_action_controller._remove_tenant_access(
+ req, '2', body=body)
+ mock_verify.assert_called_once_with(
+ req.environ['nova.context'], 'proj2')
+ mock_remove_access.assert_called_once_with('proj2')
+
+ @mock.patch('nova.api.openstack.identity.verify_project_id',
+ side_effect=exc.HTTPBadRequest(
+ explanation="Nova was unable to find Keystone "
+ "service endpoint."))
+ def test_remove_tenant_access_missing_keystone_endpoint(self,
+ mock_verify):
+ """Tests the case that Keystone identity service endpoint
+ version 3.0 was not found.
+ """
+ req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
+ use_admin_context=True)
+ body = {'removeTenantAccess': {'tenant': 'proj2'}}
+
self.assertRaises(exc.HTTPBadRequest,
self.flavor_action_controller._remove_tenant_access,
req, '2', body=body)
diff --git a/nova/tests/unit/api/openstack/compute/test_hypervisors.py b/nova/tests/unit/api/openstack/compute/test_hypervisors.py
index facc5389be..6545031a0b 100644
--- a/nova/tests/unit/api/openstack/compute/test_hypervisors.py
+++ b/nova/tests/unit/api/openstack/compute/test_hypervisors.py
@@ -368,25 +368,23 @@ class HypervisorsTestV21(test.NoDBTestCase):
return TEST_SERVICES[0]
raise exception.ComputeHostNotFound(host=host)
- @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
- return_value=compute_nodes)
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- def _test(self, compute_node_get_all):
- req = self._get_request(True)
- result = self.controller.index(req)
- self.assertEqual(1, len(result['hypervisors']))
- expected = {
- 'id': compute_nodes[0].uuid if self.expect_uuid_for_id
- else compute_nodes[0].id,
- 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
- 'state': 'up',
- 'status': 'enabled',
- }
- self.assertDictEqual(expected, result['hypervisors'][0])
+ m_get = self.controller.host_api.compute_node_get_all
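+        # clear any pre-set side_effect so the return_value below is used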
+ m_get.side_effect = None
+ m_get.return_value = compute_nodes
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ fake_service_get_by_compute_host)
- _test(self)
+ req = self._get_request(True)
+ result = self.controller.index(req)
+ self.assertEqual(1, len(result['hypervisors']))
+ expected = {
+ 'id': compute_nodes[0].uuid if self.expect_uuid_for_id
+ else compute_nodes[0].id,
+ 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
+ 'state': 'up',
+ 'status': 'enabled',
+ }
+ self.assertDictEqual(expected, result['hypervisors'][0])
def test_index_compute_host_not_mapped(self):
"""Tests that we don't fail index if a host is not mapped."""
@@ -402,25 +400,22 @@ class HypervisorsTestV21(test.NoDBTestCase):
return TEST_SERVICES[0]
raise exception.HostMappingNotFound(name=host)
- @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
- return_value=compute_nodes)
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- def _test(self, compute_node_get_all):
- req = self._get_request(True)
- result = self.controller.index(req)
- self.assertEqual(1, len(result['hypervisors']))
- expected = {
- 'id': compute_nodes[0].uuid if self.expect_uuid_for_id
- else compute_nodes[0].id,
- 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
- 'state': 'up',
- 'status': 'enabled',
- }
- self.assertDictEqual(expected, result['hypervisors'][0])
+ self.controller.host_api.compute_node_get_all.return_value = (
+ compute_nodes)
+ self.controller.host_api.service_get_by_compute_host = (
+ fake_service_get_by_compute_host)
- _test(self)
+ req = self._get_request(True)
+ result = self.controller.index(req)
+ self.assertEqual(1, len(result['hypervisors']))
+ expected = {
+ 'id': compute_nodes[0].uuid if self.expect_uuid_for_id
+ else compute_nodes[0].id,
+ 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
+ 'state': 'up',
+ 'status': 'enabled',
+ }
+ self.assertDictEqual(expected, result['hypervisors'][0])
def test_detail(self):
req = self._get_request(True)
@@ -444,32 +439,30 @@ class HypervisorsTestV21(test.NoDBTestCase):
return TEST_SERVICES[0]
raise exception.ComputeHostNotFound(host=host)
- @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
- return_value=compute_nodes)
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- def _test(self, compute_node_get_all):
- req = self._get_request(True)
- result = self.controller.detail(req)
- self.assertEqual(1, len(result['hypervisors']))
- expected = {
- 'id': compute_nodes[0].id,
- 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
- 'state': 'up',
- 'status': 'enabled',
- }
- # we don't care about all of the details, just make sure we get
- # the subset we care about and there are more keys than what index
- # would return
- hypervisor = result['hypervisors'][0]
- self.assertTrue(
- set(expected.keys()).issubset(set(hypervisor.keys())))
- self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
- self.assertEqual(compute_nodes[0].hypervisor_hostname,
- hypervisor['hypervisor_hostname'])
-
- _test(self)
+ m_get = self.controller.host_api.compute_node_get_all
+ m_get.side_effect = None
+ m_get.return_value = compute_nodes
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ fake_service_get_by_compute_host)
+
+ req = self._get_request(True)
+ result = self.controller.detail(req)
+ self.assertEqual(1, len(result['hypervisors']))
+ expected = {
+ 'id': compute_nodes[0].id,
+ 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
+ 'state': 'up',
+ 'status': 'enabled',
+ }
+ # we don't care about all of the details, just make sure we get
+ # the subset we care about and there are more keys than what index
+ # would return
+ hypervisor = result['hypervisors'][0]
+ self.assertTrue(
+ set(expected.keys()).issubset(set(hypervisor.keys())))
+ self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
+ self.assertEqual(compute_nodes[0].hypervisor_hostname,
+ hypervisor['hypervisor_hostname'])
def test_detail_compute_host_not_mapped(self):
"""Tests that if a service is deleted but the compute node is not we
@@ -487,32 +480,28 @@ class HypervisorsTestV21(test.NoDBTestCase):
return TEST_SERVICES[0]
raise exception.HostMappingNotFound(name=host)
- @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
- return_value=compute_nodes)
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- def _test(self, compute_node_get_all):
- req = self._get_request(True)
- result = self.controller.detail(req)
- self.assertEqual(1, len(result['hypervisors']))
- expected = {
- 'id': compute_nodes[0].id,
- 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
- 'state': 'up',
- 'status': 'enabled',
- }
- # we don't care about all of the details, just make sure we get
- # the subset we care about and there are more keys than what index
- # would return
- hypervisor = result['hypervisors'][0]
- self.assertTrue(
- set(expected.keys()).issubset(set(hypervisor.keys())))
- self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
- self.assertEqual(compute_nodes[0].hypervisor_hostname,
- hypervisor['hypervisor_hostname'])
-
- _test(self)
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ fake_service_get_by_compute_host)
+ self.controller.host_api.compute_node_get_all.return_value = (
+ compute_nodes)
+ req = self._get_request(True)
+ result = self.controller.detail(req)
+ self.assertEqual(1, len(result['hypervisors']))
+ expected = {
+ 'id': compute_nodes[0].id,
+ 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
+ 'state': 'up',
+ 'status': 'enabled',
+ }
+ # we don't care about all of the details, just make sure we get
+ # the subset we care about and there are more keys than what index
+ # would return
+ hypervisor = result['hypervisors'][0]
+ self.assertTrue(
+ set(expected.keys()).issubset(set(hypervisor.keys())))
+ self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
+ self.assertEqual(compute_nodes[0].hypervisor_hostname,
+ hypervisor['hypervisor_hostname'])
def test_show(self):
req = self._get_request(True)
@@ -525,21 +514,16 @@ class HypervisorsTestV21(test.NoDBTestCase):
"""Tests that if a service is deleted but the compute node is not we
don't fail when listing hypervisors.
"""
-
- @mock.patch.object(self.controller.host_api, 'compute_node_get',
- return_value=self.TEST_HYPERS_OBJ[0])
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host')
- def _test(self, mock_service, mock_compute_node_get):
- req = self._get_request(True)
- mock_service.side_effect = exception.HostMappingNotFound(
- name='foo')
- hyper_id = self._get_hyper_id()
- self.assertRaises(exc.HTTPNotFound, self.controller.show,
- req, hyper_id)
- self.assertTrue(mock_service.called)
- mock_compute_node_get.assert_called_once_with(mock.ANY, hyper_id)
- _test(self)
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.HostMappingNotFound(name='foo'))
+ req = self._get_request(True)
+ hyper_id = self._get_hyper_id()
+ self.assertRaises(
+ exc.HTTPNotFound, self.controller.show, req, hyper_id)
+ self.assertTrue(
+ self.controller.host_api.service_get_by_compute_host.called)
+ self.controller.host_api.compute_node_get.assert_called_once_with(
+ mock.ANY, hyper_id)
def test_show_noid(self):
req = self._get_request(True)
@@ -611,20 +595,15 @@ class HypervisorsTestV21(test.NoDBTestCase):
mock.ANY, self.TEST_HYPERS_OBJ[0].host)
def test_uptime_hypervisor_not_mapped_service_get(self):
- @mock.patch.object(self.controller.host_api, 'compute_node_get')
- @mock.patch.object(self.controller.host_api, 'get_host_uptime')
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- side_effect=exception.HostMappingNotFound(
- name='dummy'))
- def _test(mock_get, _, __):
- req = self._get_request(True)
- hyper_id = self._get_hyper_id()
- self.assertRaises(exc.HTTPNotFound,
- self.controller.uptime, req, hyper_id)
- self.assertTrue(mock_get.called)
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.HostMappingNotFound(name='dummy'))
- _test()
+ req = self._get_request(True)
+ hyper_id = self._get_hyper_id()
+ self.assertRaises(exc.HTTPNotFound,
+ self.controller.uptime, req, hyper_id)
+ self.assertTrue(
+ self.controller.host_api.service_get_by_compute_host.called)
def test_uptime_hypervisor_not_mapped(self):
with mock.patch.object(self.controller.host_api, 'get_host_uptime',
@@ -644,30 +623,26 @@ class HypervisorsTestV21(test.NoDBTestCase):
self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
def test_search_non_exist(self):
- with mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=[]) as mock_node_search:
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound, self.controller.search,
- req, 'a')
- self.assertEqual(1, mock_node_search.call_count)
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = []
+
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a')
+ self.assertEqual(1, m_search.call_count)
def test_search_unmapped(self):
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = [mock.MagicMock()]
- @mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor')
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host')
- def _test(mock_service, mock_search):
- mock_search.return_value = [mock.MagicMock()]
- mock_service.side_effect = exception.HostMappingNotFound(
- name='foo')
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound, self.controller.search,
- req, 'a')
- self.assertTrue(mock_service.called)
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.HostMappingNotFound(name='foo'))
- _test()
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a')
+ self.assertTrue(
+ self.controller.host_api.service_get_by_compute_host.called)
@mock.patch.object(objects.InstanceList, 'get_by_host',
side_effect=fake_instance_get_all_by_host)
@@ -702,15 +677,12 @@ class HypervisorsTestV21(test.NoDBTestCase):
def test_servers_compute_host_not_found(self):
req = self._get_request(True)
- with test.nested(
- mock.patch.object(
- self.controller.host_api, 'instance_get_all_by_host',
- side_effect=fake_instance_get_all_by_host,
- ),
- mock.patch.object(
- self.controller.host_api, 'service_get_by_compute_host',
- side_effect=exception.ComputeHostNotFound(host='foo'),
- ),
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.ComputeHostNotFound(host='foo'))
+ with mock.patch.object(
+ self.controller.host_api,
+ 'instance_get_all_by_host',
+ side_effect=fake_instance_get_all_by_host,
):
# The result should be empty since every attempt to fetch the
# service for a hypervisor "failed"
@@ -718,24 +690,25 @@ class HypervisorsTestV21(test.NoDBTestCase):
self.assertEqual({'hypervisors': []}, result)
def test_servers_non_id(self):
- with mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=[]) as mock_node_search:
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound,
- self.controller.servers,
- req, '115')
- self.assertEqual(1, mock_node_search.call_count)
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = []
+
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound,
+ self.controller.servers,
+ req, '115')
+ self.assertEqual(1, m_search.call_count)
def test_servers_with_non_integer_hypervisor_id(self):
- with mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=[]) as mock_node_search:
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = []
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound,
- self.controller.servers, req, 'abc')
- self.assertEqual(1, mock_node_search.call_count)
+ req = self._get_request(True)
+ self.assertRaises(
+ exc.HTTPNotFound, self.controller.servers, req, 'abc')
+ self.assertEqual(1, m_search.call_count)
def test_servers_with_no_servers(self):
with mock.patch.object(self.controller.host_api,
@@ -1089,15 +1062,13 @@ class HypervisorsTestV253(HypervisorsTestV252):
use_admin_context=True,
url='/os-hypervisors?with_servers=1')
- with test.nested(
- mock.patch.object(
- self.controller.host_api, 'instance_get_all_by_host',
- side_effect=fake_instance_get_all_by_host,
- ),
- mock.patch.object(
- self.controller.host_api, 'service_get_by_compute_host',
- side_effect=exception.ComputeHostNotFound(host='foo'),
- ),
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.ComputeHostNotFound(host='foo'))
+
+ with mock.patch.object(
+ self.controller.host_api,
+ "instance_get_all_by_host",
+ side_effect=fake_instance_get_all_by_host,
):
# The result should be empty since every attempt to fetch the
# service for a hypervisor "failed"
@@ -1157,11 +1128,13 @@ class HypervisorsTestV253(HypervisorsTestV252):
use_admin_context=True,
url='/os-hypervisors?with_servers=yes&'
'hypervisor_hostname_pattern=shenzhen')
- with mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=objects.ComputeNodeList()) as s:
- self.assertRaises(exc.HTTPNotFound, self.controller.index, req)
- s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = objects.ComputeNodeList()
+
+ self.assertRaises(exc.HTTPNotFound, self.controller.index, req)
+ m_search.assert_called_once_with(
+ req.environ['nova.context'], 'shenzhen')
def test_detail_with_hostname_pattern(self):
"""Test listing hypervisors with details and using the
@@ -1170,13 +1143,14 @@ class HypervisorsTestV253(HypervisorsTestV252):
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?hypervisor_hostname_pattern=shenzhen')
- with mock.patch.object(
- self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=objects.ComputeNodeList(objects=[TEST_HYPERS_OBJ[0]])
- ) as s:
- result = self.controller.detail(req)
- s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = objects.ComputeNodeList(
+ objects=[TEST_HYPERS_OBJ[0]])
+
+ result = self.controller.detail(req)
+ m_search.assert_called_once_with(
+ req.environ['nova.context'], 'shenzhen')
expected = {'hypervisors': [self.DETAIL_HYPERS_DICTS[0]]}
@@ -1483,15 +1457,11 @@ class HypervisorsTestV288(HypervisorsTestV275):
self.controller.uptime, req)
def test_uptime_old_version(self):
- with mock.patch.object(
- self.controller.host_api, 'get_host_uptime',
- return_value='fake uptime',
- ):
- req = self._get_request(use_admin_context=True, version='2.87')
- hyper_id = self._get_hyper_id()
+ req = self._get_request(use_admin_context=True, version='2.87')
+ hyper_id = self._get_hyper_id()
- # no exception == pass
- self.controller.uptime(req, hyper_id)
+ # no exception == pass
+ self.controller.uptime(req, hyper_id)
def test_uptime_noid(self):
# the separate 'uptime' API has been removed, so skip this test
@@ -1526,34 +1496,36 @@ class HypervisorsTestV288(HypervisorsTestV275):
pass
def test_show_with_uptime_notimplemented(self):
- with mock.patch.object(
- self.controller.host_api, 'get_host_uptime',
- side_effect=NotImplementedError,
- ) as mock_get_uptime:
- req = self._get_request(use_admin_context=True)
- hyper_id = self._get_hyper_id()
+ self.controller.host_api.get_host_uptime.side_effect = (
+ NotImplementedError())
- result = self.controller.show(req, hyper_id)
+ req = self._get_request(use_admin_context=True)
+ hyper_id = self._get_hyper_id()
- expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
- expected_dict.update({'uptime': None})
- self.assertEqual({'hypervisor': expected_dict}, result)
- self.assertEqual(1, mock_get_uptime.call_count)
+ result = self.controller.show(req, hyper_id)
+
+ expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
+ expected_dict.update({'uptime': None})
+ self.assertEqual({'hypervisor': expected_dict}, result)
+ self.assertEqual(
+ 1, self.controller.host_api.get_host_uptime.call_count)
def test_show_with_uptime_hypervisor_down(self):
- with mock.patch.object(
- self.controller.host_api, 'get_host_uptime',
- side_effect=exception.ComputeServiceUnavailable(host='dummy')
- ) as mock_get_uptime:
- req = self._get_request(use_admin_context=True)
- hyper_id = self._get_hyper_id()
+ self.controller.host_api.get_host_uptime.side_effect = (
+ exception.ComputeServiceUnavailable(host='dummy'))
- result = self.controller.show(req, hyper_id)
+ req = self._get_request(use_admin_context=True)
+ hyper_id = self._get_hyper_id()
- expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
- expected_dict.update({'uptime': None})
- self.assertEqual({'hypervisor': expected_dict}, result)
- self.assertEqual(1, mock_get_uptime.call_count)
+ result = self.controller.show(req, hyper_id)
+
+ expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
+ expected_dict.update({'uptime': None})
+ self.assertEqual({'hypervisor': expected_dict}, result)
+ self.assertEqual(
+ 1,
+ self.controller.host_api.get_host_uptime.call_count
+ )
def test_show_old_version(self):
# ensure things still work as expected here
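A recurring idiom in the rewritten hypervisor tests is `m.side_effect = None` followed by `m.return_value = ...`. The order matters: on a Mock, side_effect takes precedence over return_value, so a side effect configured by a shared setUp has to be cleared before a per-test return value can take effect. A minimal sketch of that behaviour:

    from unittest import mock

    m = mock.Mock()
    m.side_effect = RuntimeError('configured by shared setUp')

    try:
        m()                    # side_effect wins: this raises
    except RuntimeError:
        pass

    m.return_value = 'nodes'   # not enough on its own; side_effect still wins
    m.side_effect = None       # clear it first
    assert m() == 'nodes'      # now the per-test return value is used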
diff --git a/nova/tests/unit/api/openstack/compute/test_limits.py b/nova/tests/unit/api/openstack/compute/test_limits.py
index a5ac0bca24..69676e28ac 100644
--- a/nova/tests/unit/api/openstack/compute/test_limits.py
+++ b/nova/tests/unit/api/openstack/compute/test_limits.py
@@ -34,7 +34,6 @@ from nova.limit import local as local_limit
from nova.limit import placement as placement_limit
from nova import objects
from nova.policies import limits as l_policies
-from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
@@ -52,12 +51,12 @@ class BaseLimitTestSuite(test.NoDBTestCase):
return {k: dict(limit=v, in_use=v // 2)
for k, v in self.absolute_limits.items()}
- mock_get_project_quotas = mock.patch.object(
+ patcher_get_project_quotas = mock.patch.object(
nova.quota.QUOTAS,
"get_project_quotas",
- side_effect = stub_get_project_quotas)
- mock_get_project_quotas.start()
- self.addCleanup(mock_get_project_quotas.stop)
+ side_effect=stub_get_project_quotas)
+ self.mock_get_project_quotas = patcher_get_project_quotas.start()
+ self.addCleanup(patcher_get_project_quotas.stop)
patcher = self.mock_can = mock.patch('nova.context.RequestContext.can')
self.mock_can = patcher.start()
self.addCleanup(patcher.stop)
@@ -154,16 +153,14 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
return {k: dict(limit=v, in_use=v // 2)
for k, v in self.absolute_limits.items()}
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
+ self.mock_get_project_quotas.side_effect = _get_project_quotas
- response = request.get_response(self.controller)
+ response = request.get_response(self.controller)
- body = jsonutils.loads(response.body)
- self.assertEqual(expected, body)
- get_project_quotas.assert_called_once_with(context, tenant_id,
- usages=True)
+ body = jsonutils.loads(response.body)
+ self.assertEqual(expected, body)
+ self.mock_get_project_quotas.assert_called_once_with(
+ context, tenant_id, usages=True)
def _do_test_used_limits(self, reserved):
request = self._get_index_request(tenant_id=None)
@@ -186,8 +183,7 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
def stub_get_project_quotas(context, project_id, usages=True):
return limits
- self.stub_out('nova.quota.QUOTAS.get_project_quotas',
- stub_get_project_quotas)
+ self.mock_get_project_quotas.side_effect = stub_get_project_quotas
res = request.get_response(self.controller)
body = jsonutils.loads(res.body)
@@ -211,14 +207,15 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
user_id=user_id,
project_id=project_id)
context = fake_req.environ["nova.context"]
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- return_value={}) as mock_get_quotas:
- fake_req.get_response(self.controller)
- self.assertEqual(2, self.mock_can.call_count)
- self.mock_can.assert_called_with(
- l_policies.OTHER_PROJECT_LIMIT_POLICY_NAME)
- mock_get_quotas.assert_called_once_with(context,
- tenant_id, usages=True)
+ self.mock_get_project_quotas.side_effect = None
+ self.mock_get_project_quotas.return_value = {}
+
+ fake_req.get_response(self.controller)
+ self.assertEqual(2, self.mock_can.call_count)
+ self.mock_can.assert_called_with(
+ l_policies.OTHER_PROJECT_LIMIT_POLICY_NAME)
+        self.mock_get_project_quotas.assert_called_once_with(
+            context, tenant_id, usages=True)
def _test_admin_can_fetch_used_limits_for_own_project(self, req_get):
project_id = "123456"
@@ -230,11 +227,12 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
project_id=project_id)
context = fake_req.environ["nova.context"]
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- return_value={}) as mock_get_quotas:
- fake_req.get_response(self.controller)
- mock_get_quotas.assert_called_once_with(context,
- project_id, usages=True)
+ self.mock_get_project_quotas.side_effect = None
+ self.mock_get_project_quotas.return_value = {}
+
+ fake_req.get_response(self.controller)
+ self.mock_get_project_quotas.assert_called_once_with(
+ context, project_id, usages=True)
def test_admin_can_fetch_used_limits_for_own_project(self):
req_get = {}
@@ -262,12 +260,13 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
project_id = "123456"
fake_req = self._get_index_request(project_id=project_id)
context = fake_req.environ["nova.context"]
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- return_value={}) as mock_get_quotas:
- fake_req.get_response(self.controller)
+ self.mock_get_project_quotas.side_effect = None
+ self.mock_get_project_quotas.return_value = {}
- mock_get_quotas.assert_called_once_with(context,
- project_id, usages=True)
+ fake_req.get_response(self.controller)
+
+ self.mock_get_project_quotas.assert_called_once_with(
+ context, project_id, usages=True)
def test_used_ram_added(self):
fake_req = self._get_index_request()
@@ -275,28 +274,26 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
def stub_get_project_quotas(context, project_id, usages=True):
return {'ram': {'limit': 512, 'in_use': 256}}
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- side_effect=stub_get_project_quotas
- ) as mock_get_quotas:
+ self.mock_get_project_quotas.side_effect = stub_get_project_quotas
- res = fake_req.get_response(self.controller)
- body = jsonutils.loads(res.body)
- abs_limits = body['limits']['absolute']
- self.assertIn('totalRAMUsed', abs_limits)
- self.assertEqual(256, abs_limits['totalRAMUsed'])
- self.assertEqual(1, mock_get_quotas.call_count)
+ res = fake_req.get_response(self.controller)
+ body = jsonutils.loads(res.body)
+ abs_limits = body['limits']['absolute']
+ self.assertIn('totalRAMUsed', abs_limits)
+ self.assertEqual(256, abs_limits['totalRAMUsed'])
+ self.assertEqual(1, self.mock_get_project_quotas.call_count)
def test_no_ram_quota(self):
fake_req = self._get_index_request()
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- return_value={}) as mock_get_quotas:
+ self.mock_get_project_quotas.side_effect = None
+ self.mock_get_project_quotas.return_value = {}
- res = fake_req.get_response(self.controller)
- body = jsonutils.loads(res.body)
- abs_limits = body['limits']['absolute']
- self.assertNotIn('totalRAMUsed', abs_limits)
- self.assertEqual(1, mock_get_quotas.call_count)
+ res = fake_req.get_response(self.controller)
+ body = jsonutils.loads(res.body)
+ abs_limits = body['limits']['absolute']
+ self.assertNotIn('totalRAMUsed', abs_limits)
+ self.assertEqual(1, self.mock_get_project_quotas.call_count)
class FakeHttplibSocket(object):
@@ -398,25 +395,24 @@ class LimitsControllerTestV236(BaseLimitTestSuite):
return {k: dict(limit=v, in_use=v // 2)
for k, v in absolute_limits.items()}
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
- response = self.controller.index(self.req)
- expected_response = {
- "limits": {
- "rate": [],
- "absolute": {
- "maxTotalRAMSize": 512,
- "maxTotalInstances": 5,
- "maxTotalCores": 21,
- "maxTotalKeypairs": 10,
- "totalRAMUsed": 256,
- "totalCoresUsed": 10,
- "totalInstancesUsed": 2,
- },
+ self.mock_get_project_quotas.side_effect = _get_project_quotas
+
+ response = self.controller.index(self.req)
+ expected_response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ "maxTotalRAMSize": 512,
+ "maxTotalInstances": 5,
+ "maxTotalCores": 21,
+ "maxTotalKeypairs": 10,
+ "totalRAMUsed": 256,
+ "totalCoresUsed": 10,
+ "totalInstancesUsed": 2,
},
- }
- self.assertEqual(expected_response, response)
+ },
+ }
+ self.assertEqual(expected_response, response)
class LimitsControllerTestV239(BaseLimitTestSuite):
@@ -436,21 +432,20 @@ class LimitsControllerTestV239(BaseLimitTestSuite):
return {k: dict(limit=v, in_use=v // 2)
for k, v in absolute_limits.items()}
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
- response = self.controller.index(self.req)
- # staring from version 2.39 there is no 'maxImageMeta' field
- # in response after removing 'image-metadata' proxy API
- expected_response = {
- "limits": {
- "rate": [],
- "absolute": {
- "maxServerMeta": 1,
- },
+ self.mock_get_project_quotas.side_effect = _get_project_quotas
+
+ response = self.controller.index(self.req)
+ # starting from version 2.39 there is no 'maxImageMeta' field
+ # in response after removing 'image-metadata' proxy API
+ expected_response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ "maxServerMeta": 1,
},
- }
- self.assertEqual(expected_response, response)
+ },
+ }
+ self.assertEqual(expected_response, response)
class LimitsControllerTestV275(BaseLimitTestSuite):
@@ -469,10 +464,8 @@ class LimitsControllerTestV275(BaseLimitTestSuite):
return {k: dict(limit=v, in_use=v // 2)
for k, v in absolute_limits.items()}
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
- self.controller.index(req)
+ self.mock_get_project_quotas.side_effect = _get_project_quotas
+        self.controller.index(req)
def test_index_additional_query_param(self):
req = fakes.HTTPRequest.blank("/?unkown=fake",
diff --git a/nova/tests/unit/api/openstack/compute/test_migrate_server.py b/nova/tests/unit/api/openstack/compute/test_migrate_server.py
index 683759eccc..325b4927b3 100644
--- a/nova/tests/unit/api/openstack/compute/test_migrate_server.py
+++ b/nova/tests/unit/api/openstack/compute/test_migrate_server.py
@@ -530,9 +530,8 @@ class MigrateServerTestsV256(MigrateServerTestsV234):
self.req, fakes.FAKE_UUID, body=body)
def _test_migrate_exception(self, exc_info, expected_result):
- @mock.patch.object(self.compute_api, 'get')
@mock.patch.object(self.compute_api, 'resize', side_effect=exc_info)
- def _test(mock_resize, mock_get):
+ def _test(mock_resize):
instance = objects.Instance(uuid=uuids.instance)
self.assertRaises(expected_result,
self.controller._migrate,
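Dropping the `@mock.patch.object(self.compute_api, 'get')` decorator also requires dropping `mock_get` from the signature, because stacked patch decorators inject their mocks bottom-up: the decorator closest to the function supplies the first argument. A small sketch:

    from unittest import mock

    class API:
        def get(self):
            return 'real get'

        def resize(self):
            return 'real resize'

    api = API()

    @mock.patch.object(api, 'get')                        # outer: second arg
    @mock.patch.object(api, 'resize', return_value='ok')  # inner: first arg
    def check(mock_resize, mock_get):
        assert api.resize() == 'ok'
        api.get()
        mock_resize.assert_called_once_with()
        mock_get.assert_called_once_with()

    check()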
diff --git a/nova/tests/unit/api/openstack/compute/test_quotas.py b/nova/tests/unit/api/openstack/compute/test_quotas.py
index 6cb8d9c7ad..7e4f9d1374 100644
--- a/nova/tests/unit/api/openstack/compute/test_quotas.py
+++ b/nova/tests/unit/api/openstack/compute/test_quotas.py
@@ -882,7 +882,8 @@ class UnifiedLimitsQuotaSetsTest(NoopQuotaSetsTest):
local_limit.KEY_PAIRS: 100,
local_limit.SERVER_GROUPS: 12,
local_limit.SERVER_GROUP_MEMBERS: 10}
- self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+ self.limit_fixture = self.useFixture(
+ limit_fixture.LimitFixture(reglimits, {}))
@mock.patch.object(placement_limit, "get_legacy_project_limits")
def test_show_v21(self, mock_proj):
@@ -1098,7 +1099,7 @@ class UnifiedLimitsQuotaSetsTest(NoopQuotaSetsTest):
local_limit.KEY_PAIRS: 1,
local_limit.SERVER_GROUPS: 3,
local_limit.SERVER_GROUP_MEMBERS: 2}
- self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+ self.limit_fixture.reglimits = reglimits
req = fakes.HTTPRequest.blank("")
response = self.controller.defaults(req, uuids.project_id)
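Storing the LimitFixture and mutating its reglimits per test only works if the fixture consults that attribute at enforcement time rather than copying it at setup; the rewrite assumes exactly that. A toy illustration of the idea:

    class FakeLimitFixture:
        """Toy stand-in: enforcement reads self.reglimits on every
        check, so a test can swap the limits after setup."""

        def __init__(self, reglimits):
            self.reglimits = reglimits

        def check(self, name, requested):
            limit = self.reglimits.get(name, 0)
            if requested > limit:
                raise ValueError('%s over limit %d' % (name, limit))

    fixture = FakeLimitFixture({'server_groups': 10})
    fixture.check('server_groups', 1)          # within the default limit

    fixture.reglimits = {'server_groups': 0}   # per-test override
    try:
        fixture.check('server_groups', 1)
    except ValueError as e:
        print(e)                               # server_groups over limit 0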
diff --git a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
index 6427b1abf0..f62093bbb7 100644
--- a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
+++ b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
@@ -103,6 +103,18 @@ class ConsolesExtensionTestV21(test.NoDBTestCase):
'get_vnc_console',
exception.InstanceNotFound(instance_id=fakes.FAKE_UUID))
+ def test_get_vnc_console_instance_invalid_state(self):
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ self._check_console_failure(
+ self.controller.get_vnc_console,
+ webob.exc.HTTPConflict,
+ body,
+ 'get_vnc_console',
+ exception.InstanceInvalidState(
+ attr='fake-attr', state='fake-state', method='fake-method',
+ instance_uuid=fakes.FAKE_UUID)
+ )
+
def test_get_vnc_console_invalid_type(self):
body = {'os-getVNCConsole': {'type': 'invalid'}}
self._check_console_failure(
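The new test asserts that an InstanceInvalidState error from the compute API surfaces as HTTP 409. A hedged sketch of that exception-translation shape, with hypothetical names rather than the controller's actual code:

    import webob.exc

    class InstanceInvalidState(Exception):
        pass

    def get_vnc_console(fetch):
        # Hypothetical handler: an invalid instance state from the
        # compute layer becomes a 409 Conflict at the API layer.
        try:
            return fetch()
        except InstanceInvalidState as e:
            raise webob.exc.HTTPConflict(explanation=str(e))

    def fetch():
        raise InstanceInvalidState('instance is building')

    try:
        get_vnc_console(fetch)
    except webob.exc.HTTPConflict as e:
        print(e.explanation)  # instance is building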
diff --git a/nova/tests/unit/api/openstack/compute/test_server_actions.py b/nova/tests/unit/api/openstack/compute/test_server_actions.py
index d07924abe8..b4daad1286 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_actions.py
@@ -66,11 +66,11 @@ class ServerActionsControllerTestV21(test.TestCase):
self.controller = self._get_controller()
self.compute_api = self.controller.compute_api
- # We don't care about anything getting as far as hitting the compute
- # RPC API so we just mock it out here.
- mock_rpcapi = mock.patch.object(self.compute_api, 'compute_rpcapi')
- mock_rpcapi.start()
- self.addCleanup(mock_rpcapi.stop)
+        # In most cases we don't care about anything getting as far as
+        # hitting the compute RPC API, so we just mock it out here.
+ patcher_rpcapi = mock.patch.object(self.compute_api, 'compute_rpcapi')
+ self.mock_rpcapi = patcher_rpcapi.start()
+ self.addCleanup(patcher_rpcapi.stop)
# The project_id here matches what is used by default in
# fake_compute_get which need to match for policy checks.
self.req = fakes.HTTPRequest.blank('',
@@ -1079,21 +1079,23 @@ class ServerActionsControllerTestV21(test.TestCase):
snapshot = dict(id=_fake_id('d'))
+ self.mock_rpcapi.quiesce_instance.side_effect = (
+ exception.InstanceQuiesceNotSupported(
+ instance_id="fake", reason="test"
+ )
+ )
+
with test.nested(
mock.patch.object(
self.controller.compute_api.volume_api, 'get_absolute_limits',
return_value={'totalSnapshotsUsed': 0,
'maxTotalSnapshots': 10}),
- mock.patch.object(self.controller.compute_api.compute_rpcapi,
- 'quiesce_instance',
- side_effect=exception.InstanceQuiesceNotSupported(
- instance_id='fake', reason='test')),
mock.patch.object(self.controller.compute_api.volume_api, 'get',
return_value=volume),
mock.patch.object(self.controller.compute_api.volume_api,
'create_snapshot_force',
return_value=snapshot),
- ) as (mock_get_limits, mock_quiesce, mock_vol_get, mock_vol_create):
+ ) as (mock_get_limits, mock_vol_get, mock_vol_create):
if mock_vol_create_side_effect:
mock_vol_create.side_effect = mock_vol_create_side_effect
@@ -1125,7 +1127,7 @@ class ServerActionsControllerTestV21(test.TestCase):
for k in extra_properties.keys():
self.assertEqual(properties[k], extra_properties[k])
- mock_quiesce.assert_called_once_with(mock.ANY, mock.ANY)
+ self.mock_rpcapi.quiesce_instance.assert_called_once()
mock_vol_get.assert_called_once_with(mock.ANY, volume['id'])
mock_vol_create.assert_called_once_with(mock.ANY, volume['id'],
mock.ANY, mock.ANY)
@@ -1189,21 +1191,23 @@ class ServerActionsControllerTestV21(test.TestCase):
snapshot = dict(id=_fake_id('d'))
+ self.mock_rpcapi.quiesce_instance.side_effect = (
+ exception.InstanceQuiesceNotSupported(
+ instance_id="fake", reason="test"
+ )
+ )
+
with test.nested(
mock.patch.object(
self.controller.compute_api.volume_api, 'get_absolute_limits',
return_value={'totalSnapshotsUsed': 0,
'maxTotalSnapshots': 10}),
- mock.patch.object(self.controller.compute_api.compute_rpcapi,
- 'quiesce_instance',
- side_effect=exception.InstanceQuiesceNotSupported(
- instance_id='fake', reason='test')),
mock.patch.object(self.controller.compute_api.volume_api, 'get',
return_value=volume),
mock.patch.object(self.controller.compute_api.volume_api,
'create_snapshot_force',
return_value=snapshot),
- ) as (mock_get_limits, mock_quiesce, mock_vol_get, mock_vol_create):
+ ) as (mock_get_limits, mock_vol_get, mock_vol_create):
response = self.controller._action_create_image(self.req,
FAKE_UUID, body=body)
@@ -1218,7 +1222,7 @@ class ServerActionsControllerTestV21(test.TestCase):
for key, val in extra_metadata.items():
self.assertEqual(properties[key], val)
- mock_quiesce.assert_called_once_with(mock.ANY, mock.ANY)
+ self.mock_rpcapi.quiesce_instance.assert_called_once()
mock_vol_get.assert_called_once_with(mock.ANY, volume['id'])
mock_vol_create.assert_called_once_with(mock.ANY, volume['id'],
mock.ANY, mock.ANY)
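Because setUp patches the whole compute_rpcapi object, the resulting MagicMock creates child mocks on first attribute access; setting `self.mock_rpcapi.quiesce_instance.side_effect` is therefore enough to simulate the quiesce failure without a separate patch for the method. In sketch form:

    from unittest import mock

    class QuiesceNotSupported(Exception):
        pass

    rpcapi = mock.MagicMock()  # what patching compute_rpcapi hands back

    # Child attributes spring into existence as mocks on first access:
    rpcapi.quiesce_instance.side_effect = QuiesceNotSupported('test')

    try:
        rpcapi.quiesce_instance('ctxt', 'instance')
    except QuiesceNotSupported:
        pass

    rpcapi.quiesce_instance.assert_called_once()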
diff --git a/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py b/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
index a0404baffc..81d1939e71 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
@@ -209,7 +209,8 @@ class ServerGroupQuotasUnifiedLimitsTestV21(ServerGroupQuotasTestV21):
self.flags(driver='nova.quota.UnifiedLimitsDriver', group='quota')
self.req = fakes.HTTPRequest.blank('')
self.controller = sg_v21.ServerGroupController()
- self.useFixture(limit_fixture.LimitFixture({'server_groups': 10}, {}))
+ self.limit_fixture = self.useFixture(
+ limit_fixture.LimitFixture({'server_groups': 10}, {}))
@mock.patch('nova.limit.local.enforce_db_limit')
def test_create_server_group_during_recheck(self, mock_enforce):
@@ -236,7 +237,7 @@ class ServerGroupQuotasUnifiedLimitsTestV21(ServerGroupQuotasTestV21):
delta=1)
def test_create_group_fails_with_zero_quota(self):
- self.useFixture(limit_fixture.LimitFixture({'server_groups': 0}, {}))
+ self.limit_fixture.reglimits = {'server_groups': 0}
sgroup = {'name': 'test', 'policies': ['anti-affinity']}
exc = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create,
@@ -245,7 +246,7 @@ class ServerGroupQuotasUnifiedLimitsTestV21(ServerGroupQuotasTestV21):
self.assertIn(msg, str(exc))
def test_create_only_one_group_when_limit_is_one(self):
- self.useFixture(limit_fixture.LimitFixture({'server_groups': 1}, {}))
+ self.limit_fixture.reglimits = {'server_groups': 1}
policies = ['anti-affinity']
sgroup = {'name': 'test', 'policies': policies}
res_dict = self.controller.create(
diff --git a/nova/tests/unit/api/openstack/compute/test_servers.py b/nova/tests/unit/api/openstack/compute/test_servers.py
index 31739ed7ab..4e2a694e15 100644
--- a/nova/tests/unit/api/openstack/compute/test_servers.py
+++ b/nova/tests/unit/api/openstack/compute/test_servers.py
@@ -2087,10 +2087,10 @@ class ServersControllerTestV216(_ServersControllerTest):
return server_dict
- @mock.patch('nova.compute.api.API.get_instance_host_status')
- def _verify_host_status_policy_behavior(self, func, mock_get_host_status):
+ def _verify_host_status_policy_behavior(self, func):
# Set policy to disallow both host_status cases and verify we don't
# call the get_instance_host_status compute RPC API.
+ self.mock_get_instance_host_status.reset_mock()
rules = {
'os_compute_api:servers:show:host_status': '!',
'os_compute_api:servers:show:host_status:unknown-only': '!',
@@ -2098,7 +2098,7 @@ class ServersControllerTestV216(_ServersControllerTest):
orig_rules = policy.get_rules()
policy.set_rules(oslo_policy.Rules.from_dict(rules), overwrite=False)
func()
- mock_get_host_status.assert_not_called()
+ self.mock_get_instance_host_status.assert_not_called()
# Restore the original rules.
policy.set_rules(orig_rules)
@@ -2638,15 +2638,13 @@ class ServersControllerTestV275(ControllerTest):
microversion = '2.75'
- @mock.patch('nova.compute.api.API.get_all')
- def test_get_servers_additional_query_param_old_version(self, mock_get):
+ def test_get_servers_additional_query_param_old_version(self):
req = fakes.HTTPRequest.blank(self.path_with_query % 'unknown=1',
use_admin_context=True,
version='2.74')
self.controller.index(req)
- @mock.patch('nova.compute.api.API.get_all')
- def test_get_servers_ignore_sort_key_old_version(self, mock_get):
+ def test_get_servers_ignore_sort_key_old_version(self):
req = fakes.HTTPRequest.blank(
self.path_with_query % 'sort_key=deleted',
use_admin_context=True, version='2.74')
@@ -3584,13 +3582,13 @@ class ServersControllerRebuildTestV263(ControllerTest):
},
}
- @mock.patch('nova.compute.api.API.get')
- def _rebuild_server(self, mock_get, certs=None,
- conf_enabled=True, conf_certs=None):
+ def _rebuild_server(self, certs=None, conf_enabled=True, conf_certs=None):
ctx = self.req.environ['nova.context']
- mock_get.return_value = fakes.stub_instance_obj(ctx,
- vm_state=vm_states.ACTIVE, trusted_certs=certs,
- project_id=self.req_project_id, user_id=self.req_user_id)
+ self.mock_get.side_effect = None
+ self.mock_get.return_value = fakes.stub_instance_obj(
+ ctx, vm_state=vm_states.ACTIVE, trusted_certs=certs,
+ project_id=self.req_project_id, user_id=self.req_user_id
+ )
self.flags(default_trusted_certificate_ids=conf_certs, group='glance')
@@ -3743,10 +3741,10 @@ class ServersControllerRebuildTestV271(ControllerTest):
}
}
- @mock.patch('nova.compute.api.API.get')
- def _rebuild_server(self, mock_get):
+ def _rebuild_server(self):
ctx = self.req.environ['nova.context']
- mock_get.return_value = fakes.stub_instance_obj(ctx,
+ self.mock_get.side_effect = None
+ self.mock_get.return_value = fakes.stub_instance_obj(ctx,
vm_state=vm_states.ACTIVE, project_id=self.req_project_id,
user_id=self.req_user_id)
server = self.controller._action_rebuild(
diff --git a/nova/tests/unit/api/openstack/compute/test_volumes.py b/nova/tests/unit/api/openstack/compute/test_volumes.py
index a24c104c93..14d27d8546 100644
--- a/nova/tests/unit/api/openstack/compute/test_volumes.py
+++ b/nova/tests/unit/api/openstack/compute/test_volumes.py
@@ -1889,8 +1889,7 @@ class AssistedSnapshotDeleteTestCaseV21(test.NoDBTestCase):
req, '5')
def _test_assisted_delete_instance_conflict(self, api_error):
- # unset the stub on volume_snapshot_delete from setUp
- self.mock_volume_snapshot_delete.stop()
+ self.mock_volume_snapshot_delete.side_effect = api_error
params = {
'delete_info': jsonutils.dumps({'volume_id': '1'}),
}
@@ -1899,10 +1898,9 @@ class AssistedSnapshotDeleteTestCaseV21(test.NoDBTestCase):
urllib.parse.urlencode(params),
version=self.microversion)
req.method = 'DELETE'
- with mock.patch.object(compute_api.API, 'volume_snapshot_delete',
- side_effect=api_error):
- self.assertRaises(
- webob.exc.HTTPBadRequest, self.controller.delete, req, '5')
+
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, self.controller.delete, req, '5')
def test_assisted_delete_instance_invalid_state(self):
api_error = exception.InstanceInvalidState(
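Note the trap the old code fell into: if `self.mock_volume_snapshot_delete` is the started mock rather than the patcher (as the naming suggests), calling `.stop()` on it was just another recorded mock call and undid nothing. Reconfiguring the mock in place, as the rewrite does, is the reliable route:

    from unittest import mock

    class APIError(Exception):
        pass

    snapshot_delete = mock.Mock()  # the mock handed out by setUp

    # snapshot_delete.stop() would not undo any patching here; stop()
    # belongs to the patcher object returned by mock.patch(), not to
    # the mock itself. Reconfigure the mock instead:
    snapshot_delete.side_effect = APIError('invalid state')

    try:
        snapshot_delete('vol-1')
    except APIError:
        pass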
diff --git a/nova/tests/unit/cmd/test_status.py b/nova/tests/unit/cmd/test_status.py
index ba85590697..2d33c890b7 100644
--- a/nova/tests/unit/cmd/test_status.py
+++ b/nova/tests/unit/cmd/test_status.py
@@ -502,3 +502,19 @@ class TestCheckMachineTypeUnset(test.NoDBTestCase):
upgradecheck.Code.SUCCESS,
result.code
)
+
+
+class TestUpgradeCheckServiceUserToken(test.NoDBTestCase):
+
+ def setUp(self):
+ super().setUp()
+ self.cmd = status.UpgradeCommands()
+
+ def test_service_user_token_not_configured(self):
+ result = self.cmd._check_service_user_token()
+ self.assertEqual(upgradecheck.Code.FAILURE, result.code)
+
+ def test_service_user_token_configured(self):
+ self.flags(send_service_user_token=True, group='service_user')
+ result = self.cmd._check_service_user_token()
+ self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
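A sketch of the upgrade-check shape these two tests imply, assuming the check keys off [service_user]/send_service_user_token; the function name and wiring here are illustrative, not nova's actual implementation:

    from oslo_upgradecheck import upgradecheck

    def check_service_user_token(send_service_user_token):
        # Hypothetical check body: fail until the deployer has
        # configured service user tokens.
        if not send_service_user_token:
            return upgradecheck.Result(
                upgradecheck.Code.FAILURE,
                'service user token is not configured')
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)

    assert check_service_user_token(False).code == upgradecheck.Code.FAILURE
    assert check_service_user_token(True).code == upgradecheck.Code.SUCCESS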
diff --git a/nova/tests/unit/compute/test_api.py b/nova/tests/unit/compute/test_api.py
index eb5b0700d4..390dece66d 100644
--- a/nova/tests/unit/compute/test_api.py
+++ b/nova/tests/unit/compute/test_api.py
@@ -967,6 +967,31 @@ class _ComputeAPIUnitTestMixIn(object):
return snapshot_id
+ def _test_delete(self, delete_type, **attrs):
+ delete_time = datetime.datetime(
+ 1955, 11, 5, 9, 30, tzinfo=iso8601.UTC)
+ timeutils.set_time_override(delete_time)
+ self.addCleanup(timeutils.clear_time_override)
+
+ with test.nested(
+ mock.patch.object(
+ self.compute_api.compute_rpcapi, 'confirm_resize'),
+ mock.patch.object(
+ self.compute_api.compute_rpcapi, 'terminate_instance'),
+ mock.patch.object(
+ self.compute_api.compute_rpcapi, 'soft_delete_instance'),
+ ) as (
+ mock_confirm, mock_terminate, mock_soft_delete
+ ):
+ self._do_delete(
+ delete_type,
+ mock_confirm,
+ mock_terminate,
+ mock_soft_delete,
+ delete_time,
+ **attrs
+ )
+
@mock.patch.object(compute_utils,
'notify_about_instance_action')
@mock.patch.object(objects.Migration, 'get_by_instance_and_status')
@@ -986,12 +1011,13 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid', return_value=[])
@mock.patch.object(objects.Instance, 'save')
- def _test_delete(self, delete_type, mock_save, mock_bdm_get, mock_elevated,
- mock_get_cn, mock_up, mock_record, mock_inst_update,
- mock_deallocate, mock_inst_meta, mock_inst_destroy,
- mock_notify_legacy, mock_get_inst,
- mock_save_im, mock_image_delete, mock_mig_get,
- mock_notify, **attrs):
+ def _do_delete(
+ self, delete_type, mock_confirm, mock_terminate, mock_soft_delete,
+ delete_time, mock_save, mock_bdm_get, mock_elevated, mock_get_cn,
+ mock_up, mock_record, mock_inst_update, mock_deallocate,
+ mock_inst_meta, mock_inst_destroy, mock_notify_legacy, mock_get_inst,
+ mock_save_im, mock_image_delete, mock_mig_get, mock_notify, **attrs
+ ):
expected_save_calls = [mock.call()]
expected_record_calls = []
expected_elevated_calls = []
@@ -1001,17 +1027,11 @@ class _ComputeAPIUnitTestMixIn(object):
deltas = {'instances': -1,
'cores': -inst.flavor.vcpus,
'ram': -inst.flavor.memory_mb}
- delete_time = datetime.datetime(1955, 11, 5, 9, 30,
- tzinfo=iso8601.UTC)
- self.useFixture(utils_fixture.TimeFixture(delete_time))
task_state = (delete_type == 'soft_delete' and
task_states.SOFT_DELETING or task_states.DELETING)
updates = {'progress': 0, 'task_state': task_state}
if delete_type == 'soft_delete':
updates['deleted_at'] = delete_time
- rpcapi = self.compute_api.compute_rpcapi
- mock_confirm = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'confirm_resize')).mock
def _reset_task_state(context, instance, migration, src_host,
cast=False):
@@ -1026,11 +1046,6 @@ class _ComputeAPIUnitTestMixIn(object):
snapshot_id = self._set_delete_shelved_part(inst,
mock_image_delete)
- mock_terminate = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'terminate_instance')).mock
- mock_soft_delete = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'soft_delete_instance')).mock
-
if inst.task_state == task_states.RESIZE_FINISH:
self._test_delete_resizing_part(inst, deltas)
@@ -2637,9 +2652,6 @@ class _ComputeAPIUnitTestMixIn(object):
rpcapi = self.compute_api.compute_rpcapi
- mock_pause = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'pause_instance')).mock
-
with mock.patch.object(rpcapi, 'pause_instance') as mock_pause:
self.compute_api.pause(self.context, instance)
@@ -5624,7 +5636,10 @@ class _ComputeAPIUnitTestMixIn(object):
destination_type='volume', volume_type=None,
snapshot_id=None, volume_id=uuids.volume_id,
volume_size=None)])
- rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({})
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
with test.nested(
mock.patch.object(self.compute_api.placementclient,
@@ -5676,6 +5691,7 @@ class _ComputeAPIUnitTestMixIn(object):
        # Assert that the instance task state was set in the compute API
self.assertEqual(task_states.RESCUING, instance.task_state)
+ @mock.patch('nova.objects.instance.Instance.image_meta')
@mock.patch('nova.objects.compute_node.ComputeNode'
'.get_by_host_and_nodename')
@mock.patch('nova.compute.utils.is_volume_backed_instance',
@@ -5684,7 +5700,8 @@ class _ComputeAPIUnitTestMixIn(object):
'.get_by_instance_uuid')
def test_rescue_bfv_without_required_trait(self, mock_get_bdms,
mock_is_volume_backed,
- mock_get_cn):
+ mock_get_cn,
+ mock_image_meta):
instance = self._create_instance_obj()
bdms = objects.BlockDeviceMappingList(objects=[
objects.BlockDeviceMapping(
@@ -5692,6 +5709,12 @@ class _ComputeAPIUnitTestMixIn(object):
destination_type='volume', volume_type=None,
snapshot_id=None, volume_id=uuids.volume_id,
volume_size=None)])
+
+ instance.image_meta = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
+
with test.nested(
mock.patch.object(self.compute_api.placementclient,
'get_provider_traits'),
@@ -5729,6 +5752,124 @@ class _ComputeAPIUnitTestMixIn(object):
mock_get_traits.assert_called_once_with(
self.context, uuids.cn)
+ @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref')
+ @mock.patch('nova.objects.compute_node.ComputeNode'
+ '.get_by_host_and_nodename')
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_with_required_image_properties(
+ self, mock_get_bdms, mock_is_volume_backed, mock_get_cn,
+ mock_image_meta_obj_from_ref):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
+
+ with test.nested(
+ mock.patch.object(self.compute_api.placementclient,
+ 'get_provider_traits'),
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ mock.patch.object(instance, 'save'),
+ mock.patch.object(self.compute_api, '_record_action_start'),
+ mock.patch.object(self.compute_api.compute_rpcapi,
+ 'rescue_instance')
+ ) as (
+ mock_get_traits, mock_get_volume, mock_check_attached,
+ mock_instance_save, mock_record_start, mock_rpcapi_rescue
+ ):
+ # Mock out the returned compute node, image_meta, bdms and volume
+ mock_image_meta_obj_from_ref.return_value = rescue_image_meta_obj
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+ mock_get_cn.return_value = mock.Mock(uuid=uuids.cn)
+
+ # Ensure the required trait is returned, allowing BFV rescue
+ mock_trait_info = mock.Mock(traits=[ot.COMPUTE_RESCUE_BFV])
+ mock_get_traits.return_value = mock_trait_info
+
+ # Try to rescue the instance
+ self.compute_api.rescue(self.context, instance,
+ rescue_image_ref=uuids.rescue_image_id,
+ allow_bfv_rescue=True)
+
+ # Assert all of the calls made in the compute API
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+ mock_get_cn.assert_called_once_with(
+ self.context, instance.host, instance.node)
+ mock_get_traits.assert_called_once_with(self.context, uuids.cn)
+ mock_instance_save.assert_called_once_with(
+ expected_task_state=[None])
+ mock_record_start.assert_called_once_with(
+ self.context, instance, instance_actions.RESCUE)
+ mock_rpcapi_rescue.assert_called_once_with(
+ self.context, instance=instance, rescue_password=None,
+ rescue_image_ref=uuids.rescue_image_id, clean_shutdown=True)
+
+        # Assert that the instance task state was set in the compute API
+ self.assertEqual(task_states.RESCUING, instance.task_state)
+
+ @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref')
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_without_required_image_properties(
+ self, mock_get_bdms, mock_is_volume_backed,
+ mock_image_meta_obj_from_ref):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {}
+ })
+
+ with test.nested(
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ ) as (
+ mock_get_volume, mock_check_attached
+ ):
+ # Mock out the returned bdms, volume and image_meta
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+ mock_image_meta_obj_from_ref.return_value = rescue_image_meta_obj
+
+            # Assert that any attempt to rescue a bfv instance with a rescue
+            # image that does not set the hw_rescue_device and hw_rescue_bus
+            # properties fails and raises InstanceNotRescuable
+ self.assertRaises(exception.InstanceNotRescuable,
+ self.compute_api.rescue, self.context, instance,
+ rescue_image_ref=None, allow_bfv_rescue=True)
+
+ # Assert the calls made in the compute API prior to the failure
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=True)
@mock.patch('nova.objects.block_device.BlockDeviceMappingList'
@@ -7741,16 +7882,13 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
self.assertTrue(hasattr(self.compute_api, 'host'))
self.assertEqual(CONF.host, self.compute_api.host)
- @mock.patch('nova.scheduler.client.report.SchedulerReportClient')
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
def test_placement_client_init(self, mock_report_client):
"""Tests to make sure that the construction of the placement client
- only happens once per API class instance.
+ uses the singleton helper, and happens only when needed.
"""
- self.assertIsNone(self.compute_api._placementclient)
- # Access the property twice to make sure SchedulerReportClient is
- # only loaded once.
- for x in range(2):
- self.compute_api.placementclient
+ self.assertFalse(mock_report_client.called)
+ self.compute_api.placementclient
mock_report_client.assert_called_once_with()
def test_validate_host_for_cold_migrate_same_host_fails(self):
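The reworked test reflects a move from per-instance construction to a lazily invoked module-level singleton helper. A generic sketch of that pattern under assumed names (not nova's exact code):

    _REPORT_CLIENT = None

    class SchedulerReportClient:
        pass

    def report_client_singleton():
        # Build the client once, on first use, and share it after that.
        global _REPORT_CLIENT
        if _REPORT_CLIENT is None:
            _REPORT_CLIENT = SchedulerReportClient()
        return _REPORT_CLIENT

    class ComputeAPI:
        @property
        def placementclient(self):
            # Construction happens only when the property is first used.
            return report_client_singleton()

    api = ComputeAPI()
    assert api.placementclient is api.placementclient  # one shared instance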
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index d8f443843f..f2ea9c3c00 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -5714,13 +5714,15 @@ class ComputeTestCase(BaseTestCase,
objects=[objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0a:00.1',
- request_id=uuids.req1)])
+ request_id=uuids.req1,
+ compute_node_id=1)])
new_pci_devices = objects.PciDeviceList(
objects=[objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0b:00.1',
- request_id=uuids.req2)])
+ request_id=uuids.req2,
+ compute_node_id=2)])
if expected_pci_addr == old_pci_devices[0].address:
expected_pci_device = old_pci_devices[0]
@@ -8618,16 +8620,13 @@ class ComputeAPITestCase(BaseTestCase):
def test_create_instance_sets_system_metadata(self):
# Make sure image properties are copied into system metadata.
- with mock.patch.object(
- self.compute_api.compute_task_api, 'schedule_and_build_instances',
- ) as mock_sbi:
- ref, resv_id = self.compute_api.create(
- self.context,
- flavor=self.default_flavor,
- image_href='f5000000-0000-0000-0000-000000000000')
+ ref, resv_id = self.compute_api.create(
+ self.context,
+ flavor=self.default_flavor,
+ image_href='f5000000-0000-0000-0000-000000000000')
- build_call = mock_sbi.call_args_list[0]
- instance = build_call[1]['build_requests'][0].instance
+ build_call = self.schedule_and_build_instances_mock.call_args_list[0]
+ instance = build_call[1]['build_requests'][0].instance
image_props = {'image_kernel_id': uuids.kernel_id,
'image_ramdisk_id': uuids.ramdisk_id,
@@ -8637,16 +8636,14 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(value, instance.system_metadata[key])
def test_create_saves_flavor(self):
- with mock.patch.object(
- self.compute_api.compute_task_api, 'schedule_and_build_instances',
- ) as mock_sbi:
- ref, resv_id = self.compute_api.create(
- self.context,
- flavor=self.default_flavor,
- image_href=uuids.image_href_id)
+ ref, resv_id = self.compute_api.create(
+ self.context,
+ flavor=self.default_flavor,
+ image_href=uuids.image_href_id)
+
+ build_call = self.schedule_and_build_instances_mock.call_args_list[0]
+ instance = build_call[1]['build_requests'][0].instance
- build_call = mock_sbi.call_args_list[0]
- instance = build_call[1]['build_requests'][0].instance
self.assertIn('flavor', instance)
self.assertEqual(self.default_flavor.flavorid,
instance.flavor.flavorid)
@@ -8654,19 +8651,18 @@ class ComputeAPITestCase(BaseTestCase):
def test_create_instance_associates_security_groups(self):
# Make sure create associates security groups.
- with test.nested(
- mock.patch.object(self.compute_api.compute_task_api,
- 'schedule_and_build_instances'),
- mock.patch('nova.network.security_group_api.validate_name',
- return_value=uuids.secgroup_id),
- ) as (mock_sbi, mock_secgroups):
+ with mock.patch(
+ "nova.network.security_group_api.validate_name",
+ return_value=uuids.secgroup_id,
+ ) as mock_secgroups:
self.compute_api.create(
self.context,
flavor=self.default_flavor,
image_href=uuids.image_href_id,
security_groups=['testgroup'])
- build_call = mock_sbi.call_args_list[0]
+ build_call = (
+ self.schedule_and_build_instances_mock.call_args_list[0])
reqspec = build_call[1]['request_spec'][0]
self.assertEqual(1, len(reqspec.security_groups))
@@ -8701,22 +8697,19 @@ class ComputeAPITestCase(BaseTestCase):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.port_instance)])
- with test.nested(
- mock.patch.object(
- self.compute_api.compute_task_api,
- 'schedule_and_build_instances'),
- mock.patch.object(
- self.compute_api.network_api,
- 'create_resource_requests',
- return_value=(None, [], objects.RequestLevelParams())),
- ) as (mock_sbi, _mock_create_resreqs):
+ with mock.patch.object(
+ self.compute_api.network_api,
+ "create_resource_requests",
+ return_value=(None, [], objects.RequestLevelParams()),
+ ):
self.compute_api.create(
self.context,
flavor=self.default_flavor,
image_href=uuids.image_href_id,
requested_networks=requested_networks)
- build_call = mock_sbi.call_args_list[0]
+ build_call = (
+ self.schedule_and_build_instances_mock.call_args_list[0])
reqspec = build_call[1]['request_spec'][0]
self.assertEqual(1, len(reqspec.requested_networks))
@@ -10216,8 +10209,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.get_console_output,
self.context, instance)
- @mock.patch.object(compute_utils, 'notify_about_instance_action')
- def test_attach_interface(self, mock_notify):
+ def test_attach_interface(self):
instance = self._create_fake_instance_obj()
nwinfo = [fake_network_cache_model.new_vif()]
network_id = nwinfo[0]['network']['id']
@@ -10237,8 +10229,12 @@ class ComputeAPITestCase(BaseTestCase):
mock.patch.object(
self.compute,
"_claim_pci_device_for_interface_attach",
- return_value=None)
- ) as (cap, mock_lock, mock_create_resource_req, mock_claim_pci):
+ return_value=None),
+ mock.patch.object(compute_utils, 'notify_about_instance_action'),
+ ) as (
+ cap, mock_lock, mock_create_resource_req, mock_claim_pci,
+ mock_notify
+ ):
mock_create_resource_req.return_value = (
None, [], mock.sentinel.req_lvl_params)
vif = self.compute.attach_interface(self.context,
@@ -11056,8 +11052,7 @@ class ComputeAPITestCase(BaseTestCase):
mock_remove_res.assert_called_once_with(
self.context, instance.uuid, mock.sentinel.resources)
- @mock.patch.object(compute_utils, 'notify_about_instance_action')
- def test_detach_interface(self, mock_notify):
+ def test_detach_interface(self):
nwinfo, port_id = self.test_attach_interface()
instance = self._create_fake_instance_obj()
instance.info_cache = objects.InstanceInfoCache.new(
@@ -11090,10 +11085,13 @@ class ComputeAPITestCase(BaseTestCase):
mock.patch('nova.pci.request.get_instance_pci_request_from_vif',
return_value=pci_req),
mock.patch.object(self.compute.rt, 'unclaim_pci_devices'),
- mock.patch.object(instance, 'save')
+ mock.patch.object(instance, 'save'),
+ mock.patch.object(compute_utils, 'notify_about_instance_action'),
) as (
- mock_remove_alloc, mock_deallocate, mock_lock,
- mock_get_pci_req, mock_unclaim_pci, mock_instance_save):
+ mock_remove_alloc, mock_deallocate, mock_lock,
+ mock_get_pci_req, mock_unclaim_pci, mock_instance_save,
+ mock_notify
+ ):
self.compute.detach_interface(self.context, instance, port_id)
mock_deallocate.assert_called_once_with(
@@ -11900,17 +11898,16 @@ class ComputeAPITestCase(BaseTestCase):
instance.save()
@mock.patch.object(objects.Service, 'get_by_compute_host')
- @mock.patch.object(self.compute_api.compute_task_api,
- 'rebuild_instance')
@mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
@mock.patch.object(objects.RequestSpec,
'get_by_instance_uuid')
@mock.patch.object(self.compute_api.servicegroup_api, 'service_is_up')
- def do_test(service_is_up, get_by_instance_uuid, get_all_by_host,
- rebuild_instance, get_service):
+ def do_test(
+ service_is_up, get_by_instance_uuid, get_all_by_host, get_service
+ ):
service_is_up.return_value = False
get_by_instance_uuid.return_value = fake_spec
- rebuild_instance.side_effect = fake_rebuild_instance
+ self.rebuild_instance_mock.side_effect = fake_rebuild_instance
get_all_by_host.return_value = objects.ComputeNodeList(
objects=[objects.ComputeNode(
host='fake_dest_host',
@@ -11928,7 +11925,7 @@ class ComputeAPITestCase(BaseTestCase):
host = None
else:
host = 'fake_dest_host'
- rebuild_instance.assert_called_once_with(
+ self.rebuild_instance_mock.assert_called_once_with(
ctxt,
instance=instance,
new_pass=None,
@@ -13046,16 +13043,13 @@ class ComputeAPIAggrTestCase(BaseTestCase):
hosts = aggregate.hosts if 'hosts' in aggregate else None
self.assertIn(values[0][1][0], hosts)
- @mock.patch('nova.scheduler.client.report.SchedulerReportClient')
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
def test_placement_client_init(self, mock_report_client):
"""Tests to make sure that the construction of the placement client
- only happens once per AggregateAPI class instance.
+ uses the singleton helper, and happens only when needed.
"""
- self.assertIsNone(self.api._placement_client)
- # Access the property twice to make sure SchedulerReportClient is
- # only loaded once.
- for x in range(2):
- self.api.placement_client
+ self.assertFalse(mock_report_client.called)
+ self.api.placement_client
mock_report_client.assert_called_once_with()
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 760ea79e87..62f15d0d93 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -1306,6 +1306,36 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute._init_instance(self.context, instance)
set_error_state.assert_called_once_with(instance)
+ def test_init_instance_vif_plug_fails_missing_pci(self):
+ instance = fake_instance.fake_instance_obj(
+ self.context,
+ uuid=uuids.instance,
+ info_cache=None,
+ power_state=power_state.RUNNING,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ host=self.compute.host,
+ expected_attrs=['info_cache'])
+
+ with test.nested(
+ mock.patch.object(context, 'get_admin_context',
+ return_value=self.context),
+ mock.patch.object(objects.Instance, 'get_network_info',
+ return_value=network_model.NetworkInfo()),
+ mock.patch.object(self.compute.driver, 'plug_vifs',
+ side_effect=exception.PciDeviceNotFoundById("pci-addr")),
+ mock.patch("nova.compute.manager.LOG.exception"),
+ ) as (get_admin_context, get_nw_info, plug_vifs, log_exception):
+ # As this does not raise, we are sure that the compute service
+ # continues initializing the rest of the instances.
+ self.compute._init_instance(self.context, instance)
+ log_exception.assert_called_once_with(
+ "Virtual interface plugging failed for instance. Probably the "
+ "vnic_type of the bound port has been changed. Nova does not "
+ "support such change.",
+ instance=instance
+ )
+
def _test__validate_pinning_configuration(self, supports_pcpus=True):
instance_1 = fake_instance.fake_instance_obj(
self.context, uuid=uuids.instance_1)
@@ -7585,6 +7615,27 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
instance, hints)
mock_get.assert_called_once_with(self.context, uuids.group_hint)
+ @mock.patch('nova.objects.InstanceGroup.get_by_hint')
+ def test_validate_instance_group_policy_deleted_group(self, mock_get):
+ """Tests that _validate_instance_group_policy handles the case
+ where the scheduler hint has a group but that group has been deleted.
+ This test is a reproducer for bug #1890244.
+ """
+ instance = objects.Instance(uuid=uuids.instance)
+ hints = {'group': [uuids.group_hint]}
+ mock_get.side_effect = exception.InstanceGroupNotFound(
+ group_uuid=uuids.group_hint
+ )
+ # This implicitly asserts that no exception is raised since
+ # uncaught exceptions would be treated as a test failure.
+ self.compute._validate_instance_group_policy(
+ self.context, instance, hints
+ )
+ # and this just asserts that we did in fact invoke the method
+ # that raises, to ensure that if we refactor in the future this
+ # test will fail if the function we mock is no longer called.
+ mock_get.assert_called_once_with(self.context, uuids.group_hint)
+
@mock.patch('nova.objects.InstanceGroup.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
@mock.patch('nova.objects.InstanceGroup.get_by_hint')
@@ -8563,11 +8614,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute.network_api, 'setup_networks_on_host')
@mock.patch.object(self.compute.network_api, 'migrate_instance_start')
@mock.patch.object(compute_utils, 'notify_usage_exists')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
def do_test(get_by_instance_uuid,
- migration_save,
notify_usage_exists,
migrate_instance_start,
setup_networks_on_host,
@@ -8639,7 +8688,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute.network_api, 'migrate_instance_finish',
side_effect=_migrate_instance_finish)
@mock.patch.object(self.compute.network_api, 'setup_networks_on_host')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(self.instance, 'save')
@mock.patch.object(self.compute, '_set_instance_info')
@mock.patch.object(db, 'instance_fault_create')
@@ -8653,7 +8701,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
fault_create,
set_instance_info,
instance_save,
- migration_save,
setup_networks_on_host,
migrate_instance_finish,
get_instance_nw_info,
@@ -8697,11 +8744,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute.network_api, 'migrate_instance_start')
@mock.patch.object(compute_utils, 'notify_usage_exists')
@mock.patch.object(db, 'instance_extra_update_by_uuid')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
def do_revert_resize(mock_get_by_instance_uuid,
- mock_migration_save,
mock_extra_update,
mock_notify_usage_exists,
mock_migrate_instance_start,
@@ -8748,7 +8793,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(compute_utils, 'notify_about_instance_action')
@mock.patch.object(self.compute, "_set_instance_info")
@mock.patch.object(self.instance, 'save')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(db, 'instance_fault_create')
@mock.patch.object(db, 'instance_extra_update_by_uuid')
@@ -8772,7 +8816,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
mock_extra_update,
mock_fault_create,
mock_fault_from_exc,
- mock_mig_save,
mock_inst_save,
mock_set,
mock_notify_about_instance_action,
@@ -8866,7 +8909,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute, '_delete_scheduler_instance_info')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.Migration.get_by_id')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(self.compute, '_notify_about_instance_usage')
@mock.patch.object(self.compute, 'network_api')
@mock.patch.object(self.compute.driver, 'confirm_migration')
@@ -8875,7 +8917,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.instance, 'save')
def do_confirm_resize(mock_save, mock_drop, mock_delete,
mock_confirm, mock_nwapi, mock_notify,
- mock_mig_save, mock_mig_get, mock_inst_get,
+ mock_mig_get, mock_inst_get,
mock_delete_scheduler_info):
self._mock_rt()
@@ -8958,16 +9000,16 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
instance_get_by_uuid.assert_called_once()
def test_confirm_resize_calls_virt_driver_with_old_pci(self):
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(self.compute, '_notify_about_instance_usage')
@mock.patch.object(self.compute, 'network_api')
@mock.patch.object(self.compute.driver, 'confirm_migration')
@mock.patch.object(self.compute, '_delete_allocation_after_move')
@mock.patch.object(self.instance, 'drop_migration_context')
@mock.patch.object(self.instance, 'save')
- def do_confirm_resize(mock_save, mock_drop, mock_delete,
- mock_confirm, mock_nwapi, mock_notify,
- mock_mig_save):
+ def do_confirm_resize(
+ mock_save, mock_drop, mock_delete, mock_confirm, mock_nwapi,
+ mock_notify
+ ):
# Mock virt driver confirm_resize() to save the provided
# network_info, we will check it later.
updated_nw_info = []
@@ -8983,10 +9025,12 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self._mock_rt()
old_devs = objects.PciDeviceList(
objects=[objects.PciDevice(
+ compute_node_id=1,
address='0000:04:00.2',
request_id=uuids.pcidev1)])
new_devs = objects.PciDeviceList(
objects=[objects.PciDevice(
+ compute_node_id=2,
address='0000:05:00.3',
request_id=uuids.pcidev1)])
self.instance.migration_context = objects.MigrationContext(
@@ -9958,6 +10002,27 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.instance,
migration)
+ def test_post_live_migration_update_host(self):
+ @mock.patch.object(self.compute, '_get_compute_info')
+ def _test_post_live_migration(_get_compute_info):
+ dest_host = 'dest'
+ cn = objects.ComputeNode(hypervisor_hostname=dest_host)
+ _get_compute_info.return_value = cn
+ instance = fake_instance.fake_instance_obj(self.context,
+ node='src',
+ uuid=uuids.instance)
+ with mock.patch.object(self.compute, "_post_live_migration"
+ ) as plm, mock.patch.object(instance, "save") as save:
+ error = ValueError("some failure")
+ plm.side_effect = error
+ self.assertRaises(
+ ValueError, self.compute._post_live_migration_update_host,
+ self.context, instance, dest_host)
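+ # Even though _post_live_migration raised, the instance should
+ # already point at the destination host and have been saved.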
+ save.assert_called_once()
+ self.assertEqual(instance.host, dest_host)
+
+ _test_post_live_migration()
+
def test_post_live_migration_cinder_pre_344_api(self):
# Because live migration has
# succeeded,_post_live_migration_remove_source_vol_connections()
@@ -10957,40 +11022,94 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
_test()
def test__update_migrate_vifs_profile_with_pci(self):
- # Define two migrate vifs with only one pci that is required
- # to be updated. Make sure method under test updated the correct one
+ # Define three migrate vifs with two pci devs that are required
+ # to be updated, one VF and one PF.
+ # Make sure the method under test updated the correct devs with
+ # the correct values.
nw_vifs = network_model.NetworkInfo(
- [network_model.VIF(
- id=uuids.port0,
- vnic_type='direct',
- type=network_model.VIF_TYPE_HW_VEB,
- profile={'pci_slot': '0000:04:00.3',
- 'pci_vendor_info': '15b3:1018',
- 'physical_network': 'default'}),
- network_model.VIF(
- id=uuids.port1,
- vnic_type='normal',
- type=network_model.VIF_TYPE_OVS,
- profile={'some': 'attribute'})])
- pci_dev = objects.PciDevice(request_id=uuids.pci_req,
- address='0000:05:00.4',
- vendor_id='15b3',
- product_id='1018')
- port_id_to_pci_dev = {uuids.port0: pci_dev}
- mig_vifs = migrate_data_obj.VIFMigrateData.\
- create_skeleton_migrate_vifs(nw_vifs)
- self.compute._update_migrate_vifs_profile_with_pci(mig_vifs,
- port_id_to_pci_dev)
+ [
+ network_model.VIF(
+ id=uuids.port0,
+ vnic_type='direct',
+ type=network_model.VIF_TYPE_HW_VEB,
+ profile={
+ 'pci_slot': '0000:04:00.3',
+ 'pci_vendor_info': '15b3:1018',
+ 'physical_network': 'default',
+ },
+ ),
+ network_model.VIF(
+ id=uuids.port1,
+ vnic_type='normal',
+ type=network_model.VIF_TYPE_OVS,
+ profile={'some': 'attribute'},
+ ),
+ network_model.VIF(
+ id=uuids.port2,
+ vnic_type='direct-physical',
+ type=network_model.VIF_TYPE_HOSTDEV,
+ profile={
+ 'pci_slot': '0000:01:00',
+ 'pci_vendor_info': '8086:154d',
+ 'physical_network': 'physnet2',
+ },
+ ),
+ ]
+ )
+
+ pci_vf_dev = objects.PciDevice(
+ request_id=uuids.pci_req,
+ address='0000:05:00.4',
+ parent_addr='0000:05:00',
+ vendor_id='15b3',
+ product_id='1018',
+ compute_node_id=13,
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ )
+ pci_pf_dev = objects.PciDevice(
+ request_id=uuids.pci_req2,
+ address='0000:01:00',
+ parent_addr='0000:02:00',
+ vendor_id='8086',
+ product_id='154d',
+ compute_node_id=13,
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ extra_info={'mac_address': 'b4:96:91:34:f4:36'},
+ )
+ port_id_to_pci_dev = {
+ uuids.port0: pci_vf_dev,
+ uuids.port2: pci_pf_dev,
+ }
+ mig_vifs = (
+ migrate_data_obj.VIFMigrateData.create_skeleton_migrate_vifs(
+ nw_vifs)
+ )
+
+ self.compute._update_migrate_vifs_profile_with_pci(
+ mig_vifs, port_id_to_pci_dev)
+
# Make sure the method under test updated the correct ones.
- changed_mig_vif = mig_vifs[0]
+ changed_vf_mig_vif = mig_vifs[0]
unchanged_mig_vif = mig_vifs[1]
+ changed_pf_mig_vif = mig_vifs[2]
# Migrate vifs profile was updated with pci_dev.address
# for port ID uuids.port0.
- self.assertEqual(changed_mig_vif.profile['pci_slot'],
- pci_dev.address)
+ self.assertEqual(changed_vf_mig_vif.profile['pci_slot'],
+ pci_vf_dev.address)
+ # MAC is not added as this is a VF
+ self.assertNotIn('device_mac_address', changed_vf_mig_vif.profile)
# Migrate vifs profile was unchanged for port ID uuids.port1.
# i.e. the 'profile' attribute does not exist.
self.assertNotIn('profile', unchanged_mig_vif)
+ # Migrate vifs profile was updated with pci_dev.address
+ # for port ID uuids.port2.
+ self.assertEqual(changed_pf_mig_vif.profile['pci_slot'],
+ pci_pf_dev.address)
+ # MAC is updated as this is a PF
+ self.assertEqual(
+ 'b4:96:91:34:f4:36',
+ changed_pf_mig_vif.profile['device_mac_address']
+ )
def test_get_updated_nw_info_with_pci_mapping(self):
old_dev = objects.PciDevice(address='0000:04:00.2')
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
index caa12cb754..5aab64e72c 100644
--- a/nova/tests/unit/compute/test_resource_tracker.py
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -4205,9 +4205,9 @@ class TestCleanComputeNodeCache(BaseTestCase):
invalid_nodename = "invalid-node"
self.rt.compute_nodes[_NODENAME] = self.compute
self.rt.compute_nodes[invalid_nodename] = mock.sentinel.compute
- with mock.patch.object(
- self.rt.reportclient, "invalidate_resource_provider",
- ) as mock_invalidate:
- self.rt.clean_compute_node_cache([self.compute])
- mock_remove.assert_called_once_with(invalid_nodename)
- mock_invalidate.assert_called_once_with(invalid_nodename)
+ mock_invalidate = self.rt.reportclient.invalidate_resource_provider
+
+ self.rt.clean_compute_node_cache([self.compute])
+
+ mock_remove.assert_called_once_with(invalid_nodename)
+ mock_invalidate.assert_called_once_with(invalid_nodename)
diff --git a/nova/tests/unit/conductor/tasks/test_live_migrate.py b/nova/tests/unit/conductor/tasks/test_live_migrate.py
index cb40c076c8..dd4ee7c3fe 100644
--- a/nova/tests/unit/conductor/tasks/test_live_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_live_migrate.py
@@ -345,6 +345,36 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
mock.call(self.destination)],
mock_get_info.call_args_list)
+ @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
+ def test_skip_hypervisor_version_check_on_lm_raise_ex(self, mock_get_info):
+ host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7}
+ host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6}
+ self.flags(group='workarounds',
+ skip_hypervisor_version_check_on_lm=False)
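+ # With the workaround disabled (the default) the hypervisor version
+ # check is enforced and the older destination is rejected.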
+ mock_get_info.side_effect = [objects.ComputeNode(**host1),
+ objects.ComputeNode(**host2)]
+ self.assertRaises(exception.DestinationHypervisorTooOld,
+ self.task._check_compatible_with_source_hypervisor,
+ self.destination)
+ self.assertEqual([mock.call(self.instance_host),
+ mock.call(self.destination)],
+ mock_get_info.call_args_list)
+
+ @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
+ def test_skip_hypervisor_version_check_on_lm_do_not_raise_ex(
+ self, mock_get_info
+ ):
+ host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7}
+ host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6}
+ self.flags(group='workarounds',
+ skip_hypervisor_version_check_on_lm=True)
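+ # With the workaround enabled the version check is skipped and no
+ # exception is raised.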
+ mock_get_info.side_effect = [objects.ComputeNode(**host1),
+ objects.ComputeNode(**host2)]
+ self.task._check_compatible_with_source_hypervisor(self.destination)
+ self.assertEqual([mock.call(self.instance_host),
+ mock.call(self.destination)],
+ mock_get_info.call_args_list)
+
@mock.patch.object(compute_rpcapi.ComputeAPI,
'check_can_live_migrate_destination')
def test_check_requested_destination(self, mock_check):
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
index 15aa960aad..8c954db9a7 100644
--- a/nova/tests/unit/conductor/test_conductor.py
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -17,6 +17,8 @@
import copy
+import ddt
+from keystoneauth1 import exceptions as ks_exc
import mock
from oslo_db import exception as db_exc
from oslo_limit import exception as limit_exceptions
@@ -52,6 +54,7 @@ from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import request_spec
from nova.scheduler.client import query
+from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests import fixtures
@@ -4869,3 +4872,35 @@ class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
logtext)
self.assertIn('host3\' because it is not up', logtext)
self.assertIn('image1 failed 1 times', logtext)
+
+
+@ddt.ddt
+class TestConductorTaskManager(test.NoDBTestCase):
+ def test_placement_client_startup(self):
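+ # Constructing the ComputeTaskManager should eagerly create the
+ # global placement client singleton.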
+ self.assertIsNone(report.PLACEMENTCLIENT)
+ conductor_manager.ComputeTaskManager()
+ self.assertIsNotNone(report.PLACEMENTCLIENT)
+
+ @ddt.data(ks_exc.MissingAuthPlugin,
+ ks_exc.Unauthorized,
+ test.TestingException)
+ def test_placement_client_startup_fatals(self, exc):
+ self.assertRaises(exc,
+ self._test_placement_client_startup_exception, exc)
+
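+ # Transient connectivity problems are logged but do not prevent
+ # the conductor from starting.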
+ @ddt.data(ks_exc.EndpointNotFound,
+ ks_exc.DiscoveryFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout,
+ ks_exc.ConnectFailure)
+ def test_placement_client_startup_non_fatal(self, exc):
+ self._test_placement_client_startup_exception(exc)
+
+ @mock.patch.object(report, 'LOG')
+ def _test_placement_client_startup_exception(self, exc, mock_log):
+ with mock.patch.object(report.SchedulerReportClient, '_create_client',
+ side_effect=exc):
+ try:
+ conductor_manager.ComputeTaskManager()
+ finally:
+ mock_log.error.assert_called_once()
diff --git a/nova/tests/unit/console/test_websocketproxy.py b/nova/tests/unit/console/test_websocketproxy.py
index e05ae520d9..0c897e3e91 100644
--- a/nova/tests/unit/console/test_websocketproxy.py
+++ b/nova/tests/unit/console/test_websocketproxy.py
@@ -589,12 +589,12 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
- def test_reject_open_redirect(self):
+ def test_reject_open_redirect(self, url='//example.com/%2F..'):
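+ # The default argument lets other tests reuse this check with
+ # different URLs (see test_reject_open_redirect_3_slashes below).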
# This will test the behavior when an attempt is made to cause an open
# redirect. It should be rejected.
mock_req = mock.MagicMock()
mock_req.makefile().readline.side_effect = [
- b'GET //example.com/%2F.. HTTP/1.1\r\n',
+ f'GET {url} HTTP/1.1\r\n'.encode('utf-8'),
b''
]
@@ -619,41 +619,32 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
result = output.readlines()
# Verify no redirect happens and instead a 400 Bad Request is returned.
- self.assertIn('400 URI must not start with //', result[0].decode())
+ # NOTE: As of python 3.10.6 there is a fix for this vulnerability,
+ # which will cause a 301 Moved Permanently response to be returned
+ # instead, redirecting to a sanitized version of the URL with the
+ # extra leading '/' characters removed.
+ # See https://github.com/python/cpython/issues/87389 for details.
+ # We will consider either response to be valid for this test. This will
+ # also help if and when the above fix gets backported to older versions
+ # of python.
+ errmsg = result[0].decode()
+ expected_nova = '400 URI must not start with //'
+ expected_cpython = '301 Moved Permanently'
+
+ self.assertTrue(expected_nova in errmsg or expected_cpython in errmsg)
+
+ # If we detect the cpython fix, verify that the redirect location is
+ # now the same url but with extra leading '/' characters removed.
+ if expected_cpython in errmsg:
+ location = result[3].decode()
+ location = location.removeprefix('Location: ').rstrip('\r\n')
+ self.assertTrue(
+ location.startswith('/example.com/%2F..'),
+ msg='Redirect location is not the expected sanitized URL',
+ )
def test_reject_open_redirect_3_slashes(self):
- # This will test the behavior when an attempt is made to cause an open
- # redirect. It should be rejected.
- mock_req = mock.MagicMock()
- mock_req.makefile().readline.side_effect = [
- b'GET ///example.com/%2F.. HTTP/1.1\r\n',
- b''
- ]
-
- # Collect the response data to verify at the end. The
- # SimpleHTTPRequestHandler writes the response data by calling the
- # request socket sendall() method.
- self.data = b''
-
- def fake_sendall(data):
- self.data += data
-
- mock_req.sendall.side_effect = fake_sendall
-
- client_addr = ('8.8.8.8', 54321)
- mock_server = mock.MagicMock()
- # This specifies that the server will be able to handle requests other
- # than only websockets.
- mock_server.only_upgrade = False
-
- # Constructing a handler will process the mock_req request passed in.
- websocketproxy.NovaProxyRequestHandler(
- mock_req, client_addr, mock_server)
-
- # Verify no redirect happens and instead a 400 Bad Request is returned.
- self.data = self.data.decode()
- self.assertIn('Error code: 400', self.data)
- self.assertIn('Message: URI must not start with //', self.data)
+ self.test_reject_open_redirect(url='///example.com/%2F..')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_no_compute_rpcapi_with_invalid_token(self, mock_validate):
diff --git a/nova/tests/unit/db/main/test_api.py b/nova/tests/unit/db/main/test_api.py
index c9a9e83154..e869d0403c 100644
--- a/nova/tests/unit/db/main/test_api.py
+++ b/nova/tests/unit/db/main/test_api.py
@@ -279,33 +279,21 @@ class DecoratorTestCase(test.TestCase):
'No DB access allowed in ',
mock_log.error.call_args[0][0])
- @mock.patch.object(db, 'LOG')
- @mock.patch.object(db, 'DISABLE_DB_ACCESS', return_value=True)
- def test_pick_context_manager_writer_disable_db_access(
- self, mock_DISABLE_DB_ACCESS, mock_log,
- ):
+ def test_pick_context_manager_writer_disable_db_access(self):
@db.pick_context_manager_writer
def func(context, value):
pass
self._test_pick_context_manager_disable_db_access(func)
- @mock.patch.object(db, 'LOG')
- @mock.patch.object(db, 'DISABLE_DB_ACCESS', return_value=True)
- def test_pick_context_manager_reader_disable_db_access(
- self, mock_DISABLE_DB_ACCESS, mock_log,
- ):
+ def test_pick_context_manager_reader_disable_db_access(self):
@db.pick_context_manager_reader
def func(context, value):
pass
self._test_pick_context_manager_disable_db_access(func)
- @mock.patch.object(db, 'LOG')
- @mock.patch.object(db, 'DISABLE_DB_ACCESS', return_value=True)
- def test_pick_context_manager_reader_allow_async_disable_db_access(
- self, mock_DISABLE_DB_ACCESS, mock_log,
- ):
+ def test_pick_context_manager_reader_allow_async_disable_db_access(self):
@db.pick_context_manager_reader_allow_async
def func(context, value):
pass
diff --git a/nova/tests/unit/network/test_neutron.py b/nova/tests/unit/network/test_neutron.py
index 40137cef39..458777c3a3 100644
--- a/nova/tests/unit/network/test_neutron.py
+++ b/nova/tests/unit/network/test_neutron.py
@@ -143,6 +143,22 @@ class TestNeutronClient(test.NoDBTestCase):
self.assertIsInstance(cl.httpclient.auth,
service_token.ServiceTokenAuthWrapper)
+ @mock.patch('nova.service_auth._SERVICE_AUTH')
+ @mock.patch('nova.network.neutron._ADMIN_AUTH')
+ @mock.patch.object(ks_loading, 'load_auth_from_conf_options')
+ def test_admin_with_service_token(
+ self, mock_load, mock_admin_auth, mock_service_auth
+ ):
+ self.flags(send_service_user_token=True, group='service_user')
+
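+ # Even admin clients should carry the service user token: the admin
+ # auth plugin is wrapped together with the service auth.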
+ admin_context = context.get_admin_context()
+
+ cl = neutronapi.get_client(admin_context)
+ self.assertIsInstance(cl.httpclient.auth,
+ service_token.ServiceTokenAuthWrapper)
+ self.assertEqual(mock_admin_auth, cl.httpclient.auth.user_auth)
+ self.assertEqual(mock_service_auth, cl.httpclient.auth.service_auth)
+
@mock.patch.object(client.Client, "list_networks",
side_effect=exceptions.Unauthorized())
def test_Unauthorized_user(self, mock_list_networks):
@@ -3383,6 +3399,155 @@ class TestAPI(TestAPIBase):
mocked_client.list_ports.assert_called_once_with(
tenant_id=uuids.fake, device_id=uuids.instance)
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_physnet_tunneled_info',
+ new=mock.Mock(return_value=(None, False)))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_preexisting_port_ids',
+ new=mock.Mock(return_value=[]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_subnets_from_port',
+ new=mock.Mock(return_value=[model.Subnet(cidr='1.0.0.0/8')]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_floating_ips_by_fixed_and_port',
+ new=mock.Mock(return_value=[{'floating_ip_address': '10.0.0.1'}]))
+ @mock.patch.object(neutronapi, 'get_client')
+ def test_build_network_info_model_full_vnic_type_change(
+ self, mock_get_client
+ ):
+ mocked_client = mock.create_autospec(client.Client)
+ mock_get_client.return_value = mocked_client
+ fake_inst = objects.Instance()
+ fake_inst.project_id = uuids.fake
+ fake_inst.uuid = uuids.instance
+ fake_ports = [
+ {
+ "id": "port1",
+ "network_id": "net-id",
+ "tenant_id": uuids.fake,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "fixed_ips": [{"ip_address": "1.1.1.1"}],
+ "mac_address": "de:ad:be:ef:00:01",
+ "binding:vif_type": model.VIF_TYPE_BRIDGE,
+ "binding:vnic_type": model.VNIC_TYPE_DIRECT,
+ "binding:vif_details": {},
+ },
+ ]
+ mocked_client.list_ports.return_value = {'ports': fake_ports}
+ fake_inst.info_cache = objects.InstanceInfoCache.new(
+ self.context, uuids.instance)
+ fake_inst.info_cache.network_info = model.NetworkInfo.hydrate([])
+
+ # build the network info first
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ force_refresh=True,
+ )
+
+ self.assertEqual(1, len(nw_infos))
+ fake_inst.info_cache.network_info = nw_infos
+
+ # change the vnic_type of the port and rebuild the network info
+ fake_ports[0]["binding:vnic_type"] = model.VNIC_TYPE_MACVTAP
+ with mock.patch(
+ "nova.network.neutron.API._log_error_if_vnic_type_changed"
+ ) as mock_log:
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ force_refresh=True,
+ )
+
+ mock_log.assert_called_once_with(
+ fake_ports[0]["id"], "direct", "macvtap", fake_inst)
+ self.assertEqual(1, len(nw_infos))
+
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_physnet_tunneled_info',
+ new=mock.Mock(return_value=(None, False)))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_preexisting_port_ids',
+ new=mock.Mock(return_value=[]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_subnets_from_port',
+ new=mock.Mock(return_value=[model.Subnet(cidr='1.0.0.0/8')]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_floating_ips_by_fixed_and_port',
+ new=mock.Mock(return_value=[{'floating_ip_address': '10.0.0.1'}]))
+ @mock.patch.object(neutronapi, 'get_client')
+ def test_build_network_info_model_single_vnic_type_change(
+ self, mock_get_client
+ ):
+ mocked_client = mock.create_autospec(client.Client)
+ mock_get_client.return_value = mocked_client
+ fake_inst = objects.Instance()
+ fake_inst.project_id = uuids.fake
+ fake_inst.uuid = uuids.instance
+ fake_ports = [
+ {
+ "id": "port1",
+ "network_id": "net-id",
+ "tenant_id": uuids.fake,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "fixed_ips": [{"ip_address": "1.1.1.1"}],
+ "mac_address": "de:ad:be:ef:00:01",
+ "binding:vif_type": model.VIF_TYPE_BRIDGE,
+ "binding:vnic_type": model.VNIC_TYPE_DIRECT,
+ "binding:vif_details": {},
+ },
+ ]
+ fake_nets = [
+ {
+ "id": "net-id",
+ "name": "foo",
+ "tenant_id": uuids.fake,
+ }
+ ]
+ mocked_client.list_ports.return_value = {'ports': fake_ports}
+ fake_inst.info_cache = objects.InstanceInfoCache.new(
+ self.context, uuids.instance)
+ fake_inst.info_cache.network_info = model.NetworkInfo.hydrate([])
+
+ # build the network info first
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ fake_nets,
+ [fake_ports[0]["id"]],
+ refresh_vif_id=fake_ports[0]["id"],
+ )
+
+ self.assertEqual(1, len(nw_infos))
+ fake_inst.info_cache.network_info = nw_infos
+
+ # change the vnic_type of the port and rebuild the network info
+ fake_ports[0]["binding:vnic_type"] = model.VNIC_TYPE_MACVTAP
+ with mock.patch(
+ "nova.network.neutron.API._log_error_if_vnic_type_changed"
+ ) as mock_log:
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ fake_nets,
+ [fake_ports[0]["id"]],
+ refresh_vif_id=fake_ports[0]["id"],
+ )
+
+ mock_log.assert_called_once_with(
+ fake_ports[0]["id"], "direct", "macvtap", fake_inst)
+ self.assertEqual(1, len(nw_infos))
+
@mock.patch.object(neutronapi, 'get_client')
def test_get_subnets_from_port(self, mock_get_client):
mocked_client = mock.create_autospec(client.Client)
@@ -4809,6 +4974,174 @@ class TestAPI(TestAPIBase):
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
+ @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
+ @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
+ def test_update_port_bindings_for_instance_with_sriov_pf(
+ self, get_client_mock, get_pci_device_devspec_mock
+ ):
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'physnet1'}
+ get_pci_device_devspec_mock.return_value = devspec
+
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.migration_context = objects.MigrationContext()
+ instance.migration_context.old_pci_devices = objects.PciDeviceList(
+ objects=[
+ objects.PciDevice(
+ vendor_id='8086',
+ product_id='154d',
+ address='0000:0a:01',
+ compute_node_id=1,
+ request_id=uuids.pci_req,
+ dev_type=obj_fields.PciDeviceType.SRIOV_PF,
+ extra_info={'mac_address': 'b4:96:91:34:f4:36'},
+ )
+ ]
+ )
+ instance.pci_devices = instance.migration_context.old_pci_devices
+ instance.migration_context.new_pci_devices = objects.PciDeviceList(
+ objects=[
+ objects.PciDevice(
+ vendor_id='8086',
+ product_id='154d',
+ address='0000:0a:02',
+ compute_node_id=2,
+ request_id=uuids.pci_req,
+ dev_type=obj_fields.PciDeviceType.SRIOV_PF,
+ extra_info={'mac_address': 'b4:96:91:34:f4:dd'},
+ )
+ ]
+ )
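+ # once the migration is confirmed the instance tracks the new devices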
+ instance.pci_devices = instance.migration_context.new_pci_devices
+
+ fake_ports = {
+ 'ports': [
+ {
+ 'id': uuids.port,
+ 'binding:vnic_type': 'direct-physical',
+ constants.BINDING_HOST_ID: 'fake-host-old',
+ constants.BINDING_PROFILE: {
+ 'pci_slot': '0000:0a:01',
+ 'physical_network': 'old_phys_net',
+ 'pci_vendor_info': 'old_pci_vendor_info',
+ },
+ },
+ ]
+ }
+
+ migration = objects.Migration(
+ status='confirmed', migration_type='migration')
+ list_ports_mock = mock.Mock(return_value=fake_ports)
+ get_client_mock.return_value.list_ports = list_ports_mock
+
+ update_port_mock = mock.Mock()
+ get_client_mock.return_value.update_port = update_port_mock
+
+ self.api._update_port_binding_for_instance(
+ self.context, instance, instance.host, migration)
+
+ # Assert that update_port is called with the binding:profile
+ # corresponding to the PCI device specified including MAC address.
+ update_port_mock.assert_called_once_with(
+ uuids.port,
+ {
+ 'port': {
+ constants.BINDING_HOST_ID: 'fake-host',
+ 'device_owner': 'compute:%s' % instance.availability_zone,
+ constants.BINDING_PROFILE: {
+ 'pci_slot': '0000:0a:02',
+ 'physical_network': 'physnet1',
+ 'pci_vendor_info': '8086:154d',
+ 'device_mac_address': 'b4:96:91:34:f4:dd',
+ },
+ }
+ },
+ )
+
+ @mock.patch(
+ 'nova.network.neutron.API.has_extended_resource_request_extension',
+ new=mock.Mock(return_value=False),
+ )
+ @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
+ @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
+ def test_update_port_bindings_for_instance_with_sriov_pf_no_migration(
+ self, get_client_mock, get_pci_device_devspec_mock
+ ):
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'physnet1'}
+ get_pci_device_devspec_mock.return_value = devspec
+
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.pci_requests = objects.InstancePCIRequests(
+ instance_uuid=instance.uuid,
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port,
+ request_id=uuids.pci_req,
+ )
+ ],
+ )
+ instance.pci_devices = objects.PciDeviceList(
+ objects=[
+ objects.PciDevice(
+ vendor_id='8086',
+ product_id='154d',
+ address='0000:0a:02',
+ compute_node_id=2,
+ request_id=uuids.pci_req,
+ dev_type=obj_fields.PciDeviceType.SRIOV_PF,
+ extra_info={'mac_address': 'b4:96:91:34:f4:36'},
+ )
+ ]
+ )
+
+ fake_ports = {
+ 'ports': [
+ {
+ 'id': uuids.port,
+ 'binding:vnic_type': 'direct-physical',
+ constants.BINDING_HOST_ID: 'fake-host-old',
+ constants.BINDING_PROFILE: {
+ 'pci_slot': '0000:0a:01',
+ 'physical_network': 'old_phys_net',
+ 'pci_vendor_info': 'old_pci_vendor_info',
+ 'device_mac_address': 'b4:96:91:34:f4:dd'
+ },
+ },
+ ]
+ }
+
+ list_ports_mock = mock.Mock(return_value=fake_ports)
+ get_client_mock.return_value.list_ports = list_ports_mock
+
+ update_port_mock = mock.Mock()
+ get_client_mock.return_value.update_port = update_port_mock
+
+ self.api._update_port_binding_for_instance(
+ self.context, instance, instance.host)
+
+ # Assert that update_port is called with the binding:profile
+ # corresponding to the PCI device specified including MAC address.
+ update_port_mock.assert_called_once_with(
+ uuids.port,
+ {
+ 'port': {
+ constants.BINDING_HOST_ID: 'fake-host',
+ 'device_owner': 'compute:%s' % instance.availability_zone,
+ constants.BINDING_PROFILE: {
+ 'pci_slot': '0000:0a:02',
+ 'physical_network': 'physnet1',
+ 'pci_vendor_info': '8086:154d',
+ 'device_mac_address': 'b4:96:91:34:f4:36',
+ },
+ }
+ },
+ )
+
+ @mock.patch(
+ 'nova.network.neutron.API.has_extended_resource_request_extension',
+ new=mock.Mock(return_value=False),
+ )
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_with_resource_req(
self, get_client_mock):
@@ -7190,23 +7523,21 @@ class TestAPI(TestAPIBase):
request_id=uuids.pci_request_id)
bad_request = objects.InstancePCIRequest(
requester_id=uuids.wrong_port_id)
- device = objects.PciDevice(request_id=uuids.pci_request_id,
- address='fake-pci-address')
+ device = objects.PciDevice(request_id=uuids.pci_request_id)
bad_device = objects.PciDevice(request_id=uuids.wrong_request_id)
# Test the happy path
instance = objects.Instance(
pci_requests=objects.InstancePCIRequests(requests=[request]),
pci_devices=objects.PciDeviceList(objects=[device]))
self.assertEqual(
- 'fake-pci-address',
- self.api._get_port_pci_dev(
- self.context, instance, fake_port).address)
+ device,
+ self.api._get_port_pci_dev(instance, fake_port))
# Test not finding the request
instance = objects.Instance(
pci_requests=objects.InstancePCIRequests(
requests=[objects.InstancePCIRequest(bad_request)]))
self.assertIsNone(
- self.api._get_port_pci_dev(self.context, instance, fake_port))
+ self.api._get_port_pci_dev(instance, fake_port))
mock_debug.assert_called_with('No PCI request found for port %s',
uuids.fake_port_id, instance=instance)
mock_debug.reset_mock()
@@ -7215,7 +7546,7 @@ class TestAPI(TestAPIBase):
pci_requests=objects.InstancePCIRequests(requests=[request]),
pci_devices=objects.PciDeviceList(objects=[bad_device]))
self.assertIsNone(
- self.api._get_port_pci_dev(self.context, instance, fake_port))
+ self.api._get_port_pci_dev(instance, fake_port))
mock_debug.assert_called_with('No PCI device found for request %s',
uuids.pci_request_id, instance=instance)
@@ -7740,6 +8071,45 @@ class TestAPIPortbinding(TestAPIBase):
port_req_body['port'][
constants.BINDING_PROFILE])
+ @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
+ @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ def test_populate_neutron_extension_values_binding_sriov_pf(
+ self, mock_get_instance_pci_devs, mock_get_devspec
+ ):
+ host_id = 'my_host_id'
+ instance = {'host': host_id}
+ port_req_body = {'port': {}}
+
+ pci_dev = objects.PciDevice(
+ request_id=uuids.pci_req,
+ address='0000:01:00',
+ parent_addr='0000:02:00',
+ vendor_id='8086',
+ product_id='154d',
+ dev_type=obj_fields.PciDeviceType.SRIOV_PF,
+ extra_info={'mac_address': 'b4:96:91:34:f4:36'}
+ )
+
+ expected_profile = {
+ 'pci_vendor_info': '8086:154d',
+ 'pci_slot': '0000:01:00',
+ 'physical_network': 'physnet1',
+ 'device_mac_address': 'b4:96:91:34:f4:36',
+ }
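+ # For a PF, the MAC stored in extra_info should be exposed to
+ # Neutron as device_mac_address in the binding profile.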
+
+ mock_get_instance_pci_devs.return_value = [pci_dev]
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'physnet1'}
+ mock_get_devspec.return_value = devspec
+
+ self.api._populate_neutron_binding_profile(
+ instance, uuids.pci_req, port_req_body, None)
+
+ self.assertEqual(
+ expected_profile,
+ port_req_body['port'][constants.BINDING_PROFILE]
+ )
+
@mock.patch.object(
pci_utils, 'get_vf_num_by_pci_address',
new=mock.MagicMock(side_effect=(lambda vf_a: 1
@@ -7867,21 +8237,29 @@ class TestAPIPortbinding(TestAPIBase):
devspec.get_tags.return_value = {'physical_network': 'physnet1'}
mock_get_pci_device_devspec.return_value = devspec
- pci_dev = {'vendor_id': 'a2d6',
- 'product_id': '15b3',
- 'address': '0000:0a:00.0',
- 'card_serial_number': 'MT2113X00000',
- 'dev_type': obj_fields.PciDeviceType.SRIOV_PF,
- }
- PciDevice = collections.namedtuple('PciDevice',
- ['vendor_id', 'product_id', 'address',
- 'card_serial_number', 'dev_type'])
- mydev = PciDevice(**pci_dev)
+ pci_dev = objects.PciDevice(
+ request_id=uuids.pci_req,
+ address='0000:0a:00.0',
+ parent_addr='0000:02:00',
+ vendor_id='a2d6',
+ product_id='15b3',
+ dev_type=obj_fields.PciDeviceType.SRIOV_PF,
+ extra_info={
+ 'capabilities': jsonutils.dumps(
+ {'card_serial_number': 'MT2113X00000'}),
+ 'mac_address': 'b4:96:91:34:f4:36',
+ },
- self.assertEqual({'pci_slot': '0000:0a:00.0',
- 'pci_vendor_info': 'a2d6:15b3',
- 'physical_network': 'physnet1'},
- self.api._get_pci_device_profile(mydev))
+ )
+ self.assertEqual(
+ {
+ 'pci_slot': '0000:0a:00.0',
+ 'pci_vendor_info': 'a2d6:15b3',
+ 'physical_network': 'physnet1',
+ 'device_mac_address': 'b4:96:91:34:f4:36',
+ },
+ self.api._get_pci_device_profile(pci_dev),
+ )
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
diff --git a/nova/tests/unit/objects/test_request_spec.py b/nova/tests/unit/objects/test_request_spec.py
index 31797f8133..e51b5c3368 100644
--- a/nova/tests/unit/objects/test_request_spec.py
+++ b/nova/tests/unit/objects/test_request_spec.py
@@ -615,6 +615,30 @@ class _TestRequestSpecObject(object):
self.assertIsInstance(req_obj.instance_group, objects.InstanceGroup)
self.assertEqual('fresh', req_obj.instance_group.name)
+ @mock.patch.object(
+ request_spec.RequestSpec, '_get_by_instance_uuid_from_db'
+ )
+ @mock.patch('nova.objects.InstanceGroup.get_by_uuid')
+ def test_get_by_instance_uuid_deleted_group(
+ self, mock_get_ig, get_by_uuid
+ ):
+ fake_spec_obj = fake_request_spec.fake_spec_obj()
+ fake_spec_obj.scheduler_hints['group'] = ['fresh']
+ fake_spec = fake_request_spec.fake_db_spec(fake_spec_obj)
+ get_by_uuid.return_value = fake_spec
+ mock_get_ig.side_effect = exception.InstanceGroupNotFound(
+ group_uuid=uuids.instgroup
+ )
+
+ req_obj = request_spec.RequestSpec.get_by_instance_uuid(
+ self.context, fake_spec['instance_uuid']
+ )
+ # assert that both the instance_group object and scheduler hint
+ # are cleared if the instance_group was deleted since the request
+ # spec was last saved to the db.
+ self.assertIsNone(req_obj.instance_group)
+ self.assertEqual({'hint': ['over-there']}, req_obj.scheduler_hints)
+
@mock.patch('nova.objects.request_spec.RequestSpec.save')
@mock.patch.object(
request_spec.RequestSpec, '_get_by_instance_uuid_from_db')
diff --git a/nova/tests/unit/pci/test_stats.py b/nova/tests/unit/pci/test_stats.py
index 804b76ffba..b88cfd19ef 100644
--- a/nova/tests/unit/pci/test_stats.py
+++ b/nova/tests/unit/pci/test_stats.py
@@ -98,16 +98,7 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceStatsTestCase, self).setUp()
- self._setup_pci_stats()
-
- def _setup_pci_stats(self, numa_topology=None):
- """Exists for tests that need to setup pci_stats with a specific NUMA
- topology, while still allowing tests that don't care to get the default
- "empty" one.
- """
- if not numa_topology:
- numa_topology = objects.NUMATopology()
- self.pci_stats = stats.PciDeviceStats(numa_topology)
+ self.pci_stats = stats.PciDeviceStats(objects.NUMATopology())
# The following two calls need to be made before adding the devices.
patcher = fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
@@ -240,18 +231,18 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
self.assertFalse(self.pci_stats.support_requests(pci_requests, cells))
def test_filter_pools_for_socket_affinity_no_socket(self):
- self._setup_pci_stats(
- objects.NUMATopology(
- cells=[objects.NUMACell(socket=None)]))
+ self.pci_stats.numa_topology = objects.NUMATopology(
+ cells=[objects.NUMACell(socket=None)])
+
self.assertEqual(
[],
self.pci_stats._filter_pools_for_socket_affinity(
self.pci_stats.pools, [objects.InstanceNUMACell()]))
def test_filter_pools_for_socket_affinity(self):
- self._setup_pci_stats(
- objects.NUMATopology(
- cells=[objects.NUMACell(id=1, socket=1)]))
+ self.pci_stats.numa_topology = objects.NUMATopology(
+ cells=[objects.NUMACell(id=1, socket=1)])
+
pools = self.pci_stats._filter_pools_for_socket_affinity(
self.pci_stats.pools, [objects.InstanceNUMACell(id=1)])
self.assertEqual(1, len(pools))
diff --git a/nova/tests/unit/policies/test_servers.py b/nova/tests/unit/policies/test_servers.py
index 3ed4bfe085..2130c62e5f 100644
--- a/nova/tests/unit/policies/test_servers.py
+++ b/nova/tests/unit/policies/test_servers.py
@@ -1229,10 +1229,9 @@ class ServersPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.API._allow_resize_to_same_host')
@mock.patch('nova.objects.RequestSpec.get_by_instance_uuid')
@mock.patch('nova.objects.Instance.save')
- @mock.patch('nova.api.openstack.common.get_instance')
@mock.patch('nova.conductor.ComputeTaskAPI.resize_instance')
def test_cross_cell_resize_server_policy(
- self, mock_resize, mock_get, mock_save, mock_rs, mock_allow, m_net
+ self, mock_resize, mock_save, mock_rs, mock_allow, m_net
):
# 'migrate' policy is checked before 'resize:cross_cell' so
@@ -1262,7 +1261,7 @@ class ServersPolicyTest(base.BasePolicyTest):
)
return inst
- mock_get.side_effect = fake_get
+ self.mock_get.side_effect = fake_get
def fake_validate(context, instance,
host_name, allow_cross_cell_resize):
diff --git a/nova/tests/unit/scheduler/client/test_report.py b/nova/tests/unit/scheduler/client/test_report.py
index 0650c62096..9b2f5c3a0a 100644
--- a/nova/tests/unit/scheduler/client/test_report.py
+++ b/nova/tests/unit/scheduler/client/test_report.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
+import ddt
import time
from urllib import parse
@@ -150,6 +151,60 @@ class SafeConnectedTestCase(test.NoDBTestCase):
self.assertTrue(req.called)
+@ddt.ddt
+class TestSingleton(test.NoDBTestCase):
+ def test_singleton(self):
+ # Make sure we start with a clean slate
+ self.assertIsNone(report.PLACEMENTCLIENT)
+
+ # Make sure the first call creates the singleton, sets it
+ # globally, and returns it
+ client = report.report_client_singleton()
+ self.assertEqual(client, report.PLACEMENTCLIENT)
+
+ # Make sure that a subsequent call returns the same thing
+ # again and that the global is unchanged
+ self.assertEqual(client, report.report_client_singleton())
+ self.assertEqual(client, report.PLACEMENTCLIENT)
+
+ @ddt.data(ks_exc.EndpointNotFound,
+ ks_exc.MissingAuthPlugin,
+ ks_exc.Unauthorized,
+ ks_exc.DiscoveryFailure,
+ ks_exc.ConnectFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout,
+ test.TestingException)
+ def test_errors(self, exc):
+ self._test_error(exc)
+
+ @mock.patch.object(report, 'LOG')
+ def _test_error(self, exc, mock_log):
+ with mock.patch.object(report.SchedulerReportClient, '_create_client',
+ side_effect=exc):
+ self.assertRaises(exc, report.report_client_singleton)
+ mock_log.error.assert_called_once()
+
+ def test_error_then_success(self):
+ # Simulate an error
+ self._test_error(ks_exc.ConnectFailure)
+
+ # Ensure we did not set the global client
+ self.assertIsNone(report.PLACEMENTCLIENT)
+
+ # Call again, with no error
+ client = report.report_client_singleton()
+
+ # Make sure we got a client and that it was set as the global
+ # one
+ self.assertIsNotNone(client)
+ self.assertEqual(client, report.PLACEMENTCLIENT)
+
+ # Make sure we keep getting the same one
+ client2 = report.report_client_singleton()
+ self.assertEqual(client, client2)
+
+
class TestConstructor(test.NoDBTestCase):
def setUp(self):
super(TestConstructor, self).setUp()
diff --git a/nova/tests/unit/test_metadata.py b/nova/tests/unit/test_metadata.py
index 630cb54418..1c78ddea51 100644
--- a/nova/tests/unit/test_metadata.py
+++ b/nova/tests/unit/test_metadata.py
@@ -1458,20 +1458,17 @@ class MetadataHandlerTestCase(test.TestCase):
for c in range(ord('a'), ord('z'))]
mock_client.list_subnets.return_value = {
'subnets': subnet_list}
+ mock_client.list_ports.side_effect = fake_list_ports
- with mock.patch.object(
- mock_client, 'list_ports',
- side_effect=fake_list_ports) as mock_list_ports:
-
- response = fake_request(
- self, self.mdinst,
- relpath="/2009-04-04/user-data",
- address="192.192.192.2",
- fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
- headers={'X-Forwarded-For': '192.192.192.2',
- 'X-Metadata-Provider': proxy_lb_id})
-
- self.assertEqual(3, mock_list_ports.call_count)
+ response = fake_request(
+ self, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Metadata-Provider': proxy_lb_id})
+
+ self.assertEqual(3, mock_client.list_ports.call_count)
self.assertEqual(200, response.status_int)
diff --git a/nova/tests/unit/test_service_auth.py b/nova/tests/unit/test_service_auth.py
index db2a2e2899..ceb2a93b02 100644
--- a/nova/tests/unit/test_service_auth.py
+++ b/nova/tests/unit/test_service_auth.py
@@ -55,3 +55,13 @@ class ServiceAuthTestCase(test.NoDBTestCase):
result = service_auth.get_auth_plugin(self.ctx)
self.assertEqual(1, mock_load.call_count)
self.assertNotIsInstance(result, service_token.ServiceTokenAuthWrapper)
+
+ @mock.patch.object(ks_loading, 'load_auth_from_conf_options',
+ new=mock.Mock())
+ def test_get_auth_plugin_user_auth(self):
+ self.flags(send_service_user_token=True, group='service_user')
+ user_auth = mock.Mock()
+
+ result = service_auth.get_auth_plugin(self.ctx, user_auth=user_auth)
+
+ self.assertEqual(user_auth, result.user_auth)
diff --git a/nova/tests/unit/test_test.py b/nova/tests/unit/test_test.py
index 8381792de6..5642a6da74 100644
--- a/nova/tests/unit/test_test.py
+++ b/nova/tests/unit/test_test.py
@@ -361,21 +361,6 @@ class PatchExistsTestCase(test.NoDBTestCase):
self.assertTrue(os.path.exists(os.path.dirname(__file__)))
self.assertFalse(os.path.exists('non-existent/file'))
- @test.patch_exists('fake_file1', True)
- @test.patch_exists('fake_file2', True)
- @test.patch_exists(__file__, False)
- def test_patch_exists_multiple_decorators(self):
- """Test that @patch_exists can be used multiple times on the
- same method.
- """
- self.assertTrue(os.path.exists('fake_file1'))
- self.assertTrue(os.path.exists('fake_file2'))
- self.assertFalse(os.path.exists(__file__))
-
- # Check non-patched parameters
- self.assertTrue(os.path.exists(os.path.dirname(__file__)))
- self.assertFalse(os.path.exists('non-existent/file'))
-
class PatchOpenTestCase(test.NoDBTestCase):
fake_contents = "These file contents don't really exist"
diff --git a/nova/tests/unit/virt/hyperv/test_vmops.py b/nova/tests/unit/virt/hyperv/test_vmops.py
index dd4dc52d5b..0110b595c7 100644
--- a/nova/tests/unit/virt/hyperv/test_vmops.py
+++ b/nova/tests/unit/virt/hyperv/test_vmops.py
@@ -1129,7 +1129,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
mock_unplug_vifs.assert_called_once_with(
mock_instance, mock.sentinel.fake_network_info)
mock_disconnect_volumes.assert_called_once_with(
- mock.sentinel.FAKE_BD_INFO)
+ mock.sentinel.FAKE_BD_INFO, force=True)
mock_delete_disk_files.assert_called_once_with(
mock_instance.name)
@@ -1374,12 +1374,10 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
def test_get_vm_state(self):
summary_info = {'EnabledState': os_win_const.HYPERV_VM_STATE_DISABLED}
- with mock.patch.object(self._vmops._vmutils,
- 'get_vm_summary_info') as mock_get_summary_info:
- mock_get_summary_info.return_value = summary_info
+ self._vmops._vmutils.get_vm_summary_info.return_value = summary_info
- response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
- self.assertEqual(response, os_win_const.HYPERV_VM_STATE_DISABLED)
+ response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
+ self.assertEqual(response, os_win_const.HYPERV_VM_STATE_DISABLED)
@mock.patch.object(vmops.VMOps, '_get_vm_state')
def test_wait_for_power_off_true(self, mock_get_state):
@@ -1418,12 +1416,11 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
def test_list_instance_uuids(self):
fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
- with mock.patch.object(self._vmops._vmutils,
- 'list_instance_notes') as mock_list_notes:
- mock_list_notes.return_value = [('fake_name', [fake_uuid])]
+ self._vmops._vmutils.list_instance_notes.return_value = (
+ [('fake_name', [fake_uuid])])
- response = self._vmops.list_instance_uuids()
- mock_list_notes.assert_called_once_with()
+ response = self._vmops.list_instance_uuids()
+ self._vmops._vmutils.list_instance_notes.assert_called_once_with()
self.assertEqual(response, [fake_uuid])
diff --git a/nova/tests/unit/virt/hyperv/test_volumeops.py b/nova/tests/unit/virt/hyperv/test_volumeops.py
index da7262085d..4a088b6030 100644
--- a/nova/tests/unit/virt/hyperv/test_volumeops.py
+++ b/nova/tests/unit/virt/hyperv/test_volumeops.py
@@ -140,7 +140,13 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
self._volumeops.disconnect_volumes(block_device_info)
fake_volume_driver.disconnect_volume.assert_called_once_with(
- block_device_mapping[0]['connection_info'])
+ block_device_mapping[0]['connection_info'], force=False)
+
+ # Verify force=True
+ fake_volume_driver.disconnect_volume.reset_mock()
+ self._volumeops.disconnect_volumes(block_device_info, force=True)
+ fake_volume_driver.disconnect_volume.assert_called_once_with(
+ block_device_mapping[0]['connection_info'], force=True)
@mock.patch('time.sleep')
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
@@ -180,7 +186,7 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
if attach_failed:
fake_volume_driver.disconnect_volume.assert_called_once_with(
- fake_conn_info)
+ fake_conn_info, force=False)
mock_sleep.assert_has_calls(
[mock.call(CONF.hyperv.volume_attach_retry_interval)] *
CONF.hyperv.volume_attach_retry_count)
@@ -202,7 +208,13 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
mock_get_volume_driver.assert_called_once_with(
mock.sentinel.conn_info)
fake_volume_driver.disconnect_volume.assert_called_once_with(
- mock.sentinel.conn_info)
+ mock.sentinel.conn_info, force=False)
+
+ # Verify force=True
+ fake_volume_driver.disconnect_volume.reset_mock()
+ self._volumeops.disconnect_volume(mock.sentinel.conn_info, force=True)
+ fake_volume_driver.disconnect_volume.assert_called_once_with(
+ mock.sentinel.conn_info, force=True)
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
def test_detach_volume(self, mock_get_volume_driver):
@@ -346,7 +358,13 @@ class BaseVolumeDriverTestCase(test_base.HyperVBaseTestCase):
self._base_vol_driver.disconnect_volume(conn_info)
self._conn.disconnect_volume.assert_called_once_with(
- conn_info['data'])
+ conn_info['data'], force=False)
+
+ # Verify force=True
+ self._conn.disconnect_volume.reset_mock()
+ self._base_vol_driver.disconnect_volume(conn_info, force=True)
+ self._conn.disconnect_volume.assert_called_once_with(
+ conn_info['data'], force=True)
@mock.patch.object(volumeops.BaseVolumeDriver, '_get_disk_res_path')
def _test_get_disk_resource_path_by_conn_info(self,
diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py
index 7b377b21c2..0b1cc7d47f 100644
--- a/nova/tests/unit/virt/ironic/test_driver.py
+++ b/nova/tests/unit/virt/ironic/test_driver.py
@@ -2597,9 +2597,6 @@ class IronicDriverSyncTestCase(IronicDriverTestCase):
# that the thread completes.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
- self.mock_conn = self.useFixture(
- fixtures.MockPatchObject(self.driver, '_ironic_connection')).mock
-
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
def test_rescue(self, mock_sps, mock_looping):
diff --git a/nova/tests/unit/virt/libvirt/test_config.py b/nova/tests/unit/virt/libvirt/test_config.py
index 396edfd024..c7577745ab 100644
--- a/nova/tests/unit/virt/libvirt/test_config.py
+++ b/nova/tests/unit/virt/libvirt/test_config.py
@@ -3135,6 +3135,32 @@ class LibvirtConfigNodeDeviceTest(LibvirtConfigBaseTest):
config.LibvirtConfigNodeDeviceMdevInformation)
self.assertEqual("nvidia-11", obj.mdev_information.type)
self.assertEqual(12, obj.mdev_information.iommu_group)
+ self.assertIsNone(obj.mdev_information.uuid)
+
+ def test_config_mdev_device_uuid(self):
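+ # A <uuid> element in the mdev capability, when present, should be
+ # parsed into mdev_information.uuid (compare the test above, where
+ # it is absent and the attribute is None).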
+ xmlin = """
+ <device>
+ <name>mdev_b2107403_110c_45b0_af87_32cc91597b8a_0000_41_00_0</name>
+ <path>/sys/devices/pci0000:40/0000:40:03.1/0000:41:00.0/b2107403-110c-45b0-af87-32cc91597b8a</path>
+ <parent>pci_0000_41_00_0</parent>
+ <driver>
+ <name>vfio_mdev</name>
+ </driver>
+ <capability type='mdev'>
+ <type id='nvidia-442'/>
+ <uuid>b2107403-110c-45b0-af87-32cc91597b8a</uuid>
+ <iommuGroup number='57'/>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+ self.assertIsInstance(obj.mdev_information,
+ config.LibvirtConfigNodeDeviceMdevInformation)
+ self.assertEqual("nvidia-442", obj.mdev_information.type)
+ self.assertEqual(57, obj.mdev_information.iommu_group)
+ self.assertEqual("b2107403-110c-45b0-af87-32cc91597b8a",
+ obj.mdev_information.uuid)
def test_config_vdpa_device(self):
xmlin = """
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 5632fcba86..0eada9ee14 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -740,16 +740,14 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'resolve_driver_format',
imagebackend.Image._get_driver_format)
- self.useFixture(nova_fixtures.LibvirtFixture())
+ self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture())
# ensure tests perform the same on all host architectures; this is
# already done by the fakelibvirt fixture but we want to change the
# architecture in some tests
- _p = mock.patch('os.uname')
- self.mock_uname = _p.start()
+ self.mock_uname = self.libvirt.mock_uname
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.X86_64)
- self.addCleanup(_p.stop)
self.test_instance = _create_test_instance()
network_info = objects.InstanceInfoCache(
@@ -2260,6 +2258,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref.info_cache = objects.InstanceInfoCache(
network_info=network_info)
+ pci_utils.get_mac_by_pci_address.side_effect = None
+ pci_utils.get_mac_by_pci_address.return_value = 'da:d1:f2:91:95:c1'
with test.nested(
mock.patch('nova.objects.VirtualInterfaceList'
'.get_by_instance_uuid', return_value=vifs),
@@ -2269,8 +2269,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return_value=guest),
mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc',
return_value=xml),
- mock.patch.object(pci_utils, 'get_mac_by_pci_address',
- return_value='da:d1:f2:91:95:c1')):
+ ):
metadata_obj = drvr._build_device_metadata(self.context,
instance_ref)
metadata = metadata_obj.devices
@@ -6974,14 +6973,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[5].rate_bytes, 1024)
self.assertEqual(cfg.devices[5].rate_period, 2)
- @mock.patch('nova.virt.libvirt.driver.os.path.exists')
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
- def test_get_guest_config_with_rng_backend(self, mock_path):
+ @test.patch_exists(SEV_KERNEL_PARAM_FILE, result=False, other=True)
+ def test_get_guest_config_with_rng_backend(self):
self.flags(virt_type='kvm',
rng_dev_path='/dev/hw_rng',
group='libvirt')
self.flags(pointer_model='ps2mouse')
- mock_path.return_value = True
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
@@ -7591,11 +7588,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_guest_storage_config")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
- @mock.patch('os.path.exists', return_value=True)
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
- def test_get_guest_config_aarch64(
- self, mock_path_exists, mock_numa, mock_storage,
- ):
+ @test.patch_exists(SEV_KERNEL_PARAM_FILE, result=False, other=True)
+ def test_get_guest_config_aarch64(self, mock_numa, mock_storage):
TEST_AMOUNT_OF_PCIE_SLOTS = 8
CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
group='libvirt')
@@ -7615,7 +7609,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self),
image_meta, disk_info)
- self.assertTrue(mock_path_exists.called)
self.assertEqual(cfg.os_mach_type, "virt")
num_ports = 0
@@ -7632,10 +7625,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_guest_storage_config")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
- @mock.patch('os.path.exists', return_value=True)
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
+ @test.patch_exists(SEV_KERNEL_PARAM_FILE, result=False, other=True)
def test_get_guest_config_aarch64_with_graphics(
- self, mock_path_exists, mock_numa, mock_storage,
+ self, mock_numa, mock_storage,
):
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.AARCH64)
@@ -7645,7 +7637,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg = self._get_guest_config_with_graphics()
- self.assertTrue(mock_path_exists.called)
self.assertEqual(cfg.os_mach_type, "virt")
usbhost_exists = False
@@ -9231,7 +9222,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr._disconnect_volume(
self.context, fake_connection_info, fake_instance_1)
mock_volume_driver.disconnect_volume.assert_called_once_with(
- fake_connection_info, fake_instance_1)
+ fake_connection_info, fake_instance_1, force=False)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_detach_encryptor')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
@@ -9605,7 +9596,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
device_name='vdc',
),
mock.call.detach_encryptor(**encryption),
- mock.call.disconnect_volume(connection_info, instance)])
+ mock.call.disconnect_volume(
+ connection_info,
+ instance,
+ force=False,
+ )
+ ])
get_device_conf_func = mock_detach_with_retry.mock_calls[0][1][2]
self.assertEqual(mock_guest.get_disk, get_device_conf_func.func)
self.assertEqual(('vdc',), get_device_conf_func.args)
@@ -11410,13 +11406,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_assert_dest_node_has_enough_disk')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
- '_assert_dest_node_has_enough_disk')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_is_shared_block_storage')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_check_shared_storage_test_file')
def test_check_can_live_migration_source_disk_over_commit_none(self,
- mock_check, mock_shared_block, mock_enough, mock_disk_check):
+ mock_check, mock_shared_block, mock_disk_check):
mock_check.return_value = False
mock_shared_block.return_value = False
@@ -15548,8 +15542,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
filename=filename, size=100 * units.Gi, ephemeral_size=mock.ANY,
specified_fs=None)
- @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
- def test_create_image_resize_snap_backend(self, mock_cache):
+ def test_create_image_resize_snap_backend(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
instance.task_state = task_states.RESIZE_FINISH
@@ -16008,9 +16001,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(ip, CONF.my_ip)
@mock.patch.object(libvirt_driver.LOG, 'warning')
- @mock.patch('nova.compute.utils.get_machine_ips')
- def test_check_my_ip(self, mock_ips, mock_log):
- mock_ips.return_value = ['8.8.8.8', '75.75.75.75']
+ def test_check_my_ip(self, mock_log):
+
+ self.libvirt.mock_get_machine_ips.return_value = [
+ '8.8.8.8', '75.75.75.75']
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._check_my_ip()
mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was '
@@ -16032,6 +16026,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = False
+ drvr._host._init_events.return_value = None
with test.nested(
mock.patch.object(drvr._host, "_connect",
side_effect=fakelibvirt.make_libvirtError(
@@ -16039,8 +16034,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"Failed to connect to host",
error_code=
fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
- mock.patch.object(drvr._host, "_init_events",
- return_value=None),
mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock)):
@@ -16055,6 +16048,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = False
+ drvr._host._init_events.return_value = None
with test.nested(
mock.patch.object(drvr._host, "_connect",
side_effect=fakelibvirt.make_libvirtError(
@@ -16062,8 +16056,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"Failed to connect to host",
error_code=
fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
- mock.patch.object(drvr._host, "_init_events",
- return_value=None),
mock.patch.object(host.Host, "has_min_version",
return_value=True),
mock.patch.object(drvr, "_do_quality_warnings",
@@ -16083,11 +16075,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = True
+ drvr._host._init_events.return_value = None
with test.nested(
mock.patch.object(drvr._host, "_connect",
return_value=mock.MagicMock()),
- mock.patch.object(drvr._host, "_init_events",
- return_value=None),
mock.patch.object(host.Host, "has_min_version",
return_value=True),
mock.patch.object(drvr, "_do_quality_warnings",
@@ -17575,12 +17566,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
got = drvr._get_cpu_info()
self.assertEqual(want, got)
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
- return_value='ens1')
@mock.patch.object(host.Host, 'list_pci_devices',
return_value=['pci_0000_04_00_3', 'pci_0000_04_10_7',
'pci_0000_04_11_7'])
- def test_get_pci_passthrough_devices(self, mock_list, mock_get_ifname):
+ def test_get_pci_passthrough_devices(self, mock_list):
+ pci_utils.get_ifname_by_pci_address.return_value = 'ens1'
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -17614,7 +17604,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"vendor_id": '8086',
"dev_type": fields.PciDeviceType.SRIOV_PF,
"phys_function": None,
- "numa_node": None},
+ "numa_node": None,
+ # value defined in the LibvirtFixture
+ "mac_address": "52:54:00:1e:59:c6",
+ },
{
"dev_id": "pci_0000_04_10_7",
"domain": 0,
@@ -17650,7 +17643,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# The first call for every VF is to determine parent_ifname and
# the second call to determine the MAC address.
- mock_get_ifname.assert_has_calls([
+ pci_utils.get_ifname_by_pci_address.assert_has_calls([
mock.call('0000:04:10.7', pf_interface=True),
mock.call('0000:04:11.7', pf_interface=True),
])
@@ -19823,16 +19816,64 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.context,
mock.sentinel.connection_info,
instance,
- destroy_secrets=False
+ destroy_secrets=False,
+ force=True
),
mock.call(
self.context,
mock.sentinel.connection_info,
instance,
- destroy_secrets=True
+ destroy_secrets=True,
+ force=True
)
])
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_driver')
+ @mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver._should_disconnect_target',
+ new=mock.Mock(return_value=True))
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._detach_encryptor',
+ new=mock.Mock())
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain',
+ new=mock.Mock())
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_vpmems',
+ new=mock.Mock(return_value=None))
+ def test_cleanup_disconnect_volume(self, mock_vol_driver):
+ """Verify that we call disconnect_volume() with force=True
+
+        cleanup() is called by destroy() when an instance is being deleted,
+        and force=True should be passed down to os-brick's disconnect_volume()
+        call, which will ensure removal of devices regardless of errors.
+
+ We need to ensure that devices are removed when an instance is being
+ deleted to avoid leaving leftover devices that could later be
+ erroneously connected by external entities (example: multipathd) to
+ instances that should not have access to the volumes.
+
+ See https://bugs.launchpad.net/nova/+bug/2004555 for details.
+ """
+ connection_info = mock.MagicMock()
+ block_device_info = {
+ 'block_device_mapping': [
+ {
+ 'connection_info': connection_info
+ }
+ ]
+ }
+ instance = objects.Instance(self.context, **self.test_instance)
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ drvr.cleanup(
+ self.context,
+ instance,
+ network_info={},
+ block_device_info=block_device_info,
+ destroy_vifs=False,
+ destroy_disks=False,
+ )
+ mock_vol_driver.return_value.disconnect_volume.assert_called_once_with(
+ connection_info, instance, force=True)
+
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_allow_native_luksv1')
def test_swap_volume_native_luks_blocked(self, mock_allow_native_luksv1,
@@ -22060,11 +22101,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.drvr.migrate_disk_and_power_off,
'ctx', instance, '10.0.0.1', flavor_obj, None)
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
- '._get_instance_disk_info')
@mock.patch('nova.virt.driver.block_device_info_get_ephemerals')
- def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get,
- mock_get_disk_info):
+ def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get):
mappings = [
{
'device_name': '/dev/sdb4',
@@ -22111,7 +22149,6 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
# Old flavor, eph is 20, real disk is 3, target is 2, fail
flavor = {'root_gb': 10, 'ephemeral_gb': 2}
flavor_obj = objects.Flavor(**flavor)
- mock_get_disk_info.return_value = fake_disk_info_json(instance)
self.assertRaises(
exception.InstanceFaultRollback,
@@ -25561,9 +25598,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
}
self._test_get_gpu_inventories(drvr, expected, ['nvidia-11'])
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
- '._get_mdev_capable_devices')
- def test_get_gpu_inventories_with_two_types(self, get_mdev_capable_devs):
+ def test_get_gpu_inventories_with_two_types(self):
self.flags(enabled_mdev_types=['nvidia-11', 'nvidia-12'],
group='devices')
# we need to call the below again to ensure the updated
@@ -28510,13 +28545,11 @@ class LVMSnapshotTests(_BaseSnapshotTests):
new=mock.Mock(return_value=None))
@mock.patch('nova.virt.libvirt.utils.get_disk_type_from_path',
new=mock.Mock(return_value='lvm'))
- @mock.patch('nova.virt.libvirt.utils.file_open',
- side_effect=[io.BytesIO(b''), io.BytesIO(b'')])
@mock.patch.object(libvirt_driver.imagebackend.images,
'convert_image')
@mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info')
def _test_lvm_snapshot(self, disk_format, mock_volume_info,
- mock_convert_image, mock_file_open):
+ mock_convert_image):
self.flags(images_type='lvm',
images_volume_group='nova-vg', group='libvirt')
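
The force-flag hunks above all exercise one pattern: disconnect_volume()
grows a force keyword that defaults to False, and only the instance-delete
path passes force=True so that os-brick removes the devices even when the
disconnect hits errors. A minimal sketch of that plumbing follows; the names
are illustrative stand-ins, not nova's exact internals:

    # Hedged sketch only: a stand-in for the call chain tested above.
    class SketchVolumeDriver:
        def __init__(self, connector):
            self.connector = connector

        def disconnect_volume(self, connection_info, instance, force=False):
            # os-brick ignores disconnect errors when force=True.
            self.connector.disconnect_volume(
                connection_info['data'], None, force=force)

    def cleanup(volume_driver, block_device_mapping):
        # Instance delete: always force, so devices are never left behind
        # for e.g. multipathd to reconnect (bug 2004555).
        for bdm in block_device_mapping:
            volume_driver.disconnect_volume(
                bdm['connection_info'], None, force=True)
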
diff --git a/nova/tests/unit/virt/libvirt/test_guest.py b/nova/tests/unit/virt/libvirt/test_guest.py
index 70d438d816..47e9ba4b62 100644
--- a/nova/tests/unit/virt/libvirt/test_guest.py
+++ b/nova/tests/unit/virt/libvirt/test_guest.py
@@ -1040,3 +1040,25 @@ class JobInfoTestCase(test.NoDBTestCase):
mock_stats.assert_called_once_with()
mock_info.assert_called_once_with()
+
+ @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
+ @mock.patch.object(fakelibvirt.virDomain, "jobStats")
+ def test_job_stats_no_ram(self, mock_stats, mock_info):
+ mock_stats.side_effect = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ "internal error: migration was active, but no RAM info was set",
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR,
+ error_message="migration was active, but no RAM info was set")
+
+ info = self.guest.get_job_info()
+
+ self.assertIsInstance(info, libvirt_guest.JobInfo)
+ self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_NONE, info.type)
+ self.assertEqual(0, info.time_elapsed)
+ self.assertEqual(0, info.time_remaining)
+ self.assertEqual(0, info.memory_total)
+ self.assertEqual(0, info.memory_processed)
+ self.assertEqual(0, info.memory_remaining)
+
+ mock_stats.assert_called_once_with()
+ self.assertFalse(mock_info.called)
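
test_job_stats_no_ram above pins down how a racy end-of-migration failure is
normalized: when jobStats() raises the internal "no RAM info was set" error,
get_job_info() should report an idle job with zeroed counters instead of
propagating the exception. A hedged sketch of that handling (simplified;
the real logic lives in nova.virt.libvirt.guest):

    import dataclasses

    @dataclasses.dataclass
    class JobInfo:
        type: int = 0  # VIR_DOMAIN_JOB_NONE
        time_elapsed: int = 0
        time_remaining: int = 0
        memory_total: int = 0
        memory_processed: int = 0
        memory_remaining: int = 0

    def get_job_info(domain, libvirt):
        try:
            stats = domain.jobStats()
        except libvirt.libvirtError as ex:
            if (ex.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR
                    and 'no RAM info was set' in ex.get_error_message()):
                # Transient race at the end of a migration: report that
                # no job is running rather than failing the caller.
                return JobInfo(type=libvirt.VIR_DOMAIN_JOB_NONE)
            raise
        known = JobInfo.__dataclass_fields__
        return JobInfo(**{k: v for k, v in stats.items() if k in known})
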
diff --git a/nova/tests/unit/virt/libvirt/test_host.py b/nova/tests/unit/virt/libvirt/test_host.py
index d71d13ab37..a46a3e46a5 100644
--- a/nova/tests/unit/virt/libvirt/test_host.py
+++ b/nova/tests/unit/virt/libvirt/test_host.py
@@ -16,6 +16,7 @@
import os
+import ddt
import eventlet
from eventlet import greenthread
from eventlet import tpool
@@ -71,11 +72,10 @@ class HostTestCase(test.NoDBTestCase):
self.useFixture(nova_fixtures.LibvirtFixture())
self.host = host.Host("qemu:///system")
- @mock.patch("nova.virt.libvirt.host.Host._init_events")
- def test_repeat_initialization(self, mock_init_events):
+ def test_repeat_initialization(self):
for i in range(3):
self.host.initialize()
- mock_init_events.assert_called_once_with()
+ self.host._init_events.assert_called_once_with()
@mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback")
def test_close_callback(self, mock_close):
@@ -1113,8 +1113,9 @@ Active: 8381604 kB
expect_vf = ["rx", "tx", "sg", "tso", "gso", "gro", "rxvlan", "txvlan"]
self.assertEqual(expect_vf, actualvf)
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
- def test_get_pcidev_info_non_nic(self, mock_get_ifname):
+ def test_get_pcidev_info_non_nic(self):
+ pci_utils.get_mac_by_pci_address.side_effect = (
+ exception.PciDeviceNotFoundById('0000:04:00.3'))
dev_name = "pci_0000_04_11_7"
pci_dev = fakelibvirt.NodeDevice(
self.host._get_connection(),
@@ -1128,11 +1129,10 @@ Active: 8381604 kB
'parent_addr': '0000:04:00.3',
}
self.assertEqual(expect_vf, actual_vf)
- mock_get_ifname.assert_not_called()
+ pci_utils.get_ifname_by_pci_address.assert_not_called()
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
- return_value='ens1')
- def test_get_pcidev_info(self, mock_get_ifname):
+ def test_get_pcidev_info(self):
+ pci_utils.get_ifname_by_pci_address.return_value = 'ens1'
devs = {
"pci_0000_04_00_3", "pci_0000_04_10_7", "pci_0000_04_11_7",
"pci_0000_04_00_1", "pci_0000_03_00_0", "pci_0000_03_00_1",
@@ -1156,9 +1156,9 @@ Active: 8381604 kB
dev for dev in node_devs.values() if dev.name() in devs]
name = "pci_0000_04_00_3"
- actual_vf = self.host._get_pcidev_info(
+ actual_pf = self.host._get_pcidev_info(
name, node_devs[name], net_devs, [], [])
- expect_vf = {
+ expect_pf = {
"dev_id": "pci_0000_04_00_3",
"address": "0000:04:00.3",
"product_id": '1521',
@@ -1166,8 +1166,10 @@ Active: 8381604 kB
"vendor_id": '8086',
"label": 'label_8086_1521',
"dev_type": obj_fields.PciDeviceType.SRIOV_PF,
+ # value defined in the LibvirtFixture
+ "mac_address": "52:54:00:1e:59:c6",
}
- self.assertEqual(expect_vf, actual_vf)
+ self.assertEqual(expect_pf, actual_pf)
name = "pci_0000_04_10_7"
actual_vf = self.host._get_pcidev_info(
@@ -1222,9 +1224,9 @@ Active: 8381604 kB
self.assertEqual(expect_vf, actual_vf)
name = "pci_0000_03_00_0"
- actual_vf = self.host._get_pcidev_info(
+ actual_pf = self.host._get_pcidev_info(
name, node_devs[name], net_devs, [], [])
- expect_vf = {
+ expect_pf = {
"dev_id": "pci_0000_03_00_0",
"address": "0000:03:00.0",
"product_id": '1013',
@@ -1232,13 +1234,15 @@ Active: 8381604 kB
"vendor_id": '15b3',
"label": 'label_15b3_1013',
"dev_type": obj_fields.PciDeviceType.SRIOV_PF,
+ # value defined in the LibvirtFixture
+ "mac_address": "52:54:00:1e:59:c6",
}
- self.assertEqual(expect_vf, actual_vf)
+ self.assertEqual(expect_pf, actual_pf)
name = "pci_0000_03_00_1"
- actual_vf = self.host._get_pcidev_info(
+ actual_pf = self.host._get_pcidev_info(
name, node_devs[name], net_devs, [], [])
- expect_vf = {
+ expect_pf = {
"dev_id": "pci_0000_03_00_1",
"address": "0000:03:00.1",
"product_id": '1013',
@@ -1246,8 +1250,10 @@ Active: 8381604 kB
"vendor_id": '15b3',
"label": 'label_15b3_1013',
"dev_type": obj_fields.PciDeviceType.SRIOV_PF,
+ # value defined in the LibvirtFixture
+ "mac_address": "52:54:00:1e:59:c6",
}
- self.assertEqual(expect_vf, actual_vf)
+ self.assertEqual(expect_pf, actual_pf)
# Parent PF with a VPD cap.
name = "pci_0000_82_00_0"
@@ -1264,6 +1270,8 @@ Active: 8381604 kB
"capabilities": {
# Should be obtained from the parent PF in this case.
"vpd": {"card_serial_number": "MT2113X00000"}},
+ # value defined in the LibvirtFixture
+ "mac_address": "52:54:00:1e:59:c6",
}
self.assertEqual(expect_pf, actual_pf)
@@ -1928,6 +1936,7 @@ class TestLibvirtSEV(test.NoDBTestCase):
self.host = host.Host("qemu:///system")
+@ddt.ddt
class TestLibvirtSEVUnsupported(TestLibvirtSEV):
@mock.patch.object(os.path, 'exists', return_value=False)
def test_kernel_parameter_missing(self, fake_exists):
@@ -1935,19 +1944,26 @@ class TestLibvirtSEVUnsupported(TestLibvirtSEV):
fake_exists.assert_called_once_with(
'/sys/module/kvm_amd/parameters/sev')
+ @ddt.data(
+ ('0\n', False),
+ ('N\n', False),
+ ('1\n', True),
+ ('Y\n', True),
+ )
+ @ddt.unpack
@mock.patch.object(os.path, 'exists', return_value=True)
- @mock.patch('builtins.open', mock.mock_open(read_data="0\n"))
- def test_kernel_parameter_zero(self, fake_exists):
- self.assertFalse(self.host._kernel_supports_amd_sev())
- fake_exists.assert_called_once_with(
- '/sys/module/kvm_amd/parameters/sev')
-
- @mock.patch.object(os.path, 'exists', return_value=True)
- @mock.patch('builtins.open', mock.mock_open(read_data="1\n"))
- def test_kernel_parameter_one(self, fake_exists):
- self.assertTrue(self.host._kernel_supports_amd_sev())
- fake_exists.assert_called_once_with(
- '/sys/module/kvm_amd/parameters/sev')
+ def test_kernel_parameter(
+ self, sev_param_value, expected_support, mock_exists
+ ):
+ with mock.patch(
+ 'builtins.open', mock.mock_open(read_data=sev_param_value)
+ ):
+ self.assertIs(
+ expected_support,
+ self.host._kernel_supports_amd_sev()
+ )
+ mock_exists.assert_called_once_with(
+ '/sys/module/kvm_amd/parameters/sev')
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch('builtins.open', mock.mock_open(read_data="1\n"))
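
The ddt parametrization above captures the two spellings the kernel uses for
the kvm_amd SEV parameter: numeric "0"/"1" on older kernels and boolean
"N"/"Y" on newer ones. An illustrative sketch of the check being driven
(assuming the simple read-and-compare implied by the test data, not
necessarily nova's exact implementation):

    import os

    SEV_KERNEL_PARAM_FILE = '/sys/module/kvm_amd/parameters/sev'

    def kernel_supports_amd_sev():
        # "1" and "Y" mean enabled; "0", "N" or a missing file mean not.
        if not os.path.exists(SEV_KERNEL_PARAM_FILE):
            return False
        with open(SEV_KERNEL_PARAM_FILE) as f:
            return f.read().strip() in ('1', 'Y')
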
diff --git a/nova/tests/unit/virt/libvirt/test_vif.py b/nova/tests/unit/virt/libvirt/test_vif.py
index 43504efeb5..697300b9cf 100644
--- a/nova/tests/unit/virt/libvirt/test_vif.py
+++ b/nova/tests/unit/virt/libvirt/test_vif.py
@@ -517,18 +517,17 @@ class LibvirtVifTestCase(test.NoDBTestCase):
def setUp(self):
super(LibvirtVifTestCase, self).setUp()
- self.useFixture(nova_fixtures.LibvirtFixture(stub_os_vif=False))
+ self.libvirt = self.useFixture(
+ nova_fixtures.LibvirtFixture(stub_os_vif=False))
# os_vif.initialize is typically done in nova-compute startup
os_vif.initialize()
self.setup_os_vif_objects()
# multiqueue configuration is host OS specific
- _a = mock.patch('os.uname')
- self.mock_uname = _a.start()
+ self.mock_uname = self.libvirt.mock_uname
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.10.13-200-generic', '', 'x86_64')
- self.addCleanup(_a.stop)
def _get_node(self, xml):
doc = etree.fromstring(xml)
@@ -983,14 +982,9 @@ class LibvirtVifTestCase(test.NoDBTestCase):
self.vif_bridge,
self.vif_bridge['network']['bridge'])
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
- @mock.patch.object(pci_utils, 'get_vf_num_by_pci_address', return_value=1)
- @mock.patch('nova.privsep.linux_net.set_device_macaddr')
- @mock.patch('nova.privsep.linux_net.set_device_macaddr_and_vlan')
- def _test_hw_veb_op(self, op, vlan, mock_set_macaddr_and_vlan,
- mock_set_macaddr, mock_get_vf_num,
- mock_get_ifname):
- mock_get_ifname.side_effect = ['eth1', 'eth13']
+ def _test_hw_veb_op(self, op, vlan):
+ self.libvirt.mock_get_vf_num_by_pci_address.return_value = 1
+ pci_utils.get_ifname_by_pci_address.side_effect = ['eth1', 'eth13']
vlan_id = int(vlan)
port_state = 'up' if vlan_id > 0 else 'down'
mac = ('00:00:00:00:00:00' if op.__name__ == 'unplug'
@@ -1005,10 +999,13 @@ class LibvirtVifTestCase(test.NoDBTestCase):
'set_macaddr': [mock.call('eth13', mac, port_state=port_state)]
}
op(self.instance, self.vif_hw_veb_macvtap)
- mock_get_ifname.assert_has_calls(calls['get_ifname'])
- mock_get_vf_num.assert_has_calls(calls['get_vf_num'])
- mock_set_macaddr.assert_has_calls(calls['set_macaddr'])
- mock_set_macaddr_and_vlan.assert_called_once_with(
+ pci_utils.get_ifname_by_pci_address.assert_has_calls(
+ calls['get_ifname'])
+ self.libvirt.mock_get_vf_num_by_pci_address.assert_has_calls(
+ calls['get_vf_num'])
+ self.libvirt.mock_set_device_macaddr.assert_has_calls(
+ calls['set_macaddr'])
+ self.libvirt.mock_set_device_macaddr_and_vlan.assert_called_once_with(
'eth1', 1, mock.ANY, vlan_id)
def test_plug_hw_veb(self):
@@ -1218,9 +1215,8 @@ class LibvirtVifTestCase(test.NoDBTestCase):
self.assertEqual(1, len(node))
self._assertPciEqual(node, self.vif_hostdev_physical)
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
- return_value='eth1')
- def test_hw_veb_driver_macvtap(self, mock_get_ifname):
+ def test_hw_veb_driver_macvtap(self):
+ pci_utils.get_ifname_by_pci_address.return_value = 'eth1'
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hw_veb_macvtap)
node = self._get_node(xml)
diff --git a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
index 89a59f2f1a..f0d403e300 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
@@ -81,3 +81,23 @@ class LibvirtFibreChannelVolumeDriverTestCase(
self.assertEqual(requested_size, new_size)
libvirt_driver.connector.extend_volume.assert_called_once_with(
connection_info['data'])
+
+ def test_disconnect_volume(self):
+ device_path = '/dev/fake-dev'
+ connection_info = {'data': {'device_path': device_path}}
+
+ libvirt_driver = fibrechannel.LibvirtFibreChannelVolumeDriver(
+ self.fake_host)
+ libvirt_driver.connector.disconnect_volume = mock.MagicMock()
+ libvirt_driver.disconnect_volume(
+ connection_info, mock.sentinel.instance)
+
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], connection_info['data'], force=False)
+
+ # Verify force=True
+ libvirt_driver.connector.disconnect_volume.reset_mock()
+ libvirt_driver.disconnect_volume(
+ connection_info, mock.sentinel.instance, force=True)
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], connection_info['data'], force=True)
diff --git a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
index f8a64abea5..540c9c822d 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
@@ -57,10 +57,19 @@ class LibvirtISCSIVolumeDriverTestCase(
device=device_path))
libvirt_driver.disconnect_volume(connection_info,
mock.sentinel.instance)
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], None, force=False)
msg = mock_LOG_warning.call_args_list[0]
self.assertIn('Ignoring VolumeDeviceNotFound', msg[0][0])
+ # Verify force=True
+ libvirt_driver.connector.disconnect_volume.reset_mock()
+ libvirt_driver.disconnect_volume(
+ connection_info, mock.sentinel.instance, force=True)
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], None, force=True)
+
def test_extend_volume(self):
device_path = '/dev/fake-dev'
connection_info = {'data': {'device_path': device_path}}
diff --git a/nova/tests/unit/virt/libvirt/volume/test_lightos.py b/nova/tests/unit/virt/libvirt/volume/test_lightos.py
index 554647acf4..1eb9583d4c 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_lightos.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_lightos.py
@@ -30,7 +30,7 @@ class LibvirtLightVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
device_scan_attempts=5)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
- new=mock.Mock(return_value=mock.Mock()))
+ new=mock.Mock())
def test_libvirt_lightos_driver_connect(self):
lightos_driver = lightos.LibvirtLightOSVolumeDriver(
self.fake_host)
@@ -40,15 +40,16 @@ class LibvirtLightVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
'name': 'aLightVolume',
'conf': config}
connection_info = {'data': disk_info}
- with mock.patch.object(lightos_driver.connector,
- 'connect_volume',
- return_value={'path': '/dev/dms1234567'}):
- lightos_driver.connect_volume(connection_info, None)
- (lightos_driver.connector.connect_volume.
- assert_called_once_with(
- connection_info['data']))
- self.assertEqual('/dev/dms1234567',
- connection_info['data']['device_path'])
+ lightos_driver.connector.connect_volume.return_value = (
+ {'path': '/dev/dms1234567'})
+
+ lightos_driver.connect_volume(connection_info, None)
+
+ lightos_driver.connector.connect_volume.assert_called_once_with(
+ connection_info['data'])
+ self.assertEqual(
+ '/dev/dms1234567',
+ connection_info['data']['device_path'])
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
@@ -61,7 +62,13 @@ class LibvirtLightVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
connection_info = {'data': disk_info}
lightos_driver.disconnect_volume(connection_info, None)
lightos_driver.connector.disconnect_volume.assert_called_once_with(
- disk_info, None)
+ disk_info, None, force=False)
+
+ # Verify force=True
+ lightos_driver.connector.disconnect_volume.reset_mock()
+ lightos_driver.disconnect_volume(connection_info, None, force=True)
+ lightos_driver.connector.disconnect_volume.assert_called_once_with(
+ disk_info, None, force=True)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_nvme.py b/nova/tests/unit/virt/libvirt/volume/test_nvme.py
index fcb303b4c3..2803903e9f 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_nvme.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_nvme.py
@@ -56,14 +56,15 @@ class LibvirtNVMEVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
'name': 'aNVMEVolume',
'conf': config}
connection_info = {'data': disk_info}
- with mock.patch.object(nvme_driver.connector,
- 'connect_volume',
- return_value={'path': '/dev/dms1234567'}):
- nvme_driver.connect_volume(connection_info, None)
- nvme_driver.connector.connect_volume.assert_called_once_with(
- connection_info['data'])
- self.assertEqual('/dev/dms1234567',
- connection_info['data']['device_path'])
+ nvme_driver.connector.connect_volume.return_value = (
+ {'path': '/dev/dms1234567'})
+
+ nvme_driver.connect_volume(connection_info, None)
+
+ nvme_driver.connector.connect_volume.assert_called_once_with(
+ connection_info['data'])
+ self.assertEqual(
+ '/dev/dms1234567', connection_info['data']['device_path'])
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
@@ -76,7 +77,13 @@ class LibvirtNVMEVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
connection_info = {'data': disk_info}
nvme_driver.disconnect_volume(connection_info, None)
nvme_driver.connector.disconnect_volume.assert_called_once_with(
- disk_info, None)
+ disk_info, None, force=False)
+
+ # Verify force=True
+ nvme_driver.connector.disconnect_volume.reset_mock()
+ nvme_driver.disconnect_volume(connection_info, None, force=True)
+ nvme_driver.connector.disconnect_volume.assert_called_once_with(
+ disk_info, None, force=True)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_scaleio.py b/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
index 6d9247cd2d..ed5ab08a6e 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
@@ -49,7 +49,13 @@ class LibvirtScaleIOVolumeDriverTestCase(
conn = {'data': mock.sentinel.conn_data}
sio.disconnect_volume(conn, mock.sentinel.instance)
sio.connector.disconnect_volume.assert_called_once_with(
- mock.sentinel.conn_data, None)
+ mock.sentinel.conn_data, None, force=False)
+
+ # Verify force=True
+ sio.connector.disconnect_volume.reset_mock()
+ sio.disconnect_volume(conn, mock.sentinel.instance, force=True)
+ sio.connector.disconnect_volume.assert_called_once_with(
+ mock.sentinel.conn_data, None, force=True)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_storpool.py b/nova/tests/unit/virt/libvirt/volume/test_storpool.py
index e14954f148..9ceac07260 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_storpool.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_storpool.py
@@ -53,9 +53,11 @@ class MockStorPoolConnector(object):
}
return {'type': 'block', 'path': test_attached[v]['path']}
- def disconnect_volume(self, connection_info, device_info):
+ def disconnect_volume(self, connection_info, device_info, **kwargs):
self.inst.assertIn('client_id', connection_info)
self.inst.assertIn('volume', connection_info)
+ self.inst.assertIn('force', kwargs)
+ self.inst.assertEqual(self.inst.force, kwargs.get('force'))
v = connection_info['volume']
if v not in test_attached:
@@ -86,6 +88,11 @@ class MockStorPoolInitiator(object):
class LibvirtStorPoolVolumeDriverTestCase(
test_volume.LibvirtVolumeBaseTestCase):
+ def setUp(self):
+ super().setUp()
+ # This is for testing the force flag of disconnect_volume()
+ self.force = False
+
def mock_storpool(f):
def _config_inner_inner1(inst, *args, **kwargs):
@mock.patch(
@@ -175,3 +182,10 @@ class LibvirtStorPoolVolumeDriverTestCase(
libvirt_driver.disconnect_volume(ci_2, mock.sentinel.instance)
self.assertDictEqual({}, test_attached)
+
+        # Connect the volume again so we can disconnect it again
+ libvirt_driver.connect_volume(ci_2, mock.sentinel.instance)
+ # Verify force=True
+ self.force = True
+ libvirt_driver.disconnect_volume(
+ ci_2, mock.sentinel.instance, force=True)
diff --git a/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py b/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
index 883cebb55a..032ceb4fe5 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
@@ -95,7 +95,13 @@ class LibvirtVZStorageTestCase(test_volume.LibvirtVolumeBaseTestCase):
conn = {'data': mock.sentinel.conn_data}
drv.disconnect_volume(conn, mock.sentinel.instance)
drv.connector.disconnect_volume.assert_called_once_with(
- mock.sentinel.conn_data, None)
+ mock.sentinel.conn_data, None, force=False)
+
+ # Verify force=True
+ drv.connector.disconnect_volume.reset_mock()
+ drv.disconnect_volume(conn, mock.sentinel.instance, force=True)
+ drv.connector.disconnect_volume.assert_called_once_with(
+ mock.sentinel.conn_data, None, force=True)
def test_libvirt_vzstorage_driver_get_config(self):
libvirt_driver = vzstorage.LibvirtVZStorageVolumeDriver(self.fake_host)
diff --git a/nova/tests/unit/virt/test_block_device.py b/nova/tests/unit/virt/test_block_device.py
index aff6c5ef19..94d9297ca3 100644
--- a/nova/tests/unit/virt/test_block_device.py
+++ b/nova/tests/unit/virt/test_block_device.py
@@ -433,24 +433,23 @@ class TestDriverBlockDevice(test.NoDBTestCase):
def _test_call_wait_func(self, delete_on_termination, delete_fail=False):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
test_bdm['delete_on_termination'] = delete_on_termination
- with mock.patch.object(self.volume_api, 'delete') as vol_delete:
- wait_func = mock.MagicMock()
- mock_exception = exception.VolumeNotCreated(volume_id='fake-id',
- seconds=1,
- attempts=1,
- volume_status='error')
- wait_func.side_effect = mock_exception
-
- if delete_on_termination and delete_fail:
- vol_delete.side_effect = Exception()
-
- self.assertRaises(exception.VolumeNotCreated,
- test_bdm._call_wait_func,
- context=self.context,
- wait_func=wait_func,
- volume_api=self.volume_api,
- volume_id='fake-id')
- self.assertEqual(delete_on_termination, vol_delete.called)
+ if delete_on_termination and delete_fail:
+ self.volume_api.delete.side_effect = Exception()
+
+ wait_func = mock.MagicMock()
+ mock_exception = exception.VolumeNotCreated(volume_id='fake-id',
+ seconds=1,
+ attempts=1,
+ volume_status='error')
+ wait_func.side_effect = mock_exception
+
+ self.assertRaises(exception.VolumeNotCreated,
+ test_bdm._call_wait_func,
+ context=self.context,
+ wait_func=wait_func,
+ volume_api=self.volume_api,
+ volume_id='fake-id')
+ self.assertEqual(delete_on_termination, self.volume_api.delete.called)
def test_call_wait_delete_volume(self):
self._test_call_wait_func(True)
@@ -483,25 +482,24 @@ class TestDriverBlockDevice(test.NoDBTestCase):
volume['shared_targets'] = True
volume['service_uuid'] = uuids.service_uuid
+ if delete_attachment_raises:
+ self.volume_api.attachment_delete.side_effect = (
+ delete_attachment_raises)
+
+ self.virt_driver.get_volume_connector.return_value = connector
+
with test.nested(
mock.patch.object(driver_bdm, '_get_volume', return_value=volume),
- mock.patch.object(self.virt_driver, 'get_volume_connector',
- return_value=connector),
mock.patch('os_brick.initiator.utils.guard_connection'),
- mock.patch.object(self.volume_api, 'attachment_delete'),
- ) as (mock_get_volume, mock_get_connector, mock_guard,
- vapi_attach_del):
-
- if delete_attachment_raises:
- vapi_attach_del.side_effect = delete_attachment_raises
+ ) as (mock_get_volume, mock_guard):
driver_bdm.detach(elevated_context, instance,
self.volume_api, self.virt_driver,
attachment_id=attachment_id)
mock_guard.assert_called_once_with(volume)
- vapi_attach_del.assert_called_once_with(elevated_context,
- attachment_id)
+ self.volume_api.attachment_delete.assert_called_once_with(
+ elevated_context, attachment_id)
def test_volume_delete_attachment_with_shared_targets(self):
self.test_volume_delete_attachment(include_shared_targets=True)
@@ -952,31 +950,28 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
- with test.nested(
- mock.patch.object(self.volume_api, 'get_snapshot',
- return_value=snapshot),
- mock.patch.object(self.volume_api, 'create', return_value=volume),
- mock.patch.object(self.volume_api, 'delete'),
- ) as (vol_get_snap, vol_create, vol_delete):
- wait_func = mock.MagicMock()
- mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
- seconds=1,
- attempts=1,
- volume_status='error')
- wait_func.side_effect = mock_exception
- self.assertRaises(exception.VolumeNotCreated,
- test_bdm.attach, context=self.context,
- instance=instance,
- volume_api=self.volume_api,
- virt_driver=self.virt_driver,
- wait_func=wait_func)
-
- vol_get_snap.assert_called_once_with(
- self.context, 'fake-snapshot-id-1')
- vol_create.assert_called_once_with(
- self.context, 3, '', '', availability_zone=None,
- snapshot=snapshot, volume_type=None)
- vol_delete.assert_called_once_with(self.context, volume['id'])
+ self.volume_api.get_snapshot.return_value = snapshot
+ self.volume_api.create.return_value = volume
+ wait_func = mock.MagicMock()
+ mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
+ seconds=1,
+ attempts=1,
+ volume_status='error')
+ wait_func.side_effect = mock_exception
+ self.assertRaises(exception.VolumeNotCreated,
+ test_bdm.attach, context=self.context,
+ instance=instance,
+ volume_api=self.volume_api,
+ virt_driver=self.virt_driver,
+ wait_func=wait_func)
+
+ self.volume_api.get_snapshot.assert_called_once_with(
+ self.context, 'fake-snapshot-id-1')
+ self.volume_api.create.assert_called_once_with(
+ self.context, 3, '', '', availability_zone=None,
+ snapshot=snapshot, volume_type=None)
+ self.volume_api.delete.assert_called_once_with(
+ self.context, volume['id'])
def test_snapshot_attach_volume(self):
test_bdm = self.driver_classes['volsnapshot'](
@@ -984,19 +979,17 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
- with test.nested(
- mock.patch.object(self.driver_classes['volume'], 'attach'),
- mock.patch.object(self.volume_api, 'get_snapshot'),
- mock.patch.object(self.volume_api, 'create'),
- ) as (mock_attach, mock_get_snapshot, mock_create):
+ with mock.patch.object(
+ self.driver_classes['volume'], 'attach'
+ ) as mock_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
mock_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
         # Make sure these are not called
- mock_get_snapshot.assert_not_called()
- mock_create.assert_not_called()
+ self.volume_api.get_snapshot.assert_not_called()
+ self.volume_api.create.assert_not_called()
def test_snapshot_attach_no_volume_and_no_volume_type(self):
bdm = self.driver_classes['volsnapshot'](self.volsnapshot_bdm)
@@ -1006,15 +999,10 @@ class TestDriverBlockDevice(test.NoDBTestCase):
original_volume = {'id': uuids.original_volume_id,
'volume_type_id': 'original_volume_type'}
new_volume = {'id': uuids.new_volume_id}
- with test.nested(
- mock.patch.object(self.driver_classes['volume'], 'attach'),
- mock.patch.object(self.volume_api, 'get_snapshot',
- return_value=snapshot),
- mock.patch.object(self.volume_api, 'get',
- return_value=original_volume),
- mock.patch.object(self.volume_api, 'create',
- return_value=new_volume),
- ) as (mock_attach, mock_get_snapshot, mock_get, mock_create):
+ self.volume_api.get_snapshot.return_value = snapshot
+ self.volume_api.get.return_value = original_volume
+ self.volume_api.create.return_value = new_volume
+ with mock.patch.object(self.driver_classes["volume"], "attach"):
bdm.volume_id = None
bdm.volume_type = None
bdm.attach(self.context, instance, self.volume_api,
@@ -1022,10 +1010,11 @@ class TestDriverBlockDevice(test.NoDBTestCase):
# Assert that the original volume type is fetched, stored within
# the bdm and then used to create the new snapshot based volume.
- mock_get.assert_called_once_with(self.context,
- uuids.original_volume_id)
+ self.volume_api.get.assert_called_once_with(
+ self.context, uuids.original_volume_id)
self.assertEqual('original_volume_type', bdm.volume_type)
- mock_create.assert_called_once_with(self.context, bdm.volume_size,
+ self.volume_api.create.assert_called_once_with(
+ self.context, bdm.volume_size,
'', '', volume_type='original_volume_type', snapshot=snapshot,
availability_zone=None)
@@ -1097,27 +1086,25 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
- with test.nested(
- mock.patch.object(self.volume_api, 'create', return_value=volume),
- mock.patch.object(self.volume_api, 'delete'),
- ) as (vol_create, vol_delete):
- wait_func = mock.MagicMock()
- mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
- seconds=1,
- attempts=1,
- volume_status='error')
- wait_func.side_effect = mock_exception
- self.assertRaises(exception.VolumeNotCreated,
- test_bdm.attach, context=self.context,
- instance=instance,
- volume_api=self.volume_api,
- virt_driver=self.virt_driver,
- wait_func=wait_func)
-
- vol_create.assert_called_once_with(
- self.context, 1, '', '', image_id=image['id'],
- availability_zone=None, volume_type=None)
- vol_delete.assert_called_once_with(self.context, volume['id'])
+ self.volume_api.create.return_value = volume
+ wait_func = mock.MagicMock()
+ mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
+ seconds=1,
+ attempts=1,
+ volume_status='error')
+ wait_func.side_effect = mock_exception
+ self.assertRaises(exception.VolumeNotCreated,
+ test_bdm.attach, context=self.context,
+ instance=instance,
+ volume_api=self.volume_api,
+ virt_driver=self.virt_driver,
+ wait_func=wait_func)
+
+ self.volume_api.create.assert_called_once_with(
+ self.context, 1, '', '', image_id=image['id'],
+ availability_zone=None, volume_type=None)
+ self.volume_api.delete.assert_called_once_with(
+ self.context, volume['id'])
def test_image_attach_volume(self):
test_bdm = self.driver_classes['volimage'](
@@ -1125,19 +1112,17 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
- with test.nested(
- mock.patch.object(self.driver_classes['volume'], 'attach'),
- mock.patch.object(self.volume_api, 'get_snapshot'),
- mock.patch.object(self.volume_api, 'create'),
- ) as (mock_attch, mock_get_snapshot, mock_create):
+ with mock.patch.object(
+ self.driver_classes['volume'], 'attach'
+ ) as mock_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
- mock_attch.assert_called_once_with(
+ mock_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
         # Make sure these are not called
- mock_get_snapshot.assert_not_called()
- mock_create.assert_not_called()
+ self.volume_api.get_snapshot.assert_not_called()
+ self.volume_api.create.assert_not_called()
def test_blank_attach_fail_volume(self):
no_blank_volume = self.volblank_bdm_dict.copy()
@@ -1149,30 +1134,26 @@ class TestDriverBlockDevice(test.NoDBTestCase):
**{'uuid': uuids.uuid})
volume = {'id': 'fake-volume-id-2',
'display_name': '%s-blank-vol' % uuids.uuid}
+ self.volume_api.create.return_value = volume
+ wait_func = mock.MagicMock()
+ mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
+ seconds=1,
+ attempts=1,
+ volume_status='error')
+ wait_func.side_effect = mock_exception
+ self.assertRaises(exception.VolumeNotCreated,
+ test_bdm.attach, context=self.context,
+ instance=instance,
+ volume_api=self.volume_api,
+ virt_driver=self.virt_driver,
+ wait_func=wait_func)
- with test.nested(
- mock.patch.object(self.volume_api, 'create', return_value=volume),
- mock.patch.object(self.volume_api, 'delete'),
- ) as (vol_create, vol_delete):
- wait_func = mock.MagicMock()
- mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
- seconds=1,
- attempts=1,
- volume_status='error')
- wait_func.side_effect = mock_exception
- self.assertRaises(exception.VolumeNotCreated,
- test_bdm.attach, context=self.context,
- instance=instance,
- volume_api=self.volume_api,
- virt_driver=self.virt_driver,
- wait_func=wait_func)
-
- vol_create.assert_called_once_with(
- self.context, test_bdm.volume_size,
- '%s-blank-vol' % uuids.uuid,
- '', volume_type=None, availability_zone=None)
- vol_delete.assert_called_once_with(
- self.context, volume['id'])
+ self.volume_api.create.assert_called_once_with(
+ self.context, test_bdm.volume_size,
+ '%s-blank-vol' % uuids.uuid,
+ '', volume_type=None, availability_zone=None)
+ self.volume_api.delete.assert_called_once_with(
+ self.context, volume['id'])
def test_blank_attach_volume(self):
no_blank_volume = self.volblank_bdm_dict.copy()
@@ -1481,13 +1462,9 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'display_name': 'fake-snapshot-vol'}
self.stub_volume_create(volume)
- with test.nested(
- mock.patch.object(self.volume_api, 'get_snapshot',
- return_value=snapshot),
- mock.patch.object(volume_class, 'attach')
- ) as (
- vol_get_snap, vol_attach
- ):
+ self.volume_api.get_snapshot.return_value = snapshot
+
+ with mock.patch.object(volume_class, 'attach') as vol_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
diff --git a/nova/tests/unit/virt/test_images.py b/nova/tests/unit/virt/test_images.py
index 085b169db3..563330b541 100644
--- a/nova/tests/unit/virt/test_images.py
+++ b/nova/tests/unit/virt/test_images.py
@@ -16,6 +16,8 @@ import os
import mock
from oslo_concurrency import processutils
+from oslo_serialization import jsonutils
+from oslo_utils import imageutils
from nova.compute import utils as compute_utils
from nova import exception
@@ -135,3 +137,47 @@ class QemuTestCase(test.NoDBTestCase):
'-O', 'out_format', '-f', 'in_format', 'source', 'dest')
mock_disk_op_sema.__enter__.assert_called_once()
self.assertTupleEqual(expected, mock_execute.call_args[0])
+
+ def test_convert_image_vmdk_allowed_list_checking(self):
+ info = {'format': 'vmdk',
+ 'format-specific': {
+ 'type': 'vmdk',
+ 'data': {
+ 'create-type': 'monolithicFlat',
+ }}}
+
+ # If the format is not in the allowed list, we should get an error
+ self.assertRaises(exception.ImageUnacceptable,
+ images.check_vmdk_image, 'foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ # With the format in the allowed list, no error
+ self.flags(vmdk_allowed_types=['streamOptimized', 'monolithicFlat',
+ 'monolithicSparse'],
+ group='compute')
+ images.check_vmdk_image('foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ # With an empty list, allow nothing
+ self.flags(vmdk_allowed_types=[], group='compute')
+ self.assertRaises(exception.ImageUnacceptable,
+ images.check_vmdk_image, 'foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ @mock.patch.object(images, 'fetch')
+ @mock.patch('nova.privsep.qemu.unprivileged_qemu_img_info')
+ def test_fetch_checks_vmdk_rules(self, mock_info, mock_fetch):
+ info = {'format': 'vmdk',
+ 'format-specific': {
+ 'type': 'vmdk',
+ 'data': {
+ 'create-type': 'monolithicFlat',
+ }}}
+ mock_info.return_value = jsonutils.dumps(info)
+ with mock.patch('os.path.exists', return_value=True):
+ e = self.assertRaises(exception.ImageUnacceptable,
+ images.fetch_to_raw, None, 'foo', 'anypath')
+ self.assertIn('Invalid VMDK create-type specified', str(e))
diff --git a/nova/tests/unit/virt/vmwareapi/test_images.py b/nova/tests/unit/virt/vmwareapi/test_images.py
index 7cfec00c97..b3a3cfd941 100644
--- a/nova/tests/unit/virt/vmwareapi/test_images.py
+++ b/nova/tests/unit/virt/vmwareapi/test_images.py
@@ -117,13 +117,11 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock.patch.object(images.IMAGE_API, 'download'),
mock.patch.object(images, 'image_transfer'),
mock.patch.object(images, '_build_shadow_vm_config_spec'),
- mock.patch.object(session, '_call_method'),
mock.patch.object(vm_util, 'get_vmdk_info')
) as (mock_image_api_get,
mock_image_api_download,
mock_image_transfer,
mock_build_shadow_vm_config_spec,
- mock_call_method,
mock_get_vmdk_info):
image_data = {'id': 'fake-id',
'disk_format': 'vmdk',
@@ -172,7 +170,7 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock_write_handle)
mock_get_vmdk_info.assert_called_once_with(
session, mock.sentinel.vm_ref, 'fake-vm')
- mock_call_method.assert_called_once_with(
+ session._call_method.assert_called_once_with(
session.vim, "UnregisterVM", mock.sentinel.vm_ref)
@mock.patch('oslo_vmware.rw_handles.ImageReadHandle')
@@ -188,13 +186,11 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock.patch.object(images.IMAGE_API, 'download'),
mock.patch.object(images, 'image_transfer'),
mock.patch.object(images, '_build_shadow_vm_config_spec'),
- mock.patch.object(session, '_call_method'),
mock.patch.object(vm_util, 'get_vmdk_info')
) as (mock_image_api_get,
mock_image_api_download,
mock_image_transfer,
mock_build_shadow_vm_config_spec,
- mock_call_method,
mock_get_vmdk_info):
image_data = {'id': 'fake-id',
'disk_format': 'vmdk',
@@ -220,7 +216,7 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock_image_transfer.assert_called_once_with(mock_read_handle,
mock_write_handle)
- mock_call_method.assert_called_once_with(
+ session._call_method.assert_called_once_with(
session.vim, "UnregisterVM", mock.sentinel.vm_ref)
mock_get_vmdk_info.assert_called_once_with(
session, mock.sentinel.vm_ref, 'fake-vm')
diff --git a/nova/tests/unit/volume/test_cinder.py b/nova/tests/unit/volume/test_cinder.py
index 0c170c05e4..ffa46ce2aa 100644
--- a/nova/tests/unit/volume/test_cinder.py
+++ b/nova/tests/unit/volume/test_cinder.py
@@ -520,16 +520,15 @@ class CinderApiTestCase(test.NoDBTestCase):
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_delete_failed(self, mock_cinderclient, mock_log):
mock_cinderclient.return_value.attachments.delete.side_effect = (
- cinder_exception.NotFound(404, '404'))
+ cinder_exception.BadRequest(400, '400'))
attachment_id = uuids.attachment
- ex = self.assertRaises(exception.VolumeAttachmentNotFound,
+ ex = self.assertRaises(exception.InvalidInput,
self.api.attachment_delete,
self.ctx,
attachment_id)
- self.assertEqual(404, ex.code)
- self.assertIn(attachment_id, str(ex))
+ self.assertEqual(400, ex.code)
@mock.patch('nova.volume.cinder.cinderclient',
side_effect=exception.CinderAPIVersionNotAvailable(
@@ -546,6 +545,16 @@ class CinderApiTestCase(test.NoDBTestCase):
skip_version_check=True)
@mock.patch('nova.volume.cinder.cinderclient')
+ def test_attachment_delete_not_found(self, mock_cinderclient):
+ mock_cinderclient.return_value.attachments.delete.side_effect = (
+ cinder_exception.ClientException(404))
+
+ attachment_id = uuids.attachment
+ self.api.attachment_delete(self.ctx, attachment_id)
+
+ self.assertEqual(1, mock_cinderclient.call_count)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_delete_internal_server_error(self, mock_cinderclient):
mock_cinderclient.return_value.attachments.delete.side_effect = (
cinder_exception.ClientException(500))
@@ -569,6 +578,29 @@ class CinderApiTestCase(test.NoDBTestCase):
self.assertEqual(2, mock_cinderclient.call_count)
@mock.patch('nova.volume.cinder.cinderclient')
+ def test_attachment_delete_gateway_timeout(self, mock_cinderclient):
+ mock_cinderclient.return_value.attachments.delete.side_effect = (
+ cinder_exception.ClientException(504))
+
+ self.assertRaises(cinder_exception.ClientException,
+ self.api.attachment_delete,
+ self.ctx, uuids.attachment_id)
+
+ self.assertEqual(5, mock_cinderclient.call_count)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_attachment_delete_gateway_timeout_do_not_raise(
+ self, mock_cinderclient):
+        # Generate an exception, then have a normal return on the next retry
+ mock_cinderclient.return_value.attachments.delete.side_effect = [
+ cinder_exception.ClientException(504), None]
+
+ attachment_id = uuids.attachment
+ self.api.attachment_delete(self.ctx, attachment_id)
+
+ self.assertEqual(2, mock_cinderclient.call_count)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_delete_bad_request_exception(self, mock_cinderclient):
mock_cinderclient.return_value.attachments.delete.side_effect = (
cinder_exception.BadRequest(400))
@@ -1243,3 +1275,14 @@ class CinderClientTestCase(test.NoDBTestCase):
admin_ctx = context.get_admin_context()
params = cinder._get_cinderclient_parameters(admin_ctx)
self.assertEqual(params[0], mock_admin_auth)
+
+ @mock.patch('nova.service_auth._SERVICE_AUTH')
+ @mock.patch('nova.volume.cinder._ADMIN_AUTH')
+ def test_admin_context_without_user_token_but_with_service_token(
+ self, mock_admin_auth, mock_service_auth
+ ):
+ self.flags(send_service_user_token=True, group='service_user')
+ admin_ctx = context.get_admin_context()
+ params = cinder._get_cinderclient_parameters(admin_ctx)
+ self.assertEqual(mock_admin_auth, params[0].user_auth)
+ self.assertEqual(mock_service_auth, params[0].service_auth)
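
Taken together, the attachment_delete tests above encode a simple policy: a
404 from cinder means the attachment is already gone and is swallowed,
transient 5xx errors are retried (up to five calls for a 504), and anything
still failing is re-raised. A loose sketch of such a retry loop, assuming
the policy is exactly what the call counts imply (the real code is
nova.volume.cinder):

    from cinderclient import exceptions as cinder_exception

    def attachment_delete(client, attachment_id, attempts=5):
        for attempt in range(attempts):
            try:
                return client.attachments.delete(attachment_id)
            except cinder_exception.ClientException as ex:
                if ex.code == 404:
                    return  # already gone; nothing left to clean up
                if ex.code >= 500 and attempt + 1 < attempts:
                    continue  # retry transient server-side failures
                raise
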
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 5aab8ce300..02fc1f07bc 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -891,6 +891,36 @@ class FakeLiveMigrateDriverWithNestedCustomResources(
class FakeDriverWithPciResources(SmallFakeDriver):
+ """NOTE: this driver provides symmetric compute nodes. Each compute will
+ have the same resources with the same addresses. It is dangerous as using
+ this driver can hide issues when in an asymmetric environment nova fails to
+ update entities according to the host specific addresses (e.g. pci_slot of
+ the neutron port bindings).
+
+    The current, non virt driver specific, functional test environment has
+    many shortcomings that make it really hard to simulate host specific
+    virt drivers.
+
+    1) The virt driver is instantiated by the service logic from the name of
+    the driver class. This makes passing input to the driver instance from
+    the test at init time pretty much impossible. This could be solved with
+    some fixtures around nova.virt.driver.load_compute_driver().
+
+    2) The compute service accesses the hypervisor not only via the virt
+    interface but also by reading the sysfs of the host. So simply providing
+    a fake virt driver instance is not enough to isolate simulated compute
+    services that are running on the same host. Also, these low level sysfs
+    reads do not carry host specific information in the call params, so
+    simply mocking the low level call gives no way to provide host specific
+    return values.
+
+    3) CONF is global and is read dynamically by the driver, so providing
+    host specific CONF to driver instances without race conditions between
+    the drivers is extremely hard, especially if periodic tasks are enabled.
+
+    The libvirt based functional test env under nova.tests.functional.libvirt
+    has better support for creating asymmetric environments, so please
+    consider using that instead if possible.
+ """
PCI_ADDR_PF1 = '0000:01:00.0'
PCI_ADDR_PF1_VF1 = '0000:01:00.1'
@@ -955,6 +985,11 @@ class FakeDriverWithPciResources(SmallFakeDriver):
],
group='pci')
+        # These mocks should be removed after bug
+        # https://bugs.launchpad.net/nova/+bug/1961587 has been fixed and
+        # all SRIOV device related information is transferred through the
+        # virt driver and the PciDevice object instead of being queried
+        # with sysfs calls by the network.neutron.API code.
self.useFixture(fixtures.MockPatch(
'nova.pci.utils.get_mac_by_pci_address',
return_value='52:54:00:1e:59:c6'))
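The FakeDriverWithPciResources docstring above suggests that shortcoming 1) could be addressed with fixtures around nova.virt.driver.load_compute_driver(). A minimal sketch of such a fixture, assuming the `fixtures` library already used by nova's test suite; HostSpecificDriverFixture and make_driver_for_host are hypothetical names introduced here, not nova APIs:

    import fixtures


    class HostSpecificDriverFixture(fixtures.Fixture):
        """Patch nova.virt.driver.load_compute_driver so each simulated
        compute service receives a driver instance pre-configured for its
        host, instead of one instantiated from a class name alone.
        """

        def __init__(self, make_driver_for_host):
            super().__init__()
            self.make_driver_for_host = make_driver_for_host

        def _setUp(self):
            def _load(virtapi, compute_driver=None):
                from nova import conf
                # CONF.host identifies the simulated compute service.
                return self.make_driver_for_host(conf.CONF.host, virtapi)

            self.useFixture(fixtures.MonkeyPatch(
                'nova.virt.driver.load_compute_driver', _load))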
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index 3ec7e90c30..08adeada76 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -747,7 +747,7 @@ class VMOps(object):
# should be disconnected even if the VM doesn't exist anymore,
# so they are not leaked.
self.unplug_vifs(instance, network_info)
- self._volumeops.disconnect_volumes(block_device_info)
+ self._volumeops.disconnect_volumes(block_device_info, force=True)
if destroy_disks:
self._delete_disk_files(instance_name)
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index da5b40f375..d2bfed2441 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -59,10 +59,10 @@ class VolumeOps(object):
for vol in volumes:
self.attach_volume(vol['connection_info'], instance_name)
- def disconnect_volumes(self, block_device_info):
+ def disconnect_volumes(self, block_device_info, force=False):
mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in mapping:
- self.disconnect_volume(vol['connection_info'])
+ self.disconnect_volume(vol['connection_info'], force=force)
def attach_volume(self, connection_info, instance_name,
disk_bus=constants.CTRL_TYPE_SCSI):
@@ -116,9 +116,9 @@ class VolumeOps(object):
volume_driver.set_disk_qos_specs(connection_info,
qos_specs)
- def disconnect_volume(self, connection_info):
+ def disconnect_volume(self, connection_info, force=False):
volume_driver = self._get_volume_driver(connection_info)
- volume_driver.disconnect_volume(connection_info)
+ volume_driver.disconnect_volume(connection_info, force=force)
def detach_volume(self, connection_info, instance_name):
LOG.debug("Detaching volume: %(connection_info)s "
@@ -231,8 +231,8 @@ class BaseVolumeDriver(object):
def connect_volume(self, connection_info):
return self._connector.connect_volume(connection_info['data'])
- def disconnect_volume(self, connection_info):
- self._connector.disconnect_volume(connection_info['data'])
+ def disconnect_volume(self, connection_info, force=False):
+ self._connector.disconnect_volume(connection_info['data'], force=force)
def get_disk_resource_path(self, connection_info):
disk_paths = self._connector.get_volume_paths(connection_info['data'])
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 5358f3766a..f13c872290 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -110,6 +110,34 @@ def get_info(context, image_href):
return IMAGE_API.get(context, image_href)
+def check_vmdk_image(image_id, data):
+ # Check some rules about VMDK files. Specifically we want to make
+ # sure that the "create-type" of the image is one that we allow.
+ # Some types of VMDK files can reference files outside the disk
+ # image and we do not want to allow those for obvious reasons.
+
+ types = CONF.compute.vmdk_allowed_types
+
+ if not types:
+ LOG.warning('Refusing to allow VMDK image as vmdk_allowed_'
+ 'types is empty')
+ msg = _('Invalid VMDK create-type specified')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+ try:
+ create_type = data.format_specific['data']['create-type']
+ except KeyError:
+ msg = _('Unable to determine VMDK create-type')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+ if create_type not in CONF.compute.vmdk_allowed_types:
+ LOG.warning('Refusing to process VMDK file with create-type of %r '
+ 'which is not in allowed set of: %s', create_type,
+ ','.join(CONF.compute.vmdk_allowed_types))
+ msg = _('Invalid VMDK create-type specified')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+
def fetch_to_raw(context, image_href, path, trusted_certs=None):
path_tmp = "%s.part" % path
fetch(context, image_href, path_tmp, trusted_certs)
@@ -129,6 +157,9 @@ def fetch_to_raw(context, image_href, path, trusted_certs=None):
reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
{'fmt': fmt, 'backing_file': backing_file}))
+ if fmt == 'vmdk':
+ check_vmdk_image(image_href, data)
+
if fmt != "raw" and CONF.force_raw_images:
staged = "%s.converted" % path
LOG.debug("%s was %s, converting to raw", image_href, fmt)
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index 1a81be3ade..47e92e3ca9 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -3299,6 +3299,7 @@ class LibvirtConfigNodeDeviceMdevInformation(LibvirtConfigObject):
root_name="capability", **kwargs)
self.type = None
self.iommu_group = None
+ self.uuid = None
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDeviceMdevInformation,
@@ -3308,6 +3309,8 @@ class LibvirtConfigNodeDeviceMdevInformation(LibvirtConfigObject):
self.type = c.get('id')
if c.tag == "iommuGroup":
self.iommu_group = int(c.get('number'))
+ if c.tag == "uuid":
+ self.uuid = c.text
class LibvirtConfigNodeDeviceVpdCap(LibvirtConfigObject):
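The new uuid attribute parsed above comes from the node device XML that libvirt 7.3 and later emit for mediated devices. A standalone sketch of parsing such a capability element with the standard library (the XML snippet and values are illustrative):

    from xml.etree import ElementTree

    XML = """
    <capability type='mdev'>
      <type id='nvidia-11'/>
      <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0140</uuid>
      <iommuGroup number='12'/>
    </capability>
    """

    cap = ElementTree.fromstring(XML)
    mdev_type = cap.find('type').get('id')                   # 'nvidia-11'
    iommu_group = int(cap.find('iommuGroup').get('number'))  # 12
    # <uuid> is only present with libvirt >= 7.3; callers fall back to
    # parsing the device name otherwise.
    uuid_el = cap.find('uuid')
    mdev_uuid = uuid_el.text if uuid_el is not None else None
    print(mdev_type, iommu_group, mdev_uuid)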
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 7c0abcb150..615a009e06 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -1639,7 +1639,7 @@ class LibvirtDriver(driver.ComputeDriver):
try:
self._disconnect_volume(
context, connection_info, instance,
- destroy_secrets=destroy_secrets)
+ destroy_secrets=destroy_secrets, force=True)
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if cleanup_instance_disks:
@@ -1956,7 +1956,7 @@ class LibvirtDriver(driver.ComputeDriver):
return (False if connection_count > 1 else True)
def _disconnect_volume(self, context, connection_info, instance,
- encryption=None, destroy_secrets=True):
+ encryption=None, destroy_secrets=True, force=False):
self._detach_encryptor(
context,
connection_info,
@@ -1968,7 +1968,8 @@ class LibvirtDriver(driver.ComputeDriver):
multiattach = connection_info.get('multiattach', False)
if self._should_disconnect_target(
context, instance, multiattach, vol_driver, volume_id):
- vol_driver.disconnect_volume(connection_info, instance)
+ vol_driver.disconnect_volume(
+ connection_info, instance, force=force)
else:
LOG.info('Detected multiple connections on this host for '
'volume: %(volume)s, skipping target disconnect.',
@@ -8019,15 +8020,52 @@ class LibvirtDriver(driver.ComputeDriver):
def _get_mediated_device_information(self, devname):
"""Returns a dict of a mediated device."""
- virtdev = self._host.device_lookup_by_name(devname)
+ # LP #1951656 - In libvirt 7.7, the mdev name now includes the PCI
+ # address of the parent device (e.g. mdev_<uuid>_<pci_address>) because
+ # mdevctl allows multiple mediated devices with the same UUID to be
+ # defined (only one can be active at a time). Since the guest
+ # information doesn't have the parent ID, try to look up which
+ # mediated device is available that matches the UUID. If multiple
+ # devices are found that match the UUID, then this is an error
+ # condition.
+ try:
+ virtdev = self._host.device_lookup_by_name(devname)
+ except libvirt.libvirtError as ex:
+ if ex.get_error_code() != libvirt.VIR_ERR_NO_NODE_DEVICE:
+ raise
+ mdevs = [dev for dev in self._host.list_mediated_devices()
+ if dev.startswith(devname)]
+ # If no matching devices are found, simply raise the original
+ # exception indicating that no devices are found.
+ if not mdevs:
+ raise
+ elif len(mdevs) > 1:
+ msg = ("The mediated device name %(devname)s refers to a UUID "
+ "that is present in multiple libvirt mediated devices. "
+ "Matching libvirt mediated devices are %(devices)s. "
+ "Mediated device UUIDs must be unique for Nova." %
+ {'devname': devname,
+ 'devices': ', '.join(mdevs)})
+ raise exception.InvalidLibvirtMdevConfig(reason=msg)
+
+ LOG.debug('Found requested device %s as %s. Using that.',
+ devname, mdevs[0])
+ virtdev = self._host.device_lookup_by_name(mdevs[0])
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
+ # Starting with libvirt 7.3, the uuid information is available in the
+ # node device information. If it's there, use it. Otherwise, fall
+ # back to the previous behavior of parsing the uuid from the devname.
+ if cfgdev.mdev_information.uuid:
+ mdev_uuid = cfgdev.mdev_information.uuid
+ else:
+ mdev_uuid = libvirt_utils.mdev_name2uuid(cfgdev.name)
device = {
"dev_id": cfgdev.name,
- # name is like mdev_00ead764_fdc0_46b6_8db9_2963f5c815b4
- "uuid": libvirt_utils.mdev_name2uuid(cfgdev.name),
+ "uuid": mdev_uuid,
# the physical GPU PCI device
"parent": cfgdev.parent,
"type": cfgdev.mdev_information.type,
@@ -8115,6 +8153,7 @@ class LibvirtDriver(driver.ComputeDriver):
:param requested_types: Filter out the result for only mediated devices
having those types.
"""
+ LOG.debug('Searching for available mdevs...')
allocated_mdevs = self._get_all_assigned_mediated_devices()
mdevs = self._get_mediated_devices(requested_types)
available_mdevs = set()
@@ -8130,6 +8169,7 @@ class LibvirtDriver(driver.ComputeDriver):
available_mdevs.add(mdev["uuid"])
available_mdevs -= set(allocated_mdevs)
+ LOG.info('Available mdevs: %s.', available_mdevs)
return available_mdevs
def _create_new_mediated_device(self, parent, uuid=None):
@@ -8141,6 +8181,7 @@ class LibvirtDriver(driver.ComputeDriver):
:returns: the newly created mdev UUID or None if not possible
"""
+ LOG.debug('Attempting to create new mdev...')
supported_types = self.supported_vgpu_types
# Try to see if we can still create a new mediated device
devices = self._get_mdev_capable_devices(supported_types)
@@ -8152,6 +8193,7 @@ class LibvirtDriver(driver.ComputeDriver):
# The device is not the one that was called, not creating
# the mdev
continue
+ LOG.debug('Trying on: %s.', dev_name)
dev_supported_type = self._get_vgpu_type_per_pgpu(dev_name)
if dev_supported_type and device['types'][
dev_supported_type]['availableInstances'] > 0:
@@ -8161,7 +8203,13 @@ class LibvirtDriver(driver.ComputeDriver):
pci_addr = "{}:{}:{}.{}".format(*dev_name[4:].split('_'))
chosen_mdev = nova.privsep.libvirt.create_mdev(
pci_addr, dev_supported_type, uuid=uuid)
+ LOG.info('Created mdev: %s on pGPU: %s.',
+ chosen_mdev, pci_addr)
return chosen_mdev
+ LOG.debug('Failed: No available instances on device.')
+ LOG.info('Failed to create mdev. '
+ 'No free space found among the following devices: %s.',
+ [dev['dev_id'] for dev in devices])
@utils.synchronized(VGPU_RESOURCE_SEMAPHORE)
def _allocate_mdevs(self, allocations):
@@ -8244,6 +8292,8 @@ class LibvirtDriver(driver.ComputeDriver):
# Take the first available mdev
chosen_mdev = mdevs_available.pop()
else:
+ LOG.debug('No available mdevs were found. '
+ 'Creating a new one...')
chosen_mdev = self._create_new_mediated_device(parent_device)
if not chosen_mdev:
# If we can't find devices having available VGPUs, just raise
@@ -8251,6 +8301,7 @@ class LibvirtDriver(driver.ComputeDriver):
reason='mdev-capable resource is not available')
else:
chosen_mdevs.append(chosen_mdev)
+ LOG.info('Allocated mdev: %s.', chosen_mdev)
return chosen_mdevs
def _detach_mediated_devices(self, guest):
@@ -10419,10 +10470,13 @@ class LibvirtDriver(driver.ComputeDriver):
:param instance: the instance being migrated
:param migrate_date: a LibvirtLiveMigrateData object
"""
- network_info = network_model.NetworkInfo(
- [vif.source_vif for vif in migrate_data.vifs
- if "source_vif" in vif and vif.source_vif])
- self._reattach_instance_vifs(context, instance, network_info)
+ # NOTE(artom) migrate_data.vifs might not be set if our Neutron doesn't
+ # have the multiple port bindings extension.
+ if 'vifs' in migrate_data and migrate_data.vifs:
+ network_info = network_model.NetworkInfo(
+ [vif.source_vif for vif in migrate_data.vifs
+ if "source_vif" in vif and vif.source_vif])
+ self._reattach_instance_vifs(context, instance, network_info)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
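The fallback lookup added in _get_mediated_device_information above boils down to prefix-matching the UUID-based name against the mdev names libvirt reports. A standalone sketch of that disambiguation (the device names are illustrative):

    def find_mdev_name(devname, all_mdev_names):
        """devname has the form mdev_<uuid_with_underscores>; libvirt 7.7+
        may report it as mdev_<uuid_with_underscores>_<pci_address>.
        """
        matches = [name for name in all_mdev_names
                   if name.startswith(devname)]
        if not matches:
            raise LookupError('no mdev matching %s' % devname)
        if len(matches) > 1:
            raise ValueError('UUID in %s matches several mdevs: %s'
                             % (devname, ', '.join(matches)))
        return matches[0]


    names = ['mdev_b38a3f43_4be2_4046_897f_b67c2f5e0140_0000_41_00_0']
    assert find_mdev_name(
        'mdev_b38a3f43_4be2_4046_897f_b67c2f5e0140', names) == names[0]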
diff --git a/nova/virt/libvirt/guest.py b/nova/virt/libvirt/guest.py
index 53080e41f0..68bd4ca5b0 100644
--- a/nova/virt/libvirt/guest.py
+++ b/nova/virt/libvirt/guest.py
@@ -655,6 +655,7 @@ class Guest(object):
stats = self._domain.jobStats()
return JobInfo(**stats)
except libvirt.libvirtError as ex:
+ errmsg = ex.get_error_message()
if ex.get_error_code() == libvirt.VIR_ERR_NO_SUPPORT:
# Remote libvirt doesn't support new API
LOG.debug("Missing remote virDomainGetJobStats: %s", ex)
@@ -667,6 +668,12 @@ class Guest(object):
# away completely
LOG.debug("Domain has shutdown/gone away: %s", ex)
return JobInfo(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
+ elif (ex.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR and
+ errmsg and "migration was active, "
+ "but no RAM info was set" in errmsg):
+ LOG.debug("Migration is active or completed but "
+ "virDomainGetJobStats is missing ram: %s", ex)
+ return JobInfo(type=libvirt.VIR_DOMAIN_JOB_NONE)
else:
LOG.debug("Failed to get job stats: %s", ex)
raise
diff --git a/nova/virt/libvirt/host.py b/nova/virt/libvirt/host.py
index cdf47008de..ebcc112534 100644
--- a/nova/virt/libvirt/host.py
+++ b/nova/virt/libvirt/host.py
@@ -46,6 +46,7 @@ from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import importutils
+from oslo_utils import strutils
from oslo_utils import units
from oslo_utils import versionutils
@@ -1267,6 +1268,20 @@ class Host(object):
return None
return vpd_cap.card_serial_number
+ def _get_pf_details(self, device: dict, pci_address: str) -> dict:
+ if device.get('dev_type') != fields.PciDeviceType.SRIOV_PF:
+ return {}
+
+ try:
+ return {
+ 'mac_address': pci_utils.get_mac_by_pci_address(pci_address)
+ }
+ except exception.PciDeviceNotFoundById:
+ LOG.debug(
+ 'Cannot get MAC address of the PF %s. It is probably attached '
+ 'to a guest already', pci_address)
+ return {}
+
def _get_pcidev_info(
self,
devname: str,
@@ -1426,6 +1441,7 @@ class Host(object):
_get_device_type(cfgdev, address, dev, net_devs, vdpa_devs))
device.update(_get_device_capabilities(device, dev, net_devs))
device.update(_get_vpd_details(device, dev, pci_devs))
+ device.update(self._get_pf_details(device, address))
return device
def get_vdpa_nodedev_by_address(
@@ -1487,7 +1503,7 @@ class Host(object):
def list_mediated_devices(self, flags=0):
"""Lookup mediated devices.
- :returns: a list of virNodeDevice instance
+ :returns: a list of strings with the names of the mediated devices
"""
return self._list_devices("mdev", flags=flags)
@@ -1656,9 +1672,9 @@ class Host(object):
return False
with open(SEV_KERNEL_PARAM_FILE) as f:
- contents = f.read()
- LOG.debug("%s contains [%s]", SEV_KERNEL_PARAM_FILE, contents)
- return contents == "1\n"
+ content = f.read()
+ LOG.debug("%s contains [%s]", SEV_KERNEL_PARAM_FILE, content)
+ return strutils.bool_from_string(content)
@property
def supports_amd_sev(self) -> bool:
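The switch to strutils.bool_from_string above matters because the SEV kernel parameter file is not guaranteed to contain exactly "1\n"; newer kernels expose the parameter as a boolean and print "Y", for example. A quick illustration, assuming oslo.utils is installed:

    from oslo_utils import strutils

    for raw in ('1\n', 'Y\n', 'y', '0\n', 'N\n'):
        # bool_from_string strips whitespace and accepts, case-insensitively,
        # 1/t/true/on/y/yes as True; anything else falls back to False.
        print(repr(raw), '->', strutils.bool_from_string(raw))
    # '1\n' -> True, 'Y\n' -> True, 'y' -> True,
    # '0\n' -> False, 'N\n' -> False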
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 834f242c79..a1b9459b7e 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -581,17 +581,31 @@ def get_default_machine_type(arch: str) -> ty.Optional[str]:
def mdev_name2uuid(mdev_name: str) -> str:
- """Convert an mdev name (of the form mdev_<uuid_with_underscores>) to a
- uuid (of the form 8-4-4-4-12).
+ """Convert an mdev name (of the form mdev_<uuid_with_underscores> or
+ mdev_<uuid_with_underscores>_<pciaddress>) to a uuid
+ (of the form 8-4-4-4-12).
+
+ :param mdev_name: the name of the mdev to parse the UUID from
+ :returns: string containing the uuid
"""
- return str(uuid.UUID(mdev_name[5:].replace('_', '-')))
+ mdev_uuid = mdev_name[5:].replace('_', '-')
+ # Unconditionally remove the PCI address from the name
+ mdev_uuid = mdev_uuid[:36]
+ return str(uuid.UUID(mdev_uuid))
+
+def mdev_uuid2name(mdev_uuid: str, parent: ty.Optional[str] = None) -> str:
+ """Convert an mdev uuid (of the form 8-4-4-4-12) and optionally its parent
+ device to a name (of the form mdev_<uuid_with_underscores>[_<pciid>]).
-def mdev_uuid2name(mdev_uuid: str) -> str:
- """Convert an mdev uuid (of the form 8-4-4-4-12) to a name (of the form
- mdev_<uuid_with_underscores>).
+ :param mdev_uuid: the uuid of the mediated device
+ :param parent: the parent device id for the mediated device
+ :returns: name of the mdev to reference in libvirt
"""
- return "mdev_" + mdev_uuid.replace('-', '_')
+ name = "mdev_" + mdev_uuid.replace('-', '_')
+ if parent and parent.startswith('pci_'):
+ # keep the separating underscore from the 'pci' prefix
+ name = name + parent[3:]
+ return name
def get_flags_by_flavor_specs(flavor: 'objects.Flavor') -> ty.Set[str]:
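Given a parent PCI device, the two helpers above are inverses of each other. A standalone round-trip sketch (the UUID and PCI id are illustrative; slicing the parent at index 3 keeps the underscore from the 'pci_' prefix as the separator):

    import uuid


    def mdev_name2uuid(mdev_name):
        # Drop the 'mdev_' prefix, then keep only the 36-char UUID part so
        # a trailing '_<pci_address>' (libvirt >= 7.7) is ignored.
        return str(uuid.UUID(mdev_name[5:].replace('_', '-')[:36]))


    def mdev_uuid2name(mdev_uuid, parent=None):
        name = 'mdev_' + mdev_uuid.replace('-', '_')
        if parent and parent.startswith('pci_'):
            name += parent[3:]  # keeps the '_' separator from 'pci_'
        return name


    u = 'b38a3f43-4be2-4046-897f-b67c2f5e0140'
    name = mdev_uuid2name(u, parent='pci_0000_41_00_0')
    assert name == 'mdev_b38a3f43_4be2_4046_897f_b67c2f5e0140_0000_41_00_0'
    assert mdev_name2uuid(name) == u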
diff --git a/nova/virt/libvirt/volume/fibrechannel.py b/nova/virt/libvirt/volume/fibrechannel.py
index b50db3aa1c..1f890c95c1 100644
--- a/nova/virt/libvirt/volume/fibrechannel.py
+++ b/nova/virt/libvirt/volume/fibrechannel.py
@@ -59,7 +59,7 @@ class LibvirtFibreChannelVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
connection_info['data']['multipath_id'] = \
device_info['multipath_id']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach FC Volume", instance=instance)
@@ -69,11 +69,12 @@ class LibvirtFibreChannelVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
# the 2nd param of disconnect_volume and be consistent
# with the rest of the connectors.
self.connector.disconnect_volume(connection_info['data'],
- connection_info['data'])
+ connection_info['data'],
+ force=force)
LOG.debug("Disconnected FC Volume", instance=instance)
super(LibvirtFibreChannelVolumeDriver,
- self).disconnect_volume(connection_info, instance)
+ self).disconnect_volume(connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size):
"""Extend the volume."""
diff --git a/nova/virt/libvirt/volume/fs.py b/nova/virt/libvirt/volume/fs.py
index 5fb9af4a52..992ef45016 100644
--- a/nova/virt/libvirt/volume/fs.py
+++ b/nova/virt/libvirt/volume/fs.py
@@ -116,7 +116,7 @@ class LibvirtMountedFileSystemVolumeDriver(LibvirtBaseFileSystemVolumeDriver,
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Disconnect the volume."""
vol_name = connection_info['data']['name']
mountpoint = self._get_mount_path(connection_info)
diff --git a/nova/virt/libvirt/volume/iscsi.py b/nova/virt/libvirt/volume/iscsi.py
index 564bac14cc..2b25972a49 100644
--- a/nova/virt/libvirt/volume/iscsi.py
+++ b/nova/virt/libvirt/volume/iscsi.py
@@ -66,19 +66,20 @@ class LibvirtISCSIVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach iSCSI Volume", instance=instance)
try:
- self.connector.disconnect_volume(connection_info['data'], None)
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
except os_brick_exception.VolumeDeviceNotFound as exc:
LOG.warning('Ignoring VolumeDeviceNotFound: %s', exc)
return
LOG.debug("Disconnected iSCSI Volume", instance=instance)
super(LibvirtISCSIVolumeDriver,
- self).disconnect_volume(connection_info, instance)
+ self).disconnect_volume(connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size):
"""Extend the volume."""
diff --git a/nova/virt/libvirt/volume/lightos.py b/nova/virt/libvirt/volume/lightos.py
index d6d393994e..6a22bf6dc6 100644
--- a/nova/virt/libvirt/volume/lightos.py
+++ b/nova/virt/libvirt/volume/lightos.py
@@ -42,14 +42,15 @@ class LibvirtLightOSVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
LOG.debug("Connecting NVMe volume with device_info %s", device_info)
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from the instance."""
LOG.debug("Disconnecting NVMe disk. instance:%s, volume_id:%s",
connection_info.get("instance", ""),
connection_info.get("volume_id", ""))
- self.connector.disconnect_volume(connection_info['data'], None)
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
super(LibvirtLightOSVolumeDriver, self).disconnect_volume(
- connection_info, instance)
+ connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size=None):
"""Extend the volume."""
diff --git a/nova/virt/libvirt/volume/nvme.py b/nova/virt/libvirt/volume/nvme.py
index 7436552812..e2977c3572 100644
--- a/nova/virt/libvirt/volume/nvme.py
+++ b/nova/virt/libvirt/volume/nvme.py
@@ -45,13 +45,13 @@ class LibvirtNVMEVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from the instance."""
LOG.debug("Disconnecting NVMe disk", instance=instance)
self.connector.disconnect_volume(
- connection_info['data'], None)
+ connection_info['data'], None, force=force)
super(LibvirtNVMEVolumeDriver,
- self).disconnect_volume(connection_info, instance)
+ self).disconnect_volume(connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size):
"""Extend the volume."""
diff --git a/nova/virt/libvirt/volume/quobyte.py b/nova/virt/libvirt/volume/quobyte.py
index bb7a770e57..2eb4bcfb42 100644
--- a/nova/virt/libvirt/volume/quobyte.py
+++ b/nova/virt/libvirt/volume/quobyte.py
@@ -189,7 +189,7 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
instance=instance)
@utils.synchronized('connect_qb_volume')
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Disconnect the volume."""
mount_path = self._get_mount_path(connection_info)
diff --git a/nova/virt/libvirt/volume/scaleio.py b/nova/virt/libvirt/volume/scaleio.py
index 7c414c2870..04a9423e8e 100644
--- a/nova/virt/libvirt/volume/scaleio.py
+++ b/nova/virt/libvirt/volume/scaleio.py
@@ -57,12 +57,13 @@ class LibvirtScaleIOVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
instance=instance)
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
- self.connector.disconnect_volume(connection_info['data'], None)
+ def disconnect_volume(self, connection_info, instance, force=False):
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
LOG.debug("Disconnected volume", instance=instance)
super(LibvirtScaleIOVolumeDriver, self).disconnect_volume(
- connection_info, instance)
+ connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size):
LOG.debug("calling os-brick to extend ScaleIO Volume",
diff --git a/nova/virt/libvirt/volume/smbfs.py b/nova/virt/libvirt/volume/smbfs.py
index d112af750c..9de1ce23cd 100644
--- a/nova/virt/libvirt/volume/smbfs.py
+++ b/nova/virt/libvirt/volume/smbfs.py
@@ -52,7 +52,7 @@ class LibvirtSMBFSVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
device_path = self._get_device_path(connection_info)
connection_info['data']['device_path'] = device_path
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Disconnect the volume."""
smbfs_share = connection_info['data']['export']
mount_path = self._get_mount_path(connection_info)
diff --git a/nova/virt/libvirt/volume/storpool.py b/nova/virt/libvirt/volume/storpool.py
index 0e71221f5b..e6dffca39a 100644
--- a/nova/virt/libvirt/volume/storpool.py
+++ b/nova/virt/libvirt/volume/storpool.py
@@ -47,10 +47,11 @@ class LibvirtStorPoolVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
device_info, instance=instance)
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
LOG.debug("Detaching StorPool volume %s",
connection_info['data']['volume'], instance=instance)
- self.connector.disconnect_volume(connection_info['data'], None)
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
LOG.debug("Detached StorPool volume", instance=instance)
def extend_volume(self, connection_info, instance, requested_size):
diff --git a/nova/virt/libvirt/volume/volume.py b/nova/virt/libvirt/volume/volume.py
index 6d650c80e6..f76c3618b2 100644
--- a/nova/virt/libvirt/volume/volume.py
+++ b/nova/virt/libvirt/volume/volume.py
@@ -135,7 +135,7 @@ class LibvirtBaseVolumeDriver(object):
"""Connect the volume."""
pass
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Disconnect the volume."""
pass
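Every libvirt volume driver in this change threads the new force keyword down to its os-brick connector. A minimal sketch of the pattern, assuming os-brick is installed; the connector protocol, root helper, and connection_info contents are illustrative:

    from os_brick.initiator import connector as brick_connector


    class ExampleVolumeDriver(object):
        def __init__(self):
            self.connector = brick_connector.InitiatorConnector.factory(
                'ISCSI', root_helper='sudo')

        def disconnect_volume(self, connection_info, instance, force=False):
            # force=True asks os-brick to tear the device down even if
            # flushing fails, e.g. while cleaning up an evacuated instance.
            self.connector.disconnect_volume(
                connection_info['data'], None, force=force)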
diff --git a/nova/virt/libvirt/volume/vzstorage.py b/nova/virt/libvirt/volume/vzstorage.py
index 85ffb45076..babfdef55c 100644
--- a/nova/virt/libvirt/volume/vzstorage.py
+++ b/nova/virt/libvirt/volume/vzstorage.py
@@ -126,9 +126,10 @@ class LibvirtVZStorageVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
return _connect_volume(connection_info, instance)
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach Vzstorage Volume",
instance=instance)
- self.connector.disconnect_volume(connection_info['data'], None)
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
LOG.debug("Disconnected Vzstorage Volume", instance=instance)
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index bf1e455bba..f5328148d2 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -91,12 +91,14 @@ def _get_auth(context):
# from them generated from 'context.get_admin_context'
# which only set is_admin=True but is without token.
# So add load_auth_plugin when this condition appear.
+ user_auth = None
if context.is_admin and not context.auth_token:
if not _ADMIN_AUTH:
_ADMIN_AUTH = _load_auth_plugin(CONF)
- return _ADMIN_AUTH
- else:
- return service_auth.get_auth_plugin(context)
+ user_auth = _ADMIN_AUTH
+
+ # When user_auth is None, the user auth will be derived from the context.
+ return service_auth.get_auth_plugin(context, user_auth=user_auth)
# NOTE(efried): Bug #1752152
@@ -888,19 +890,23 @@ class API(object):
@retrying.retry(stop_max_attempt_number=5,
retry_on_exception=lambda e:
(isinstance(e, cinder_exception.ClientException) and
- e.code == 500))
+ e.code in (500, 504)))
def attachment_delete(self, context, attachment_id):
try:
cinderclient(
context, '3.44', skip_version_check=True).attachments.delete(
attachment_id)
except cinder_exception.ClientException as ex:
- with excutils.save_and_reraise_exception():
- LOG.error('Delete attachment failed for attachment '
- '%(id)s. Error: %(msg)s Code: %(code)s',
- {'id': attachment_id,
- 'msg': str(ex),
- 'code': getattr(ex, 'code', None)})
+ if ex.code == 404:
+ LOG.warning('Attachment %(id)s does not exist. Ignoring.',
+ {'id': attachment_id})
+ else:
+ with excutils.save_and_reraise_exception():
+ LOG.error('Delete attachment failed for attachment '
+ '%(id)s. Error: %(msg)s Code: %(code)s',
+ {'id': attachment_id,
+ 'msg': str(ex),
+ 'code': getattr(ex, 'code', None)})
@translate_attachment_exception
def attachment_complete(self, context, attachment_id):
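With this change attachment_delete retries up to five times on 500 and 504 and treats 404 as already deleted. A standalone sketch of the same policy, assuming the `retrying` library; ClientException and _cinder_delete are simplified stand-ins for the cinderclient pieces:

    import retrying


    class ClientException(Exception):
        def __init__(self, code):
            super().__init__(code)
            self.code = code


    _calls = {'n': 0}


    def _cinder_delete(attachment_id):
        # Toy stand-in for the cinderclient call: fail once with 504,
        # then succeed.
        _calls['n'] += 1
        if _calls['n'] == 1:
            raise ClientException(504)


    @retrying.retry(stop_max_attempt_number=5,
                    retry_on_exception=lambda e:
                        isinstance(e, ClientException) and
                        e.code in (500, 504))
    def attachment_delete(attachment_id):
        try:
            _cinder_delete(attachment_id)
        except ClientException as ex:
            if ex.code == 404:
                return  # already gone; nothing left to clean up
            raise


    attachment_delete('some-attachment-id')  # succeeds on the second attempt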
diff --git a/releasenotes/notes/bug-1942329-22b08fa4b322881d.yaml b/releasenotes/notes/bug-1942329-22b08fa4b322881d.yaml
new file mode 100644
index 0000000000..496508ca13
--- /dev/null
+++ b/releasenotes/notes/bug-1942329-22b08fa4b322881d.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ As a fix for `bug 1942329 <https://bugs.launchpad.net/neutron/+bug/1942329>`_,
+ nova now updates the MAC address of ``direct-physical`` ports during
+ move operations to reflect the MAC address of the physical device on the
+ destination host. Servers created before this fix need to be moved, or
+ have the port detached and re-attached, to synchronize the MAC address.
diff --git a/releasenotes/notes/bug-1978444-db46df5f3d5ea19e.yaml b/releasenotes/notes/bug-1978444-db46df5f3d5ea19e.yaml
new file mode 100644
index 0000000000..6c19804074
--- /dev/null
+++ b/releasenotes/notes/bug-1978444-db46df5f3d5ea19e.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ `Bug #1978444 <https://bugs.launchpad.net/nova/+bug/1978444>`_: Nova now
+ retries deleting a volume attachment if the Cinder API returns
+ ``504 Gateway Timeout``. Also, ``404 Not Found`` is now ignored and
+ only leaves a warning message.
diff --git a/releasenotes/notes/bug-1981813-vnic-type-change-9f3e16fae885b57f.yaml b/releasenotes/notes/bug-1981813-vnic-type-change-9f3e16fae885b57f.yaml
new file mode 100644
index 0000000000..a5a3b7c8c2
--- /dev/null
+++ b/releasenotes/notes/bug-1981813-vnic-type-change-9f3e16fae885b57f.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ `Bug #1981813 <https://bugs.launchpad.net/nova/+bug/1981813>`_: Nova now
+ detects if the ``vnic_type`` of a bound port has been changed in neutron
+ and logs an ERROR in the compute service log, as such a change on a
+ bound port is not supported. Also, the nova-compute service will no
+ longer crash on restart after such a port change; nova will log an
+ ERROR and skip initializing any instance with such a port during startup.
diff --git a/releasenotes/notes/bug-1982284-libvirt-handle-no-ram-info-was-set-99784934ed80fd72.yaml b/releasenotes/notes/bug-1982284-libvirt-handle-no-ram-info-was-set-99784934ed80fd72.yaml
new file mode 100644
index 0000000000..943aa99a43
--- /dev/null
+++ b/releasenotes/notes/bug-1982284-libvirt-handle-no-ram-info-was-set-99784934ed80fd72.yaml
@@ -0,0 +1,11 @@
+---
+other:
+ - |
+ A workaround has been added to the libvirt driver to catch the
+ following error during live migration and allow the migration to
+ proceed:
+
+ ``libvirt.libvirtError: internal error: migration was active, but no RAM info was set``
+
+ See `bug 1982284`_ for more details.
+
+ .. _bug 1982284: https://bugs.launchpad.net/nova/+bug/1982284
diff --git a/releasenotes/notes/fix-group-policy-validation-with-deleted-groups-4f685fd1d6b84192.yaml b/releasenotes/notes/fix-group-policy-validation-with-deleted-groups-4f685fd1d6b84192.yaml
new file mode 100644
index 0000000000..7f7d42bd0e
--- /dev/null
+++ b/releasenotes/notes/fix-group-policy-validation-with-deleted-groups-4f685fd1d6b84192.yaml
@@ -0,0 +1,13 @@
+---
+fixes:
+ - |
+ When the server group policy validation upcall is enabled,
+ nova asserts that the policy is not violated on move operations
+ and initial instance creation. As noted in `bug 1890244`_, if a
+ server was created in a server group and that group was later
+ deleted, the validation upcall failed due to an uncaught
+ exception. This prevented evacuate and other move operations
+ from functioning. This has now been fixed and nova ignores
+ deleted server groups.
+
+ .. _bug 1890244: https://bugs.launchpad.net/nova/+bug/1890244
diff --git a/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml b/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml
new file mode 100644
index 0000000000..7e80059b80
--- /dev/null
+++ b/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fix rescuing a volume-backed instance by checking for the
+ ``hw_rescue_disk`` and ``hw_rescue_device`` properties in the image
+ metadata before attempting to rescue the instance.
diff --git a/releasenotes/notes/service-user-token-421d067c16257782.yaml b/releasenotes/notes/service-user-token-421d067c16257782.yaml
new file mode 100644
index 0000000000..d3af14fbb8
--- /dev/null
+++ b/releasenotes/notes/service-user-token-421d067c16257782.yaml
@@ -0,0 +1,11 @@
+upgrade:
+ - |
+ Configuration of service user tokens is now **required** for all Nova services
+ to ensure security of block-storage volume data.
+
+ All Nova configuration files must configure the ``[service_user]`` section as
+ described in the `documentation`__.
+
+ See https://bugs.launchpad.net/nova/+bug/2004555 for more details.
+
+ __ https://docs.openstack.org/nova/latest/admin/configuration/service-user-token.html
diff --git a/releasenotes/notes/skip-hypervisor-version-check-on-lm-a87f2dcb4f8bf0f2.yaml b/releasenotes/notes/skip-hypervisor-version-check-on-lm-a87f2dcb4f8bf0f2.yaml
new file mode 100644
index 0000000000..00fe6a24c7
--- /dev/null
+++ b/releasenotes/notes/skip-hypervisor-version-check-on-lm-a87f2dcb4f8bf0f2.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ Adds a workaround that allows operators to disable the hypervisor
+ version check on live migration. This workaround option can be
+ useful in certain upgrade scenarios, e.g. if you need to relocate
+ all instances off a compute node due to an emergency hardware
+ issue and only an older compute node is ready at the time.
+
+ To enable this, use the config attribute
+ ``[workarounds]skip_hypervisor_version_check_on_lm=True`` in
+ ``nova.conf``. The option defaults to ``False``.
diff --git a/releasenotes/notes/vdpa-move-ops-a7b3799807807a92.yaml b/releasenotes/notes/vdpa-move-ops-a7b3799807807a92.yaml
new file mode 100644
index 0000000000..2580f73d35
--- /dev/null
+++ b/releasenotes/notes/vdpa-move-ops-a7b3799807807a92.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+ When vDPA was first introduced, move operations were implemented in the
+ code but tested neither in a real environment nor in functional tests.
+ Due to this gap, nova elected to block move operations for instances
+ with vDPA devices. All move operations except live migration have now
+ been tested and found to work, so the API blocks have been removed and
+ functional tests introduced. Other operations, such as suspend and
+ live migration, require code changes to support and will be enabled as
+ new features in the future.
diff --git a/tools/check-cherry-picks.sh b/tools/check-cherry-picks.sh
index 46cef8c225..3042aa1659 100755
--- a/tools/check-cherry-picks.sh
+++ b/tools/check-cherry-picks.sh
@@ -23,7 +23,7 @@ hashes=$(git show --format='%b' --quiet $commit_hash | sed -nr 's/^.cherry picke
checked=0
branches+=""
for hash in $hashes; do
- branch=$(git branch -a --contains "$hash" 2>/dev/null| grep -oE '(master|stable/[a-z]+)')
+ branch=$(git branch -a --contains "$hash" 2>/dev/null| grep -oE '(master|stable/[a-z0-9.]+)')
if [ $? -ne 0 ]; then
echo "Cherry pick hash $hash not on any master or stable branches"
exit 1