-rw-r--r--  .zuul.yaml | 16
-rw-r--r--  doc/source/admin/availability-zones.rst | 2
-rw-r--r--  doc/source/admin/configuration/cross-cell-resize.rst | 2
-rw-r--r--  doc/source/admin/configuration/index.rst | 1
-rw-r--r--  doc/source/admin/configuration/service-user-token.rst | 59
-rw-r--r--  doc/source/admin/live-migration-usage.rst | 2
-rw-r--r--  doc/source/admin/migrate-instance-with-snapshot.rst | 2
-rw-r--r--  doc/source/admin/scheduling.rst | 31
-rw-r--r--  doc/source/admin/support-compute.rst | 64
-rw-r--r--  doc/source/install/compute-install-obs.rst | 20
-rw-r--r--  doc/source/install/compute-install-rdo.rst | 20
-rw-r--r--  doc/source/install/compute-install-ubuntu.rst | 20
-rw-r--r--  doc/source/install/controller-install-obs.rst | 20
-rw-r--r--  doc/source/install/controller-install-rdo.rst | 20
-rw-r--r--  doc/source/install/controller-install-ubuntu.rst | 20
-rw-r--r--  doc/source/reference/isolate-aggregates.rst | 2
-rw-r--r--  nova/api/openstack/wsgi.py | 6
-rw-r--r--  nova/cmd/status.py | 11
-rw-r--r--  nova/compute/manager.py | 14
-rw-r--r--  nova/compute/resource_tracker.py | 38
-rw-r--r--  nova/conf/scheduler.py | 43
-rw-r--r--  nova/network/neutron.py | 10
-rw-r--r--  nova/scheduler/weights/hypervisor_version.py | 39
-rw-r--r--  nova/service_auth.py | 6
-rw-r--r--  nova/tests/fixtures/nova.py | 71
-rw-r--r--  nova/tests/functional/libvirt/base.py | 1
-rw-r--r--  nova/tests/functional/libvirt/test_evacuate.py | 1
-rw-r--r--  nova/tests/functional/libvirt/test_numa_servers.py | 70
-rw-r--r--  nova/tests/functional/libvirt/test_vpmem.py | 1
-rw-r--r--  nova/tests/functional/regressions/test_bug_1595962.py | 1
-rw-r--r--  nova/tests/functional/regressions/test_bug_1995153.py | 107
-rw-r--r--  nova/tests/unit/cmd/test_status.py | 16
-rw-r--r--  nova/tests/unit/compute/test_compute.py | 21
-rw-r--r--  nova/tests/unit/compute/test_compute_mgr.py | 43
-rw-r--r--  nova/tests/unit/compute/test_resource_tracker.py | 21
-rw-r--r--  nova/tests/unit/compute/test_shelve.py | 4
-rw-r--r--  nova/tests/unit/network/test_neutron.py | 34
-rw-r--r--  nova/tests/unit/scheduler/fakes.py | 2
-rw-r--r--  nova/tests/unit/scheduler/weights/test_weights_hypervisor_version.py | 97
-rw-r--r--  nova/tests/unit/test_service_auth.py | 10
-rw-r--r--  nova/tests/unit/virt/hyperv/test_vmops.py | 2
-rw-r--r--  nova/tests/unit/virt/hyperv/test_volumeops.py | 26
-rw-r--r--  nova/tests/unit/virt/libvirt/test_driver.py | 111
-rw-r--r--  nova/tests/unit/virt/libvirt/test_host.py | 64
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py | 20
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_iscsi.py | 9
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_lightos.py | 8
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_nvme.py | 8
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_scaleio.py | 8
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_storpool.py | 16
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_vzstorage.py | 8
-rw-r--r--  nova/tests/unit/virt/test_hardware.py | 29
-rw-r--r--  nova/tests/unit/virt/test_virt_drivers.py | 1
-rw-r--r--  nova/tests/unit/volume/test_cinder.py | 11
-rw-r--r--  nova/virt/hardware.py | 11
-rw-r--r--  nova/virt/hyperv/driver.py | 8
-rw-r--r--  nova/virt/hyperv/vmops.py | 2
-rw-r--r--  nova/virt/hyperv/volumeops.py | 12
-rw-r--r--  nova/virt/libvirt/driver.py | 7
-rw-r--r--  nova/virt/libvirt/host.py | 31
-rw-r--r--  nova/virt/libvirt/volume/fibrechannel.py | 7
-rw-r--r--  nova/virt/libvirt/volume/fs.py | 2
-rw-r--r--  nova/virt/libvirt/volume/iscsi.py | 7
-rw-r--r--  nova/virt/libvirt/volume/lightos.py | 7
-rw-r--r--  nova/virt/libvirt/volume/nvme.py | 6
-rw-r--r--  nova/virt/libvirt/volume/quobyte.py | 2
-rw-r--r--  nova/virt/libvirt/volume/scaleio.py | 7
-rw-r--r--  nova/virt/libvirt/volume/smbfs.py | 2
-rw-r--r--  nova/virt/libvirt/volume/storpool.py | 5
-rw-r--r--  nova/virt/libvirt/volume/volume.py | 2
-rw-r--r--  nova/virt/libvirt/volume/vzstorage.py | 5
-rw-r--r--  nova/volume/cinder.py | 8
-rw-r--r--  releasenotes/notes/hyperv-experimental-antelope-372e18a05cafc295.yaml | 6
-rw-r--r--  releasenotes/notes/hypervisor-version-weigher-d0bba77e720edafe.yaml | 20
-rw-r--r--  releasenotes/notes/service-user-token-421d067c16257782.yaml | 11
-rwxr-xr-x  tools/check-cherry-picks.sh | 2
76 files changed, 1213 insertions, 246 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index abe4d2fa4a..e7f2668930 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -595,12 +595,13 @@
irrelevant-files: *nova-base-irrelevant-files
required-projects:
- openstack/nova
+ timeout: 9000
pre-run:
- playbooks/ceph/glance-setup.yaml
vars:
# NOTE(danms): Increase our swap size since we're dealing with
# larger images and trigger OOMs.
- configure_swap_size: 4096
+ configure_swap_size: 8192
# NOTE(danms): These tests create an empty non-raw image, which nova
# will refuse because we set never_download_image_if_on_rbd in this job.
# Just skip these tests for this case.
@@ -658,15 +659,6 @@
image_conversion:
output_format: raw
-# TODO(gmann): As per the 2023.1 testing runtime, we need to run at least
-# one job on Focal. This job can be removed as per the future testing
-# runtime (whenever we drop the Ubuntu Focal testing).
-- job:
- name: tempest-integrated-compute-ubuntu-focal
- description: This is integrated compute job testing on Ubuntu Focal(20.04)
- parent: tempest-integrated-compute
- nodeset: openstack-single-node-focal
-
# TODO(gmann): Remove these jobs once all the required services for the
# integrated compute gate (Cinder, Glance, Neutron) by default enable scope and new
# defaults which means all the nova jobs will be tested with new RBAC in
@@ -753,8 +745,6 @@
- ^setup.cfg$
- ^tools/.*$
- ^tox.ini$
- - tempest-integrated-compute-ubuntu-focal:
- irrelevant-files: *policies-irrelevant-files
- tempest-integrated-compute-enforce-scope-new-defaults:
irrelevant-files: *policies-irrelevant-files
- grenade-skip-level-always:
@@ -790,8 +780,6 @@
- ^(?!nova/network/.*)(?!nova/virt/libvirt/vif.py).*$
- tempest-integrated-compute:
irrelevant-files: *policies-irrelevant-files
- - tempest-integrated-compute-ubuntu-focal:
- irrelevant-files: *policies-irrelevant-files
- tempest-integrated-compute-enforce-scope-new-defaults:
irrelevant-files: *policies-irrelevant-files
- grenade-skip-level-always:
diff --git a/doc/source/admin/availability-zones.rst b/doc/source/admin/availability-zones.rst
index aff8a0ab31..28c4451b60 100644
--- a/doc/source/admin/availability-zones.rst
+++ b/doc/source/admin/availability-zones.rst
@@ -39,7 +39,7 @@ when comparing availability zones and host aggregates:
The use of the default availability zone name in requests can be very
error-prone. Since the user can see the list of availability zones, they
have no way to know whether the default availability zone name (currently
- ``nova``) is provided because an host belongs to an aggregate whose AZ
+ ``nova``) is provided because a host belongs to an aggregate whose AZ
metadata key is set to ``nova``, or because there is at least one host
not belonging to any aggregate. Consequently, it is highly recommended
for users to never ever ask for booting an instance by specifying an
diff --git a/doc/source/admin/configuration/cross-cell-resize.rst b/doc/source/admin/configuration/cross-cell-resize.rst
index e51e425774..0c34fd13f5 100644
--- a/doc/source/admin/configuration/cross-cell-resize.rst
+++ b/doc/source/admin/configuration/cross-cell-resize.rst
@@ -284,7 +284,7 @@ Troubleshooting
Timeouts
~~~~~~~~
-Configure a :ref:`service user <user_token_timeout>` in case the user token
+Configure a :ref:`service user <service_user_token>` in case the user token
times out, e.g. during the snapshot and download of a large server image.
If RPC calls are timing out with a ``MessagingTimeout`` error in the logs,
diff --git a/doc/source/admin/configuration/index.rst b/doc/source/admin/configuration/index.rst
index 233597b1fe..f5b6fde9da 100644
--- a/doc/source/admin/configuration/index.rst
+++ b/doc/source/admin/configuration/index.rst
@@ -19,6 +19,7 @@ A list of config options based on different topics can be found below:
.. toctree::
:maxdepth: 1
+ /admin/configuration/service-user-token
/admin/configuration/api
/admin/configuration/resize
/admin/configuration/cross-cell-resize
diff --git a/doc/source/admin/configuration/service-user-token.rst b/doc/source/admin/configuration/service-user-token.rst
new file mode 100644
index 0000000000..740730af1d
--- /dev/null
+++ b/doc/source/admin/configuration/service-user-token.rst
@@ -0,0 +1,59 @@
+.. _service_user_token:
+
+===================
+Service User Tokens
+===================
+
+.. note::
+
+ Configuration of service user tokens is **required** for every Nova service
+ for security reasons. See https://bugs.launchpad.net/nova/+bug/2004555 for
+ details.
+
+Configure Nova to send service user tokens alongside regular user tokens when
+making REST API calls to other services. The identity service (Keystone) will
+authenticate a request using the service user token if the regular user token
+has expired.
+
+This is important for long-running operations such as live migration or
+snapshot, which can take long enough to exceed the expiry of the user token.
+Without the service token, if an operation outlives the user token, post
+operations such as cleanup after a live migration could fail when Nova calls
+other service APIs like block-storage (Cinder) or networking (Neutron).
+
+The service token is also used by services to validate whether the API caller
+is a service. Some service APIs are restricted to service users only.
+
+To set up service tokens, create a ``nova`` service user and ``service`` role
+in the identity service (Keystone) and assign the ``service`` role to the
+``nova`` service user.
+
+Then, configure the :oslo.config:group:`service_user` section of the Nova
+configuration file, for example:
+
+.. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://104.130.216.102/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = secretservice
+ ...
+
+And configure the other identity options as necessary for the service user,
+much like you would configure nova to work with the image service (Glance) or
+networking service (Neutron).
+
+.. note::
+
+ Please note that the role assigned to the :oslo.config:group:`service_user`
+ needs to be in the configured
+ :oslo.config:option:`keystone_authtoken.service_token_roles` of other
+ services such as block-storage (Cinder), image (Glance), and networking
+ (Neutron).
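
For reference, the Keystone-side setup this new document describes can be done
with the openstack CLI; a minimal sketch, assuming the default domain and an
existing ``service`` project:

.. code-block:: console

   # Create the nova service user, the service role, and assign the role
   # (names match the [service_user] sample configuration above).
   $ openstack user create --domain Default --password-prompt nova
   $ openstack role create service
   $ openstack role add --project service --user nova service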
diff --git a/doc/source/admin/live-migration-usage.rst b/doc/source/admin/live-migration-usage.rst
index 32c67c2b0a..dc27574f91 100644
--- a/doc/source/admin/live-migration-usage.rst
+++ b/doc/source/admin/live-migration-usage.rst
@@ -320,4 +320,4 @@ To make live-migration succeed, you have several options:
If live migrations routinely timeout or fail during cleanup operations due
to the user token timing out, consider configuring nova to use
-:ref:`service user tokens <user_token_timeout>`.
+:ref:`service user tokens <service_user_token>`.
diff --git a/doc/source/admin/migrate-instance-with-snapshot.rst b/doc/source/admin/migrate-instance-with-snapshot.rst
index 65059679ab..230431091e 100644
--- a/doc/source/admin/migrate-instance-with-snapshot.rst
+++ b/doc/source/admin/migrate-instance-with-snapshot.rst
@@ -67,7 +67,7 @@ Create a snapshot of the instance
If snapshot operations routinely fail because the user token times out
while uploading a large disk image, consider configuring nova to use
- :ref:`service user tokens <user_token_timeout>`.
+ :ref:`service user tokens <service_user_token>`.
#. Use the :command:`openstack image list` command to check the status
until the status is ``ACTIVE``:
diff --git a/doc/source/admin/scheduling.rst b/doc/source/admin/scheduling.rst
index 9071c92ac9..353514ab55 100644
--- a/doc/source/admin/scheduling.rst
+++ b/doc/source/admin/scheduling.rst
@@ -1049,6 +1049,37 @@ Otherwise, it will fall back to the
more than one value is found for a host in aggregate metadata, the minimum
value will be used.
+``HypervisorVersionWeigher``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 28.0.0 (Bobcat)
+
+Weigh hosts by their relative hypervisor version reported by the virt driver.
+
+While the hypervisor_version field for all virt drivers is an int,
+each nova virt driver uses a different algorithm to convert the hypervisor-specific
+version sequence into an int. As such, the values are not directly comparable
+between hosts with different hypervisors.
+
+For example, the ironic virt driver uses the ironic API micro-version as the
+hypervisor version for a given node, while the libvirt driver encodes the
+libvirt version as an int: libvirt ``7.1.123`` becomes ``7001123``, ironic
+``1.82`` becomes ``1``, and Hyper-V ``6.3`` becomes ``6003``.
+
+If you have a mixed virt driver deployment of ironic and non-ironic hosts,
+nothing special needs to be done: ironic nodes are scheduled using custom
+resource classes, so ironic flavors will never match non-ironic compute nodes.
+
+If a deployment has multiple non-ironic virt drivers, it is recommended to use
+aggregates to group hosts by virt driver. While this is not strictly required,
+it is desirable to avoid biasing the weigher towards one virt driver.
+See :ref:`filtering_hosts_by_isolating_aggregates` and :ref:`AggregateImagePropertiesIsolation`
+for more information.
+
+The default behavior of the HypervisorVersionWeigher is to select newer hosts.
+If you prefer to invert the behavior, set the
+:oslo.config:option:`filter_scheduler.hypervisor_version_weight_multiplier` option
+to a negative number; the weighing then has the opposite effect of the default.
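
The bias that the aggregate advice guards against is easy to see from the
min-max normalization nova applies to each weigher's raw values; a short
illustrative sketch (not nova code, a two-host example assumed):

.. code-block:: python

   # Nova normalizes each weigher's raw values to [0, 1] before applying
   # the multiplier (see nova.weights.normalize).
   def normalize(values):
       lo, hi = min(values), max(values)
       return [0.0 if hi == lo else (v - lo) / (hi - lo) for v in values]

   raw = [7001123, 6003]  # libvirt 7.1.123 host vs Hyper-V 6.3 host
   multiplier = 1.0       # hypervisor_version_weight_multiplier
   print([multiplier * w for w in normalize(raw)])
   # [1.0, 0.0] -> the libvirt host always wins, regardless of real "age"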
Utilization-aware scheduling
----------------------------
diff --git a/doc/source/admin/support-compute.rst b/doc/source/admin/support-compute.rst
index 8522e51d79..31e32fd1dd 100644
--- a/doc/source/admin/support-compute.rst
+++ b/doc/source/admin/support-compute.rst
@@ -478,67 +478,3 @@ Ensure the ``compute`` endpoint in the identity service catalog is pointing
at ``/v2.1`` instead of ``/v2``. The former route supports microversions,
while the latter route is considered the legacy v2.0 compatibility-mode
route which renders all requests as if they were made on the legacy v2.0 API.
-
-
-.. _user_token_timeout:
-
-User token times out during long-running operations
----------------------------------------------------
-
-Problem
-~~~~~~~
-
-Long-running operations such as live migration or snapshot can sometimes
-overrun the expiry of the user token. In such cases, post operations such
-as cleaning up after a live migration can fail when the nova-compute service
-needs to cleanup resources in other services, such as in the block-storage
-(cinder) or networking (neutron) services.
-
-For example:
-
-.. code-block:: console
-
- 2018-12-17 13:47:29.591 16987 WARNING nova.virt.libvirt.migration [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Live migration not completed after 2400 sec
- 2018-12-17 13:47:30.097 16987 WARNING nova.virt.libvirt.driver [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Migration operation was cancelled
- 2018-12-17 13:47:30.299 16987 ERROR nova.virt.libvirt.driver [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Live Migration failure: operation aborted: migration job: canceled by client: libvirtError: operation aborted: migration job: canceled by client
- 2018-12-17 13:47:30.685 16987 INFO nova.compute.manager [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Swapping old allocation on 3e32d595-bd1f-4136-a7f4-c6703d2fbe18 held by migration 17bec61d-544d-47e0-a1c1-37f9d7385286 for instance
- 2018-12-17 13:47:32.450 16987 ERROR nova.volume.cinder [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] Delete attachment failed for attachment 58997d5b-24f0-4073-819e-97916fb1ee19. Error: The request you have made requires authentication. (HTTP 401) Code: 401: Unauthorized: The request you have made requires authentication. (HTTP 401)
-
-Solution
-~~~~~~~~
-
-Configure nova to use service user tokens to supplement the regular user token
-used to initiate the operation. The identity service (keystone) will then
-authenticate a request using the service user token if the user token has
-already expired.
-
-To use, create a service user in the identity service similar as you would when
-creating the ``nova`` service user.
-
-Then configure the :oslo.config:group:`service_user` section of the nova
-configuration file, for example:
-
-.. code-block:: ini
-
- [service_user]
- send_service_user_token = True
- auth_type = password
- project_domain_name = Default
- project_name = service
- user_domain_name = Default
- password = secretservice
- username = nova
- auth_url = https://104.130.216.102/identity
- ...
-
-And configure the other identity options as necessary for the service user,
-much like you would configure nova to work with the image service (glance)
-or networking service.
-
-.. note::
-
- Please note that the role of the :oslo.config:group:`service_user` you
- configure needs to be a superset of
- :oslo.config:option:`keystone_authtoken.service_token_roles` (The option
- :oslo.config:option:`keystone_authtoken.service_token_roles` is configured
- in cinder, glance and neutron).
diff --git a/doc/source/install/compute-install-obs.rst b/doc/source/install/compute-install-obs.rst
index c5c1d29fb3..c227b6eba4 100644
--- a/doc/source/install/compute-install-obs.rst
+++ b/doc/source/install/compute-install-obs.rst
@@ -92,6 +92,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
.. path /etc/nova/nova.conf
diff --git a/doc/source/install/compute-install-rdo.rst b/doc/source/install/compute-install-rdo.rst
index 0a5ad685a6..0c6203a667 100644
--- a/doc/source/install/compute-install-rdo.rst
+++ b/doc/source/install/compute-install-rdo.rst
@@ -84,6 +84,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
.. path /etc/nova/nova.conf
diff --git a/doc/source/install/compute-install-ubuntu.rst b/doc/source/install/compute-install-ubuntu.rst
index 8605c73316..baf0585e52 100644
--- a/doc/source/install/compute-install-ubuntu.rst
+++ b/doc/source/install/compute-install-ubuntu.rst
@@ -74,6 +74,26 @@ Install and configure components
Comment out or remove any other options in the
``[keystone_authtoken]`` section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
.. path /etc/nova/nova.conf
diff --git a/doc/source/install/controller-install-obs.rst b/doc/source/install/controller-install-obs.rst
index 18499612c3..01b7bb0f5a 100644
--- a/doc/source/install/controller-install-obs.rst
+++ b/doc/source/install/controller-install-obs.rst
@@ -260,6 +260,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the
management interface IP address of the controller node:
diff --git a/doc/source/install/controller-install-rdo.rst b/doc/source/install/controller-install-rdo.rst
index fd2419631e..b6098f1776 100644
--- a/doc/source/install/controller-install-rdo.rst
+++ b/doc/source/install/controller-install-rdo.rst
@@ -247,6 +247,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the
management interface IP address of the controller node:
diff --git a/doc/source/install/controller-install-ubuntu.rst b/doc/source/install/controller-install-ubuntu.rst
index 7282b0b2e2..1363a98ba8 100644
--- a/doc/source/install/controller-install-ubuntu.rst
+++ b/doc/source/install/controller-install-ubuntu.rst
@@ -237,6 +237,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the
management interface IP address of the controller node:
diff --git a/doc/source/reference/isolate-aggregates.rst b/doc/source/reference/isolate-aggregates.rst
index f5487df912..7b493f4db9 100644
--- a/doc/source/reference/isolate-aggregates.rst
+++ b/doc/source/reference/isolate-aggregates.rst
@@ -13,6 +13,8 @@
License for the specific language governing permissions and limitations
under the License.
+.. _filtering_hosts_by_isolating_aggregates:
+
Filtering hosts by isolating aggregates
=======================================
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index 1d17ce1c9f..e64b4a2016 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -538,12 +538,6 @@ class Resource(wsgi.Application):
with ResourceExceptionHandler():
action_result = self.dispatch(meth, request, action_args)
except Fault as ex:
- LOG.debug(f'Request method failure captured:\n'
- f' request: {request}\n'
- f' method: {meth}\n'
- f' exception: {ex}\n'
- f' action_args: {action_args}\n',
- exc_info=1)
response = ex
if not response:
diff --git a/nova/cmd/status.py b/nova/cmd/status.py
index 29e4a5d01e..4a4e28d7e8 100644
--- a/nova/cmd/status.py
+++ b/nova/cmd/status.py
@@ -271,6 +271,15 @@ https://docs.openstack.org/latest/nova/admin/hw_machine_type.html"""))
return upgradecheck.Result(upgradecheck.Code.SUCCESS)
+ def _check_service_user_token(self):
+ if not CONF.service_user.send_service_user_token:
+ msg = (_("""
+Service user token configuration is required for all Nova services.
+For more details see the following:
+https://docs.openstack.org/latest/nova/admin/configuration/service-user-token.html""")) # noqa
+ return upgradecheck.Result(upgradecheck.Code.FAILURE, msg)
+ return upgradecheck.Result(upgradecheck.Code.SUCCESS)
+
# The format of the check functions is to return an upgradecheck.Result
# object with the appropriate upgradecheck.Code and details set. If the
# check hits warnings or failures then those should be stored in the
@@ -294,6 +303,8 @@ https://docs.openstack.org/latest/nova/admin/hw_machine_type.html"""))
(_('Older than N-1 computes'), _check_old_computes),
# Added in Wallaby
(_('hw_machine_type unset'), _check_machine_type_set),
+ # Added in Bobcat
+ (_('Service User Token Configuration'), _check_service_user_token),
)
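
With the check registered, operators surface it through the existing upgrade
check command (output elided here):

.. code-block:: console

   $ nova-status upgrade check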
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 5ea71827fc..5c42aa4d89 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -3791,9 +3791,21 @@ class ComputeManager(manager.Manager):
try:
compute_node = self._get_compute_info(context, self.host)
scheduled_node = compute_node.hypervisor_hostname
- except exception.ComputeHostNotFound:
+ except exception.ComputeHostNotFound as e:
+ # This means we were asked to rebuild one of our own
+ # instances, or another instance as a target of an
+ # evacuation, but we are unable to find a matching compute
+ # node.
LOG.exception('Failed to get compute_info for %s',
self.host)
+ self._set_migration_status(migration, 'failed')
+ self._notify_instance_rebuild_error(context, instance, e,
+ bdms)
+ raise exception.InstanceFaultRollback(
+ inner_exception=exception.BuildAbortException(
+ instance_uuid=instance.uuid,
+ reason=e.format_message()))
+
else:
scheduled_node = instance.node
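
For context, a rough sketch of the assumed consumer-side semantics of
``InstanceFaultRollback`` (paraphrased, not the verbatim nova error handler):

.. code-block:: python

   # The manager-level handler records the wrapped inner exception as the
   # instance fault and re-raises it, instead of rescheduling the rebuild.
   try:
       do_rebuild_instance()
   except exception.InstanceFaultRollback as error:
       compute_utils.add_instance_fault_from_exc(
           context, instance, error.inner_exception)
       raise error.inner_exception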
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 3f911f3708..9ee6670c17 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -146,16 +146,20 @@ class ResourceTracker(object):
during the instance build.
"""
if self.disabled(nodename):
- # instance_claim() was called before update_available_resource()
- # (which ensures that a compute node exists for nodename). We
- # shouldn't get here but in case we do, just set the instance's
- # host and nodename attribute (probably incorrect) and return a
- # NoopClaim.
- # TODO(jaypipes): Remove all the disabled junk from the resource
- # tracker. Servicegroup API-level active-checking belongs in the
- # nova-compute manager.
- self._set_instance_host_and_node(instance, nodename)
- return claims.NopClaim()
+ # If we get here, it means we are trying to claim for an instance
+ # that was scheduled to a node that we do not have in our list,
+ # or is in some other way unmanageable by this node. This would
+ # mean that we are unable to account for resources, create
+ # allocations in placement, or do any of the other accounting
+ # necessary for this to work. In the past, this situation was
+ # effectively ignored silently, but in a world where we track
+ # resources with placement and instance assignment to compute nodes
+ # by service, we can no longer be leaky.
+ raise exception.ComputeResourcesUnavailable(
+ ('Attempt to claim resources for instance %(inst)s '
+ 'on unknown node %(node)s failed') % {
+ 'inst': instance.uuid,
+ 'node': nodename})
# sanity checks:
if instance.host:
@@ -280,9 +284,17 @@ class ResourceTracker(object):
context, instance, new_flavor, nodename, move_type)
if self.disabled(nodename):
- # compute_driver doesn't support resource tracking, just
- # generate the migration record and continue the resize:
- return claims.NopClaim(migration=migration)
+ # This means we were asked to accept an incoming migration to a
+ # node that we do not own or track. We really should not get here,
+ # but if we do, we must refuse to continue with the migration
+ # process, since we cannot account for those resources, create
+ # allocations in placement, etc. This has been a silent resource
+ # leak in the past, but it must be a hard failure now.
+ raise exception.ComputeResourcesUnavailable(
+ ('Attempt to claim move resources for instance %(inst)s on '
+ 'unknown node %(node)s failed') % {
+ 'inst': instance.uuid,
+ 'node': nodename})
cn = self.compute_nodes[nodename]
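
For context on both hunks above, the tracker's ``disabled()`` predicate is
roughly the following (a paraphrased sketch, not a verbatim copy):

.. code-block:: python

   # A node is "disabled" for claims when the tracker has no ComputeNode
   # record for it, or the virt driver no longer reports the node.
   def disabled(self, nodename):
       return (nodename not in self.compute_nodes or
               not self.driver.node_is_available(nodename))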
diff --git a/nova/conf/scheduler.py b/nova/conf/scheduler.py
index c75bd07c5b..c7aa2ad76d 100644
--- a/nova/conf/scheduler.py
+++ b/nova/conf/scheduler.py
@@ -464,6 +464,49 @@ Possible values:
* An integer or float value, where the value corresponds to the multiplier
ratio for this weigher.
"""),
+ cfg.FloatOpt("hypervisor_version_weight_multiplier",
+ default=1.0,
+ help="""
+Hypervisor Version weight multiplier ratio.
+
+The multiplier is used for weighting hosts based on the reported
+hypervisor version.
+Negative numbers indicate preferring older hosts,
+the default is to prefer newer hosts to aid with upgrades.
+
+Possible values:
+
+* An integer or float value, where the value corresponds to the multiplier
+ ratio for this weigher.
+
+Example:
+
+* Strongly prefer older hosts
+
+ .. code-block:: ini
+
+ [filter_scheduler]
+ hypervisor_version_weight_multiplier=-1000
+
+
+* Moderately prefer new hosts
+
+ .. code-block:: ini
+
+ [filter_scheduler]
+ hypervisor_version_weight_multiplier=2.5
+
+* Disable weigher influence
+
+ .. code-block:: ini
+
+ [filter_scheduler]
+ hypervisor_version_weight_multiplier=0
+
+Related options:
+
+* ``[filter_scheduler] weight_classes``
+"""),
cfg.FloatOpt("io_ops_weight_multiplier",
default=-1.0,
help="""
diff --git a/nova/network/neutron.py b/nova/network/neutron.py
index 6c9f19f010..eef2abe364 100644
--- a/nova/network/neutron.py
+++ b/nova/network/neutron.py
@@ -222,13 +222,15 @@ def _get_auth_plugin(context, admin=False):
# support some services (metadata API) where an admin context is used
# without an auth token.
global _ADMIN_AUTH
+ user_auth = None
if admin or (context.is_admin and not context.auth_token):
if not _ADMIN_AUTH:
_ADMIN_AUTH = _load_auth_plugin(CONF)
- return _ADMIN_AUTH
+ user_auth = _ADMIN_AUTH
- if context.auth_token:
- return service_auth.get_auth_plugin(context)
+ if context.auth_token or user_auth:
+ # When user_auth = None, user_auth will be extracted from the context.
+ return service_auth.get_auth_plugin(context, user_auth=user_auth)
# We did not get a user token and we should not be using
# an admin token so log an error
@@ -3907,7 +3909,7 @@ class API:
'Failed to get segment IDs for network %s' % network_id) from e
# The segment field of an unconfigured subnet could be None
return [subnet['segment_id'] for subnet in subnets
- if subnet['segment_id'] is not None]
+ if subnet.get('segment_id') is not None]
def get_segment_id_for_subnet(
self,
diff --git a/nova/scheduler/weights/hypervisor_version.py b/nova/scheduler/weights/hypervisor_version.py
new file mode 100644
index 0000000000..0cd7b0a824
--- /dev/null
+++ b/nova/scheduler/weights/hypervisor_version.py
@@ -0,0 +1,39 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Hypervisor Version Weigher. Weigh hosts by their relative hypervisor version.
+
+The default is to select newer hosts. If you prefer to invert the behavior,
+set the 'hypervisor_version_weight_multiplier' option to a negative number;
+the weighing then has the opposite effect of the default.
+"""
+
+import nova.conf
+from nova.scheduler import utils
+from nova.scheduler import weights
+
+CONF = nova.conf.CONF
+
+
+class HypervisorVersionWeigher(weights.BaseHostWeigher):
+
+ def weight_multiplier(self, host_state):
+ """Override the weight multiplier."""
+ return utils.get_weight_multiplier(
+ host_state, 'hypervisor_version_weight_multiplier',
+ CONF.filter_scheduler.hypervisor_version_weight_multiplier)
+
+ def _weigh_object(self, host_state, weight_properties):
+ """Higher weights win. We want newer hosts by default."""
+ # convert None to 0
+ return host_state.hypervisor_version or 0
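
The weigher is picked up through the scheduler's ``weight_classes`` option,
which defaults to all weighers; a hypothetical override that runs only this
weigher might look like:

.. code-block:: ini

   [filter_scheduler]
   weight_classes = nova.scheduler.weights.hypervisor_version.HypervisorVersionWeigher
   hypervisor_version_weight_multiplier = 2.5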
diff --git a/nova/service_auth.py b/nova/service_auth.py
index f5ae0646d8..aa8fd8fa12 100644
--- a/nova/service_auth.py
+++ b/nova/service_auth.py
@@ -30,8 +30,10 @@ def reset_globals():
_SERVICE_AUTH = None
-def get_auth_plugin(context):
- user_auth = context.get_auth_plugin()
+def get_auth_plugin(context, user_auth=None):
+ # user_auth may be passed in when the RequestContext is anonymous, such as
+ # when get_admin_context() is used for API calls by nova-manage.
+ user_auth = user_auth or context.get_auth_plugin()
if CONF.service_user.send_service_user_token:
global _SERVICE_AUTH
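
The two-token mechanics come from keystoneauth1; a minimal sketch of the
pattern used here, assuming the ``[service_user]`` options are registered:

.. code-block:: python

   # Wrap the end user's auth plugin with the service credentials so each
   # request carries X-Auth-Token (user) and X-Service-Token (service).
   from keystoneauth1 import loading as ks_loading
   from keystoneauth1 import service_token

   def wrap_with_service_token(conf, user_auth):
       service_auth = ks_loading.load_auth_from_conf_options(
           conf, group='service_user')
       return service_token.ServiceTokenAuthWrapper(
           user_auth=user_auth, service_auth=service_auth)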
diff --git a/nova/tests/fixtures/nova.py b/nova/tests/fixtures/nova.py
index 5fd893e7dc..abfc3ecc6c 100644
--- a/nova/tests/fixtures/nova.py
+++ b/nova/tests/fixtures/nova.py
@@ -1318,6 +1318,77 @@ class PrivsepFixture(fixtures.Fixture):
nova.privsep.sys_admin_pctxt, 'client_mode', False))
+class CGroupsFixture(fixtures.Fixture):
+ """Mocks checks made for available subsystems on the host's control group.
+
+ The fixture mocks all calls made on the host to verify the capabilities
+ provided by its kernel. Through this, one can simulate the underlying
+ system that hosts run on top of and have tests react to the expected
+ outcomes.
+
+ Usage sample:
+ >>> cgroups = self.useFixture(CGroupsFixture())
+ >>> cgroups = self.useFixture(CGroupsFixture(version=2))
+ >>> cgroups = self.useFixture(CGroupsFixture())
+ ... cgroups.version = 2
+
+ :attr version: Arranges mocks to simulate a host interacting with nova
+ using the given version of cgroups.
+ Available values are:
+ - 0: All checks related to cgroups will return False.
+ - 1: Checks related to cgroups v1 will return True.
+ - 2: Checks related to cgroups v2 will return True.
+ Defaults to 1.
+ """
+
+ def __init__(self, version=1):
+ self._cpuv1 = None
+ self._cpuv2 = None
+
+ self._version = version
+
+ @property
+ def version(self):
+ return self._version
+
+ @version.setter
+ def version(self, value):
+ self._version = value
+ self._update_mocks()
+
+ def setUp(self):
+ super().setUp()
+ self._cpuv1 = self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.host.Host._has_cgroupsv1_cpu_controller')).mock
+ self._cpuv2 = self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.host.Host._has_cgroupsv2_cpu_controller')).mock
+ self._update_mocks()
+
+ def _update_mocks(self):
+ if not self._cpuv1:
+ return
+
+ if not self._cpuv2:
+ return
+
+ if self.version == 0:
+ self._cpuv1.return_value = False
+ self._cpuv2.return_value = False
+ return
+
+ if self.version == 1:
+ self._cpuv1.return_value = True
+ self._cpuv2.return_value = False
+ return
+
+ if self.version == 2:
+ self._cpuv1.return_value = False
+ self._cpuv2.return_value = True
+ return
+
+ raise ValueError(f"Unknown cgroups version: '{self.version}'.")
+
+
class NoopQuotaDriverFixture(fixtures.Fixture):
"""A fixture to run tests using the NoopQuotaDriver.
diff --git a/nova/tests/functional/libvirt/base.py b/nova/tests/functional/libvirt/base.py
index 7b6ee10631..1ee46a3217 100644
--- a/nova/tests/functional/libvirt/base.py
+++ b/nova/tests/functional/libvirt/base.py
@@ -43,6 +43,7 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
super(ServersTestBase, self).setUp()
self.useFixture(nova_fixtures.LibvirtImageBackendFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture())
self.useFixture(nova_fixtures.OSBrickFixture())
diff --git a/nova/tests/functional/libvirt/test_evacuate.py b/nova/tests/functional/libvirt/test_evacuate.py
index 92d7ffba29..0e89a3cdb6 100644
--- a/nova/tests/functional/libvirt/test_evacuate.py
+++ b/nova/tests/functional/libvirt/test_evacuate.py
@@ -429,6 +429,7 @@ class _LibvirtEvacuateTest(integrated_helpers.InstanceHelperMixin):
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.GlanceFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
fake_network.set_stub_network_methods(self)
api_fixture = self.useFixture(
diff --git a/nova/tests/functional/libvirt/test_numa_servers.py b/nova/tests/functional/libvirt/test_numa_servers.py
index bb428159ad..5b73e1b965 100644
--- a/nova/tests/functional/libvirt/test_numa_servers.py
+++ b/nova/tests/functional/libvirt/test_numa_servers.py
@@ -346,6 +346,76 @@ class NUMAServersTest(NUMAServersTestBase):
# There shouldn't be any hosts available to satisfy this request
self._run_build_test(flavor_id, end_status='ERROR')
+ def test_create_server_with_mixed_policy_asymmetric_multi_numa(self):
+ """Boot an instance stretched to two NUMA nodes requesting only
+ shared CPUs in one NUMA and only dedicated in the other NUMA node.
+ """
+ # shared dedicated
+ # NUMA0 pCPU | 0 | 2 3
+ # NUMA1 pCPU | | 6 7
+ self.flags(
+ cpu_shared_set='0',
+ cpu_dedicated_set='2,3,6,7',
+ group='compute',
+ )
+ self.flags(vcpu_pin_set=None)
+
+ host_info = fakelibvirt.HostInfo(
+ cpu_nodes=2, cpu_sockets=1, cpu_cores=4, cpu_threads=1)
+ self.start_compute(host_info=host_info, hostname='compute1')
+
+ # sanity check the created host topology object; this is really just a
+ # test of the fakelibvirt module
+ host_numa = objects.NUMATopology.obj_from_db_obj(
+ objects.ComputeNode.get_by_nodename(
+ self.ctxt, 'compute1',
+ ).numa_topology
+ )
+ self.assertEqual(2, len(host_numa.cells))
+ self.assertEqual({0}, host_numa.cells[0].cpuset)
+ self.assertEqual({2, 3}, host_numa.cells[0].pcpuset)
+
+ self.assertEqual(set(), host_numa.cells[1].cpuset)
+ self.assertEqual({6, 7}, host_numa.cells[1].pcpuset)
+
+ # create a flavor with 1 shared and 2 dedicated CPUs stretched to
+ # different NUMA nodes
+ extra_spec = {
+ 'hw:cpu_policy': 'mixed',
+ 'hw:cpu_dedicated_mask': '^0',
+ 'hw:numa_nodes': '2',
+ 'hw:numa_cpus.0': '0',
+ 'hw:numa_cpus.1': '1,2',
+ 'hw:numa_mem.0': '256',
+ 'hw:numa_mem.1': '768',
+ }
+ flavor_id = self._create_flavor(
+ vcpu=3, memory_mb=1024, extra_spec=extra_spec)
+ expected_usage = {
+ 'DISK_GB': 20, 'MEMORY_MB': 1024, 'PCPU': 2, 'VCPU': 1,
+ }
+ # The only possible solution (ignoring the order of vCPU1,2):
+ # vCPU 0 => pCPU 0, NUMA0, shared
+ # vCPU 1 => pCPU 6, NUMA1, dedicated
+ # vCPU 2 => pCPU 7, NUMA1, dedicated
+ server = self._run_build_test(
+ flavor_id, expected_usage=expected_usage)
+
+ # sanity check the instance topology
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ self.assertEqual(2, len(inst.numa_topology.cells))
+
+ self.assertEqual({0}, inst.numa_topology.cells[0].cpuset)
+ self.assertEqual(set(), inst.numa_topology.cells[0].pcpuset)
+ self.assertIsNone(inst.numa_topology.cells[0].cpu_pinning)
+
+ self.assertEqual(set(), inst.numa_topology.cells[1].cpuset)
+ self.assertEqual({1, 2}, inst.numa_topology.cells[1].pcpuset)
+ self.assertEqual(
+ {6, 7},
+ set(inst.numa_topology.cells[1].cpu_pinning.values())
+ )
+
def test_create_server_with_dedicated_policy_old_configuration(self):
"""Create a server using the legacy extra spec and configuration.
diff --git a/nova/tests/functional/libvirt/test_vpmem.py b/nova/tests/functional/libvirt/test_vpmem.py
index 1200f80357..cb524fe8b6 100644
--- a/nova/tests/functional/libvirt/test_vpmem.py
+++ b/nova/tests/functional/libvirt/test_vpmem.py
@@ -77,6 +77,7 @@ class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):
'nova.privsep.libvirt.get_pmem_namespaces',
return_value=self.fake_pmem_namespaces))
self.useFixture(nova_fixtures.LibvirtImageBackendFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
return_value={'total': 128,
diff --git a/nova/tests/functional/regressions/test_bug_1595962.py b/nova/tests/functional/regressions/test_bug_1595962.py
index 94421a81f9..9232eea335 100644
--- a/nova/tests/functional/regressions/test_bug_1595962.py
+++ b/nova/tests/functional/regressions/test_bug_1595962.py
@@ -47,6 +47,7 @@ class TestSerialConsoleLiveMigrate(test.TestCase):
'nova.virt.libvirt.guest.libvirt',
fakelibvirt))
self.useFixture(nova_fixtures.LibvirtFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
self.admin_api = api_fixture.admin_api
self.api = api_fixture.api
diff --git a/nova/tests/functional/regressions/test_bug_1995153.py b/nova/tests/functional/regressions/test_bug_1995153.py
new file mode 100644
index 0000000000..f4e61d06df
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1995153.py
@@ -0,0 +1,107 @@
+# Copyright (C) 2023 Red Hat, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import fixtures
+from unittest import mock
+
+from oslo_serialization import jsonutils
+from oslo_utils import units
+
+from nova.objects import fields
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional import integrated_helpers
+from nova.tests.functional.libvirt import base
+
+
+class Bug1995153RegressionTest(
+ base.ServersTestBase,
+ integrated_helpers.InstanceHelperMixin
+):
+
+ ADDITIONAL_FILTERS = ['NUMATopologyFilter', 'PciPassthroughFilter']
+
+ ALIAS_NAME = 'a1'
+ PCI_DEVICE_SPEC = [jsonutils.dumps(
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ }
+ )]
+ # we set the numa_affinity policy to required to ensure that strict
+ # affinity between pci devices and the guest cpu and memory is enforced.
+ PCI_ALIAS = [jsonutils.dumps(
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ 'name': ALIAS_NAME,
+ 'device_type': fields.PciDeviceType.STANDARD,
+ 'numa_policy': fields.PCINUMAAffinityPolicy.REQUIRED,
+ }
+ )]
+
+ def setUp(self):
+ super(Bug1995153RegressionTest, self).setUp()
+ self.flags(
+ device_spec=self.PCI_DEVICE_SPEC,
+ alias=self.PCI_ALIAS,
+ group='pci'
+ )
+ host_manager = self.scheduler.manager.host_manager
+ pci_filter_class = host_manager.filter_cls_map['PciPassthroughFilter']
+ host_pass_mock = mock.Mock(wraps=pci_filter_class().host_passes)
+ self.mock_filter = self.useFixture(fixtures.MockPatch(
+ 'nova.scheduler.filters.pci_passthrough_filter'
+ '.PciPassthroughFilter.host_passes',
+ side_effect=host_pass_mock)).mock
+
+ def test_socket_policy_bug_1995153(self):
+ """Previously, the numa_usage_from_instance_numa() method in
+ hardware.py saved the host NUMATopology object with NUMACells that have
+ no `socket` set. This was an omission in the original implementation of
+ the `socket` PCI NUMA affinity policy. The consequence was that any
+ code path that called into numa_usage_from_instance_numa() would
+ clobber the host NUMA topology in the database with a socket-less
+ version. Booting an instance with NUMA topology would do that, for
+ example. If then a second instance was booted with the `socket` PCI
+ NUMA affinity policy, it would read the socket-less host NUMATopology
+ from the database, and error out with a NotImplementedError. This was
+ bug 1995153. Demonstrate that this is fixed.
+ """
+ host_info = fakelibvirt.HostInfo(
+ cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
+ kB_mem=(16 * units.Gi) // units.Ki)
+ self.flags(cpu_dedicated_set='0-3', group='compute')
+ pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=1)
+
+ self.start_compute(host_info=host_info, pci_info=pci_info)
+
+ extra_spec = {
+ 'hw:cpu_policy': 'dedicated',
+ 'pci_passthrough:alias': '%s:1' % self.ALIAS_NAME,
+ 'hw:pci_numa_affinity_policy': 'socket'
+ }
+ # Boot a first instance with a guest NUMA topology to run the
+ # numa_usage_from_instance_numa() and update the host NUMATopology in
+ # the database.
+ self._create_server(
+ flavor_id=self._create_flavor(
+ extra_spec={'hw:cpu_policy': 'dedicated'}))
+
+ # Boot an instance with the `socket` PCI NUMA affinity policy and
+ # assert that it boots correctly now.
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ self._create_server(flavor_id=flavor_id)
+ self.assertTrue(self.mock_filter.called)
diff --git a/nova/tests/unit/cmd/test_status.py b/nova/tests/unit/cmd/test_status.py
index f5fcc168ee..c6a0ab2d52 100644
--- a/nova/tests/unit/cmd/test_status.py
+++ b/nova/tests/unit/cmd/test_status.py
@@ -446,3 +446,19 @@ class TestCheckMachineTypeUnset(test.NoDBTestCase):
upgradecheck.Code.SUCCESS,
result.code
)
+
+
+class TestUpgradeCheckServiceUserToken(test.NoDBTestCase):
+
+ def setUp(self):
+ super().setUp()
+ self.cmd = status.UpgradeCommands()
+
+ def test_service_user_token_not_configured(self):
+ result = self.cmd._check_service_user_token()
+ self.assertEqual(upgradecheck.Code.FAILURE, result.code)
+
+ def test_service_user_token_configured(self):
+ self.flags(send_service_user_token=True, group='service_user')
+ result = self.cmd._check_service_user_token()
+ self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index 49cf15ec17..36bcd368dc 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -5672,6 +5672,7 @@ class ComputeTestCase(BaseTestCase,
pagesize=2048,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([1, 2]),
siblings=[set([1]), set([2])],
mempages=[objects.NUMAPagesTopology(
@@ -5687,6 +5688,7 @@ class ComputeTestCase(BaseTestCase,
pagesize=2048,
memory_usage=0,
cpu_usage=0,
+ socket=0,
siblings=[set([3]), set([4])],
mempages=[objects.NUMAPagesTopology(
size_kb=2048, total=256, used=0)])
@@ -13513,7 +13515,8 @@ class EvacuateHostTestCase(BaseTestCase):
super(EvacuateHostTestCase, self).tearDown()
def _rebuild(self, on_shared_storage=True, migration=None,
- send_node=False, vm_states_is_stopped=False):
+ send_node=False, vm_states_is_stopped=False,
+ expect_error=False):
network_api = self.compute.network_api
ctxt = context.get_admin_context()
@@ -13560,6 +13563,11 @@ class EvacuateHostTestCase(BaseTestCase):
action='power_off', phase='start'),
mock.call(ctxt, self.inst, self.inst.host,
action='power_off', phase='end')])
+ elif expect_error:
+ mock_notify_rebuild.assert_has_calls([
+ mock.call(ctxt, self.inst, self.compute.host,
+ phase='error', exception=mock.ANY, bdms=bdms)])
+ return
else:
mock_notify_rebuild.assert_has_calls([
mock.call(ctxt, self.inst, self.inst.host, phase='start',
@@ -13614,14 +13622,15 @@ class EvacuateHostTestCase(BaseTestCase):
mock.patch.object(self.compute, '_get_compute_info',
side_effect=fake_get_compute_info)
) as (mock_inst, mock_get):
- self._rebuild()
+ self.assertRaises(exception.InstanceFaultRollback,
+ self._rebuild, expect_error=True)
# Should be on destination host
instance = db.instance_get(self.context, self.inst.id)
- self.assertEqual(instance['host'], self.compute.host)
- self.assertIsNone(instance['node'])
- self.assertTrue(mock_inst.called)
- self.assertTrue(mock_get.called)
+ self.assertEqual('fake_host_2', instance['host'])
+ self.assertEqual('fakenode2', instance['node'])
+ mock_inst.assert_not_called()
+ mock_get.assert_called_once_with(mock.ANY, self.compute.host)
def test_rebuild_on_host_node_passed(self):
patch_get_info = mock.patch.object(self.compute, '_get_compute_info')
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 1c69cd8f1c..73c9d32197 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -2560,10 +2560,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.assertFalse(mock_get_info.called)
self.assertFalse(mock_sync_power_state.called)
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch('nova.compute.manager.ComputeManager.'
'_sync_instance_power_state')
def test_query_driver_power_state_and_sync_not_found_driver(
- self, mock_sync_power_state):
+ self, mock_sync_power_state, mock_claim):
error = exception.InstanceNotFound(instance_id=1)
with mock.patch.object(self.compute.driver,
'get_info', side_effect=error) as mock_get_info:
@@ -6568,6 +6569,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
self.compute.driver)
self.compute.rt = fake_rt
+ self.compute.driver._set_nodes([self.node])
+ self.compute.rt.compute_nodes = {self.node: objects.ComputeNode()}
self.allocations = {
uuids.provider1: {
@@ -6857,6 +6860,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_get_arqs.assert_called_once_with(
self.instance.uuid, only_resolved=True)
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(fake_driver.FakeDriver, 'spawn')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
@@ -6868,7 +6872,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def test_spawn_called_with_accel_info(self, mock_ins_usage,
mock_ins_create, mock_dev_tag, mock_certs, mock_req_group_map,
- mock_get_allocations, mock_ins_save, mock_spawn):
+ mock_get_allocations, mock_ins_save, mock_spawn, mock_claim):
accel_info = [{'k1': 'v1', 'k2': 'v2'}]
@@ -7142,13 +7146,15 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.security_groups, self.block_device_mapping,
request_spec={}, host_lists=[fake_host_list])
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(manager.ComputeManager, '_shutdown_instance')
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
@mock.patch.object(fake_driver.FakeDriver, 'spawn')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def test_rescheduled_exception_with_non_ascii_exception(self,
- mock_notify, mock_save, mock_spawn, mock_build, mock_shutdown):
+ mock_notify, mock_save, mock_spawn, mock_build, mock_shutdown,
+ mock_claim):
exc = exception.NovaException(u's\xe9quence')
mock_build.return_value = self.network_info
@@ -7164,7 +7170,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.accel_uuids)
mock_save.assert_has_calls([
mock.call(),
- mock.call(),
mock.call(expected_task_state='block_device_mapping'),
])
mock_notify.assert_has_calls([
@@ -7670,6 +7675,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.assertEqual(10, mock_failed.call_count)
mock_succeeded.assert_not_called()
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(manager.ComputeManager, '_shutdown_instance')
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
@mock.patch.object(fake_driver.FakeDriver, 'spawn')
@@ -7677,7 +7683,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def _test_instance_exception(self, exc, raised_exc,
mock_notify, mock_save, mock_spawn,
- mock_build, mock_shutdown):
+ mock_build, mock_shutdown, mock_claim):
"""This method test the instance related InstanceNotFound
and reschedule on exception errors. The test cases get from
arguments.
@@ -7700,7 +7706,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_save.assert_has_calls([
mock.call(),
- mock.call(),
mock.call(expected_task_state='block_device_mapping')])
mock_notify.assert_has_calls([
mock.call(self.context, self.instance, 'create.start',
@@ -7811,11 +7816,12 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
'_shutdown_instance'),
mock.patch.object(self.compute,
'_validate_instance_group_policy'),
+ mock.patch.object(self.compute.rt, 'instance_claim'),
mock.patch('nova.compute.utils.notify_about_instance_create')
) as (spawn, save,
_build_networks_for_instance, _notify_about_instance_usage,
_shutdown_instance, _validate_instance_group_policy,
- mock_notify):
+ mock_claim, mock_notify):
self.assertRaises(exception.BuildAbortException,
self.compute._build_and_run_instance, self.context,
@@ -7846,7 +7852,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
save.assert_has_calls([
mock.call(),
- mock.call(),
mock.call(
expected_task_state=task_states.BLOCK_DEVICE_MAPPING)])
@@ -7908,11 +7913,12 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
request_spec={}, host_lists=[fake_host_list])
mock_nil.assert_called_once_with(self.instance)
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(manager.ComputeManager, '_build_resources')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def test_build_resources_buildabort_reraise(self, mock_notify, mock_save,
- mock_build):
+ mock_build, mock_claim):
exc = exception.BuildAbortException(
instance_uuid=self.instance.uuid, reason='')
mock_build.side_effect = exc
@@ -7926,7 +7932,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.node, self.limits, self.filter_properties,
request_spec=[], accel_uuids=self.accel_uuids)
- mock_save.assert_called_once_with()
mock_notify.assert_has_calls([
mock.call(self.context, self.instance, 'create.start',
extra_usage_info={'image_name': self.image.get('name')}),
@@ -8581,10 +8586,11 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
ctxt, instance, req_networks)
warning_mock.assert_not_called()
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch('nova.compute.utils.notify_about_instance_create')
@mock.patch.object(manager.ComputeManager, '_instance_update')
def test_launched_at_in_create_end_notification(self,
- mock_instance_update, mock_notify_instance_create):
+ mock_instance_update, mock_notify_instance_create, mock_claim):
def fake_notify(*args, **kwargs):
if args[2] == 'create.end':
@@ -8624,6 +8630,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.flags(default_access_ip_network_name='test1')
instance = fake_instance.fake_db_instance()
+ @mock.patch.object(self.compute.rt, 'instance_claim')
@mock.patch.object(db, 'instance_update_and_get_original',
return_value=({}, instance))
@mock.patch.object(self.compute.driver, 'spawn')
@@ -8632,7 +8639,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(db, 'instance_extra_update_by_uuid')
@mock.patch.object(self.compute, '_notify_about_instance_usage')
def _check_access_ip(mock_notify, mock_extra, mock_networks,
- mock_spawn, mock_db_update):
+ mock_spawn, mock_db_update, mock_claim):
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
@@ -8653,8 +8660,10 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
_check_access_ip()
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(manager.ComputeManager, '_instance_update')
- def test_create_error_on_instance_delete(self, mock_instance_update):
+ def test_create_error_on_instance_delete(self, mock_instance_update,
+ mock_claim):
def fake_notify(*args, **kwargs):
if args[2] == 'create.error':
@@ -8668,7 +8677,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock.patch.object(self.compute,
'_build_networks_for_instance', return_value=[]),
mock.patch.object(self.instance, 'save',
- side_effect=[None, None, None, exc]),
+ side_effect=[None, None, exc]),
mock.patch.object(self.compute, '_notify_about_instance_usage',
side_effect=fake_notify)
) as (mock_spawn, mock_networks, mock_save, mock_notify):
@@ -8697,7 +8706,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock.patch.object(
self.compute, '_build_networks_for_instance', return_value=[]),
mock.patch.object(self.instance, 'save'),
- ) as (mock_spawn, mock_networks, mock_save):
+ mock.patch.object(self.compute.rt, 'instance_claim'),
+ ) as (mock_spawn, mock_networks, mock_save, mock_claim):
self.compute._build_and_run_instance(
self.context,
self.instance, self.image, self.injected_files,
@@ -8747,7 +8757,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock.patch.object(self.instance, 'save'),
mock.patch('nova.scheduler.client.report.'
'SchedulerReportClient._get_resource_provider'),
- ) as (mock_spawn, mock_networks, mock_save, mock_get_rp):
+ mock.patch.object(self.compute.rt, 'instance_claim'),
+ ) as (mock_spawn, mock_networks, mock_save, mock_get_rp, mock_claim):
mock_get_rp.return_value = {
'uuid': uuids.rp1,
'name': 'compute1:sriov-agent:ens3'
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
index cd36b8987f..919dcb8334 100644
--- a/nova/tests/unit/compute/test_resource_tracker.py
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -181,6 +181,7 @@ _NUMA_HOST_TOPOLOGIES = {
memory=_2MB,
cpu_usage=0,
memory_usage=0,
+ socket=0,
mempages=[_NUMA_PAGE_TOPOLOGIES['2mb*1024']],
siblings=[set([1]), set([2])],
pinned_cpus=set()),
@@ -191,6 +192,7 @@ _NUMA_HOST_TOPOLOGIES = {
memory=_2MB,
cpu_usage=0,
memory_usage=0,
+ socket=0,
mempages=[_NUMA_PAGE_TOPOLOGIES['2mb*1024']],
siblings=[set([3]), set([4])],
pinned_cpus=set())]),
@@ -2231,14 +2233,19 @@ class TestInstanceClaim(BaseTestCase):
self.rt.compute_nodes = {}
self.assertTrue(self.rt.disabled(_NODENAME))
- with mock.patch.object(self.instance, 'save'):
- claim = self.rt.instance_claim(mock.sentinel.ctx, self.instance,
- _NODENAME, self.allocations, None)
+ # Reset all changes to the instance to make sure that we can detect
+ # any manipulation after the failure.
+ self.instance.obj_reset_changes(recursive=True)
- self.assertEqual(self.rt.host, self.instance.host)
- self.assertEqual(self.rt.host, self.instance.launched_on)
- self.assertEqual(_NODENAME, self.instance.node)
- self.assertIsInstance(claim, claims.NopClaim)
+ with mock.patch.object(self.instance, 'save') as mock_save:
+ self.assertRaises(exc.ComputeResourcesUnavailable,
+ self.rt.instance_claim,
+ mock.sentinel.ctx, self.instance,
+ _NODENAME, self.allocations, None)
+ mock_save.assert_not_called()
+
+ # Make sure the instance was not touched by the failed claim process
+ self.assertEqual(set(), self.instance.obj_what_changed())
@mock.patch('nova.compute.utils.is_volume_backed_instance')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
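The assertions above lean on oslo.versionedobjects change tracking: obj_reset_changes() clears the dirty-field set and obj_what_changed() reports every field mutated since. A toy illustration under that assumption (the Thing class is hypothetical, not a nova object):

    from oslo_versionedobjects import base, fields

    class Thing(base.VersionedObject):
        # Hypothetical object used only to illustrate change tracking.
        fields = {'host': fields.StringField(nullable=True)}

    t = Thing()
    t.host = 'compute1'
    assert t.obj_what_changed() == {'host'}   # field is dirty after the set
    t.obj_reset_changes()
    assert t.obj_what_changed() == set()      # clean again, as the test expects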
diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py
index 0a1e3f54fc..62321bddec 100644
--- a/nova/tests/unit/compute/test_shelve.py
+++ b/nova/tests/unit/compute/test_shelve.py
@@ -646,7 +646,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.compute.unshelve_instance(
self.context, instance, image=None,
- filter_properties={}, node='fake-node', request_spec=request_spec,
+ filter_properties={}, node='fakenode2', request_spec=request_spec,
accel_uuids=[])
mock_update_pci.assert_called_once_with(
@@ -700,7 +700,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.assertRaises(test.TestingException,
self.compute.unshelve_instance, self.context, instance,
image=shelved_image, filter_properties={},
- node='fake-node', request_spec=fake_spec, accel_uuids=[])
+ node='fakenode2', request_spec=fake_spec, accel_uuids=[])
self.assertEqual(instance.image_ref, initial_image_ref)
@mock.patch.object(objects.InstanceList, 'get_by_filters')
diff --git a/nova/tests/unit/network/test_neutron.py b/nova/tests/unit/network/test_neutron.py
index eefa7b974f..c551191e4c 100644
--- a/nova/tests/unit/network/test_neutron.py
+++ b/nova/tests/unit/network/test_neutron.py
@@ -142,6 +142,22 @@ class TestNeutronClient(test.NoDBTestCase):
self.assertIsInstance(cl.httpclient.auth,
service_token.ServiceTokenAuthWrapper)
+ @mock.patch('nova.service_auth._SERVICE_AUTH')
+ @mock.patch('nova.network.neutron._ADMIN_AUTH')
+ @mock.patch.object(ks_loading, 'load_auth_from_conf_options')
+ def test_admin_with_service_token(
+ self, mock_load, mock_admin_auth, mock_service_auth
+ ):
+ self.flags(send_service_user_token=True, group='service_user')
+
+ admin_context = context.get_admin_context()
+
+ cl = neutronapi.get_client(admin_context)
+ self.assertIsInstance(cl.httpclient.auth,
+ service_token.ServiceTokenAuthWrapper)
+ self.assertEqual(mock_admin_auth, cl.httpclient.auth.user_auth)
+ self.assertEqual(mock_service_auth, cl.httpclient.auth.service_auth)
+
@mock.patch.object(client.Client, "list_networks",
side_effect=exceptions.Unauthorized())
def test_Unauthorized_user(self, mock_list_networks):
@@ -7415,7 +7431,7 @@ class TestAPI(TestAPIBase):
network_id=uuids.network_id, fields='segment_id')
@mock.patch.object(neutronapi, 'get_client')
- def test_get_segment_ids_for_network_with_no_segments(self, mock_client):
+ def test_get_segment_ids_for_network_with_segments_none(self, mock_client):
subnets = {'subnets': [{'segment_id': None}]}
mocked_client = mock.create_autospec(client.Client)
mock_client.return_value = mocked_client
@@ -7431,6 +7447,22 @@ class TestAPI(TestAPIBase):
network_id=uuids.network_id, fields='segment_id')
@mock.patch.object(neutronapi, 'get_client')
+ def test_get_segment_ids_for_network_with_no_segments(self, mock_client):
+ subnets = {'subnets': [{}]}
+ mocked_client = mock.create_autospec(client.Client)
+ mock_client.return_value = mocked_client
+ mocked_client.list_subnets.return_value = subnets
+ with mock.patch.object(
+ self.api, 'has_segment_extension', return_value=True,
+ ):
+ res = self.api.get_segment_ids_for_network(
+ self.context, uuids.network_id)
+ self.assertEqual([], res)
+ mock_client.assert_called_once_with(self.context, admin=True)
+ mocked_client.list_subnets.assert_called_once_with(
+ network_id=uuids.network_id, fields='segment_id')
+
+ @mock.patch.object(neutronapi, 'get_client')
def test_get_segment_ids_for_network_fails(self, mock_client):
mocked_client = mock.create_autospec(client.Client)
mock_client.return_value = mocked_client
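The two new cases differ only in whether the subnet carries the key at all; handling both presumably comes down to dict.get(), which maps a missing 'segment_id' and an explicit None to the same outcome. A minimal sketch:

    subnets = [{'segment_id': None}, {}]
    # .get() returns None for the missing key, so both subnets filter out
    # identically and the API returns an empty list of segment ids.
    segment_ids = [s.get('segment_id') for s in subnets
                   if s.get('segment_id') is not None]
    assert segment_ids == []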
diff --git a/nova/tests/unit/scheduler/fakes.py b/nova/tests/unit/scheduler/fakes.py
index 658c82c20e..f5dcf87e4a 100644
--- a/nova/tests/unit/scheduler/fakes.py
+++ b/nova/tests/unit/scheduler/fakes.py
@@ -34,6 +34,7 @@ NUMA_TOPOLOGY = objects.NUMATopology(cells=[
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=16, total=387184, used=0),
@@ -46,6 +47,7 @@ NUMA_TOPOLOGY = objects.NUMATopology(cells=[
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=1548736, used=0),
diff --git a/nova/tests/unit/scheduler/weights/test_weights_hypervisor_version.py b/nova/tests/unit/scheduler/weights/test_weights_hypervisor_version.py
new file mode 100644
index 0000000000..c6e4abd4cd
--- /dev/null
+++ b/nova/tests/unit/scheduler/weights/test_weights_hypervisor_version.py
@@ -0,0 +1,97 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler hypervisor version weights.
+"""
+
+from nova.scheduler import weights
+from nova.scheduler.weights import hypervisor_version
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class HypervisorVersionWeigherTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ self.weight_handler = weights.HostWeightHandler()
+ self.weighers = [hypervisor_version.HypervisorVersionWeigher()]
+
+ def _get_weighed_host(self, hosts, weight_properties=None):
+ if weight_properties is None:
+ weight_properties = {}
+ return self.weight_handler.get_weighed_objects(self.weighers,
+ hosts, weight_properties)[0]
+
+ def _get_all_hosts(self):
+ host_values = [
+ ('host1', 'node1', {'hypervisor_version': 1}),
+ ('host2', 'node2', {'hypervisor_version': 200}),
+ ('host3', 'node3', {'hypervisor_version': 100}),
+ ('host4', 'node4', {'hypervisor_version': 1000}),
+ ]
+ return [fakes.FakeHostState(host, node, values)
+ for host, node, values in host_values]
+
+ def test_multiplier_default(self):
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(1.0, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ def test_multiplier_default_full_ordering(self):
+ hostinfo_list = self._get_all_hosts()
+ weighed_hosts = self.weight_handler.get_weighed_objects(
+ self.weighers, hostinfo_list, {}
+ )
+ expected_hosts = [fakes.FakeHostState(host, node, values)
+ for host, node, values in [
+ ('host4', 'node4', {'hypervisor_version': 1000}),
+ ('host2', 'node2', {'hypervisor_version': 200}),
+ ('host3', 'node3', {'hypervisor_version': 100}),
+ ('host1', 'node1', {'hypervisor_version': 1}),
+ ]]
+ for actual, expected in zip(
+ weighed_hosts,
+ expected_hosts
+ ):
+            self.assertEqual(expected.host, actual.obj.host)
+
+ def test_multiplier_none(self):
+ multi = 0.0
+ self.flags(
+ hypervisor_version_weight_multiplier=multi,
+ group='filter_scheduler'
+ )
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(multi, weighed_host.weight)
+
+ def test_multiplier_positive(self):
+ multi = 2.0
+ self.flags(
+ hypervisor_version_weight_multiplier=multi,
+ group='filter_scheduler'
+ )
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(1.0 * multi, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ def test_multiplier_negative(self):
+ multi = -1.0
+ self.flags(
+ hypervisor_version_weight_multiplier=multi,
+ group='filter_scheduler'
+ )
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual('host1', weighed_host.obj.host)
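For reference, a weigher matching these expectations can be very small. The sketch below is consistent with the tests (the raw hypervisor_version is weighed, the handler normalizes it to [0.0, 1.0], and the [filter_scheduler] multiplier scales or inverts it), but it is not necessarily the shipped nova/scheduler/weights/hypervisor_version.py:

    import nova.conf
    from nova.scheduler import weights

    CONF = nova.conf.CONF

    class HypervisorVersionWeigher(weights.BaseHostWeigher):
        def weight_multiplier(self, host_state):
            # Assumed config knob, as set via self.flags() in the tests above.
            return CONF.filter_scheduler.hypervisor_version_weight_multiplier

        def _weigh_object(self, host_state, weight_properties):
            # Hosts without a reported version weigh lowest instead of failing.
            return host_state.hypervisor_version or 0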
diff --git a/nova/tests/unit/test_service_auth.py b/nova/tests/unit/test_service_auth.py
index 5f07515188..8966af3ce3 100644
--- a/nova/tests/unit/test_service_auth.py
+++ b/nova/tests/unit/test_service_auth.py
@@ -56,3 +56,13 @@ class ServiceAuthTestCase(test.NoDBTestCase):
result = service_auth.get_auth_plugin(self.ctx)
self.assertEqual(1, mock_load.call_count)
self.assertNotIsInstance(result, service_token.ServiceTokenAuthWrapper)
+
+ @mock.patch.object(ks_loading, 'load_auth_from_conf_options',
+ new=mock.Mock())
+ def test_get_auth_plugin_user_auth(self):
+ self.flags(send_service_user_token=True, group='service_user')
+ user_auth = mock.Mock()
+
+ result = service_auth.get_auth_plugin(self.ctx, user_auth=user_auth)
+
+ self.assertEqual(user_auth, result.user_auth)
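This test, like the neutron and cinder ones, exercises keystoneauth1's ServiceTokenAuthWrapper, which sends the user token as X-Auth-Token and the service token as X-Service-Token. A sketch of the composition with illustrative credentials (real values come from nova.conf [service_user] and the request context):

    from keystoneauth1.identity import v3
    from keystoneauth1 import service_token

    user_auth = v3.Token(auth_url='http://keystone/v3',
                         token='user-token', project_id='proj')
    service_auth = v3.Password(auth_url='http://keystone/v3',
                               username='nova', password='secret',
                               user_domain_name='Default',
                               project_name='service',
                               project_domain_name='Default')

    # Sessions using this plugin authenticate as the user while proving
    # the request came from a trusted service.
    auth = service_token.ServiceTokenAuthWrapper(
        user_auth=user_auth, service_auth=service_auth)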
diff --git a/nova/tests/unit/virt/hyperv/test_vmops.py b/nova/tests/unit/virt/hyperv/test_vmops.py
index 07e1774f9a..1e3e50f92b 100644
--- a/nova/tests/unit/virt/hyperv/test_vmops.py
+++ b/nova/tests/unit/virt/hyperv/test_vmops.py
@@ -1129,7 +1129,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
mock_unplug_vifs.assert_called_once_with(
mock_instance, mock.sentinel.fake_network_info)
mock_disconnect_volumes.assert_called_once_with(
- mock.sentinel.FAKE_BD_INFO)
+ mock.sentinel.FAKE_BD_INFO, force=True)
mock_delete_disk_files.assert_called_once_with(
mock_instance.name)
diff --git a/nova/tests/unit/virt/hyperv/test_volumeops.py b/nova/tests/unit/virt/hyperv/test_volumeops.py
index 66d2c2527f..f289d03632 100644
--- a/nova/tests/unit/virt/hyperv/test_volumeops.py
+++ b/nova/tests/unit/virt/hyperv/test_volumeops.py
@@ -141,7 +141,13 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
self._volumeops.disconnect_volumes(block_device_info)
fake_volume_driver.disconnect_volume.assert_called_once_with(
- block_device_mapping[0]['connection_info'])
+ block_device_mapping[0]['connection_info'], force=False)
+
+ # Verify force=True
+ fake_volume_driver.disconnect_volume.reset_mock()
+ self._volumeops.disconnect_volumes(block_device_info, force=True)
+ fake_volume_driver.disconnect_volume.assert_called_once_with(
+ block_device_mapping[0]['connection_info'], force=True)
@mock.patch('time.sleep')
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
@@ -181,7 +187,7 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
if attach_failed:
fake_volume_driver.disconnect_volume.assert_called_once_with(
- fake_conn_info)
+ fake_conn_info, force=False)
mock_sleep.assert_has_calls(
[mock.call(CONF.hyperv.volume_attach_retry_interval)] *
CONF.hyperv.volume_attach_retry_count)
@@ -203,7 +209,13 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
mock_get_volume_driver.assert_called_once_with(
mock.sentinel.conn_info)
fake_volume_driver.disconnect_volume.assert_called_once_with(
- mock.sentinel.conn_info)
+ mock.sentinel.conn_info, force=False)
+
+ # Verify force=True
+ fake_volume_driver.disconnect_volume.reset_mock()
+ self._volumeops.disconnect_volume(mock.sentinel.conn_info, force=True)
+ fake_volume_driver.disconnect_volume.assert_called_once_with(
+ mock.sentinel.conn_info, force=True)
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
def test_detach_volume(self, mock_get_volume_driver):
@@ -347,7 +359,13 @@ class BaseVolumeDriverTestCase(test_base.HyperVBaseTestCase):
self._base_vol_driver.disconnect_volume(conn_info)
self._conn.disconnect_volume.assert_called_once_with(
- conn_info['data'])
+ conn_info['data'], force=False)
+
+ # Verify force=True
+ self._conn.disconnect_volume.reset_mock()
+ self._base_vol_driver.disconnect_volume(conn_info, force=True)
+ self._conn.disconnect_volume.assert_called_once_with(
+ conn_info['data'], force=True)
@mock.patch.object(volumeops.BaseVolumeDriver, '_get_disk_res_path')
def _test_get_disk_resource_path_by_conn_info(self,
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 2b58c7df8b..774498b69f 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -740,6 +740,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
imagebackend.Image._get_driver_format)
self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture())
+ self.cgroups = self.useFixture(nova_fixtures.CGroupsFixture())
# ensure tests perform the same on all host architectures; this is
# already done by the fakelibvirt fixture but we want to change the
@@ -3093,9 +3094,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'fake-flavor', 'fake-image-meta').obj_to_primitive())
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_fits(self, is_able):
+ def test_get_guest_config_numa_host_instance_fits(self):
self.flags(cpu_shared_set=None, cpu_dedicated_set=None,
group='compute')
instance_ref = objects.Instance(**self.test_instance)
@@ -3133,9 +3132,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@mock.patch('nova.privsep.utils.supports_direct_io',
new=mock.Mock(return_value=True))
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_no_fit(self, is_able):
+ def test_get_guest_config_numa_host_instance_no_fit(self):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
@@ -3563,10 +3560,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
host_topology, inst_topology, numa_tune)
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_pci_no_numa_info(
- self, is_able):
+ def test_get_guest_config_numa_host_instance_pci_no_numa_info(self):
self.flags(cpu_shared_set='3', cpu_dedicated_set=None,
group='compute')
@@ -3620,10 +3614,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@mock.patch('nova.privsep.utils.supports_direct_io',
new=mock.Mock(return_value=True))
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_2pci_no_fit(
- self, is_able):
+ def test_get_guest_config_numa_host_instance_2pci_no_fit(self):
self.flags(cpu_shared_set='3', cpu_dedicated_set=None,
group='compute')
instance_ref = objects.Instance(**self.test_instance)
@@ -3740,10 +3731,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
None)
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(
- self, is_able):
+ def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(self):
self.flags(cpu_shared_set='2-3', cpu_dedicated_set=None,
group='compute')
@@ -3782,10 +3770,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertIsNone(cfg.cpu.numa)
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_non_numa_host_instance_topo(
- self, is_able):
+ def test_get_guest_config_non_numa_host_instance_topo(self):
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
@@ -3833,10 +3818,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
numa_cfg_cell.memory)
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_topo(
- self, is_able):
+ def test_get_guest_config_numa_host_instance_topo(self):
self.flags(cpu_shared_set='0-5', cpu_dedicated_set=None,
group='compute')
@@ -7310,9 +7292,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
[],
image_meta, disk_info)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_with_cpu_quota(self, is_able):
+ def test_get_guest_config_with_cpu_quota(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -7648,9 +7628,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.flags(images_type='rbd', group='libvirt')
self._test_get_guest_config_disk_cachemodes('rbd')
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_with_bogus_cpu_quota(self, is_able):
+ def test_get_guest_config_with_bogus_cpu_quota(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -7668,9 +7646,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr._get_guest_config,
instance_ref, [], image_meta, disk_info)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=False)
- def test_get_update_guest_cputune(self, is_able):
+ def test_get_update_guest_cputune(self):
+ # No CPU controller on the host
+ self.cgroups.version = 0
+
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
@@ -9743,7 +9722,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr._disconnect_volume(
self.context, fake_connection_info, fake_instance_1)
mock_volume_driver.disconnect_volume.assert_called_once_with(
- fake_connection_info, fake_instance_1)
+ fake_connection_info, fake_instance_1, force=False)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_detach_encryptor')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
@@ -10117,7 +10096,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
device_name='vdc',
),
mock.call.detach_encryptor(**encryption),
- mock.call.disconnect_volume(connection_info, instance)])
+ mock.call.disconnect_volume(
+ connection_info,
+ instance,
+ force=False,
+ )
+ ])
get_device_conf_func = mock_detach_with_retry.mock_calls[0][1][2]
self.assertEqual(mock_guest.get_disk, get_device_conf_func.func)
self.assertEqual(('vdc',), get_device_conf_func.args)
@@ -20495,16 +20479,64 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.context,
mock.sentinel.connection_info,
instance,
- destroy_secrets=False
+ destroy_secrets=False,
+ force=True
),
mock.call(
self.context,
mock.sentinel.connection_info,
instance,
- destroy_secrets=True
+ destroy_secrets=True,
+ force=True
)
])
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_driver')
+ @mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver._should_disconnect_target',
+ new=mock.Mock(return_value=True))
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._detach_encryptor',
+ new=mock.Mock())
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain',
+ new=mock.Mock())
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_vpmems',
+ new=mock.Mock(return_value=None))
+ def test_cleanup_disconnect_volume(self, mock_vol_driver):
+ """Verify that we call disconnect_volume() with force=True
+
+ cleanup() is called by destroy() when an instance is being deleted and
+ force=True should be passed down to os-brick's disconnect_volume()
+ call, which will ensure removal of devices regardless of errors.
+
+ We need to ensure that devices are removed when an instance is being
+ deleted to avoid leaving leftover devices that could later be
+ erroneously connected by external entities (example: multipathd) to
+ instances that should not have access to the volumes.
+
+ See https://bugs.launchpad.net/nova/+bug/2004555 for details.
+ """
+ connection_info = mock.MagicMock()
+ block_device_info = {
+ 'block_device_mapping': [
+ {
+ 'connection_info': connection_info
+ }
+ ]
+ }
+ instance = objects.Instance(self.context, **self.test_instance)
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ drvr.cleanup(
+ self.context,
+ instance,
+ network_info={},
+ block_device_info=block_device_info,
+ destroy_vifs=False,
+ destroy_disks=False,
+ )
+ mock_vol_driver.return_value.disconnect_volume.assert_called_once_with(
+ connection_info, instance, force=True)
+
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_allow_native_luksv1')
def test_swap_volume_native_luks_blocked(self, mock_allow_native_luksv1,
@@ -22348,6 +22380,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.flags(sysinfo_serial="none", group="libvirt")
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
self.useFixture(nova_fixtures.LibvirtFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
os_vif.initialize()
self.drvr = libvirt_driver.LibvirtDriver(
diff --git a/nova/tests/unit/virt/libvirt/test_host.py b/nova/tests/unit/virt/libvirt/test_host.py
index 631b10d81a..a76dc83105 100644
--- a/nova/tests/unit/virt/libvirt/test_host.py
+++ b/nova/tests/unit/virt/libvirt/test_host.py
@@ -1619,25 +1619,59 @@ Active: 8381604 kB
self.host.compare_cpu("cpuxml")
mock_compareCPU.assert_called_once_with("cpuxml", 0)
- def test_is_cpu_control_policy_capable_ok(self):
+ def test_is_cpu_control_policy_capable_via_neither(self):
+ self.useFixture(nova_fixtures.CGroupsFixture(version=0))
+ self.assertFalse(self.host.is_cpu_control_policy_capable())
+
+ def test_is_cpu_control_policy_capable_via_cgroupsv1(self):
+ self.useFixture(nova_fixtures.CGroupsFixture(version=1))
+ self.assertTrue(self.host.is_cpu_control_policy_capable())
+
+ def test_is_cpu_control_policy_capable_via_cgroupsv2(self):
+ self.useFixture(nova_fixtures.CGroupsFixture(version=2))
+ self.assertTrue(self.host.is_cpu_control_policy_capable())
+
+ def test_has_cgroupsv1_cpu_controller_ok(self):
m = mock.mock_open(
- read_data="""cg /cgroup/cpu,cpuacct cg opt1,cpu,opt3 0 0
-cg /cgroup/memory cg opt1,opt2 0 0
-""")
- with mock.patch('builtins.open', m, create=True):
- self.assertTrue(self.host.is_cpu_control_policy_capable())
+ read_data=(
+ "cg /cgroup/cpu,cpuacct cg opt1,cpu,opt3 0 0"
+ "cg /cgroup/memory cg opt1,opt2 0 0"
+ )
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertTrue(self.host._has_cgroupsv1_cpu_controller())
- def test_is_cpu_control_policy_capable_ko(self):
+ def test_has_cgroupsv1_cpu_controller_ko(self):
m = mock.mock_open(
- read_data="""cg /cgroup/cpu,cpuacct cg opt1,opt2,opt3 0 0
-cg /cgroup/memory cg opt1,opt2 0 0
-""")
- with mock.patch('builtins.open', m, create=True):
- self.assertFalse(self.host.is_cpu_control_policy_capable())
+ read_data=(
+ "cg /cgroup/cpu,cpuacct cg opt1,opt2,opt3 0 0"
+ "cg /cgroup/memory cg opt1,opt2 0 0"
+ )
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertFalse(self.host._has_cgroupsv1_cpu_controller())
- @mock.patch('builtins.open', side_effect=IOError)
- def test_is_cpu_control_policy_capable_ioerror(self, mock_open):
- self.assertFalse(self.host.is_cpu_control_policy_capable())
+ @mock.patch("builtins.open", side_effect=IOError)
+ def test_has_cgroupsv1_cpu_controller_ioerror(self, _):
+ self.assertFalse(self.host._has_cgroupsv1_cpu_controller())
+
+ def test_has_cgroupsv2_cpu_controller_ok(self):
+ m = mock.mock_open(
+ read_data="cpuset cpu io memory hugetlb pids rdma misc"
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertTrue(self.host._has_cgroupsv2_cpu_controller())
+
+ def test_has_cgroupsv2_cpu_controller_ko(self):
+ m = mock.mock_open(
+ read_data="memory pids"
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertFalse(self.host._has_cgroupsv2_cpu_controller())
+
+ @mock.patch("builtins.open", side_effect=IOError)
+ def test_has_cgroupsv2_cpu_controller_ioerror(self, _):
+ self.assertFalse(self.host._has_cgroupsv2_cpu_controller())
def test_get_canonical_machine_type(self):
# this test relies on configuration from the FakeLibvirtFixture
diff --git a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
index 06065322f6..55054652c3 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
@@ -81,3 +81,23 @@ class LibvirtFibreChannelVolumeDriverTestCase(
self.assertEqual(requested_size, new_size)
libvirt_driver.connector.extend_volume.assert_called_once_with(
connection_info['data'])
+
+ def test_disconnect_volume(self):
+ device_path = '/dev/fake-dev'
+ connection_info = {'data': {'device_path': device_path}}
+
+ libvirt_driver = fibrechannel.LibvirtFibreChannelVolumeDriver(
+ self.fake_host)
+ libvirt_driver.connector.disconnect_volume = mock.MagicMock()
+ libvirt_driver.disconnect_volume(
+ connection_info, mock.sentinel.instance)
+
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], connection_info['data'], force=False)
+
+ # Verify force=True
+ libvirt_driver.connector.disconnect_volume.reset_mock()
+ libvirt_driver.disconnect_volume(
+ connection_info, mock.sentinel.instance, force=True)
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], connection_info['data'], force=True)
diff --git a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
index bd516b1dd6..a1111e0d12 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
@@ -57,10 +57,19 @@ class LibvirtISCSIVolumeDriverTestCase(
device=device_path))
libvirt_driver.disconnect_volume(connection_info,
mock.sentinel.instance)
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], None, force=False)
msg = mock_LOG_warning.call_args_list[0]
self.assertIn('Ignoring VolumeDeviceNotFound', msg[0][0])
+ # Verify force=True
+ libvirt_driver.connector.disconnect_volume.reset_mock()
+ libvirt_driver.disconnect_volume(
+ connection_info, mock.sentinel.instance, force=True)
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], None, force=True)
+
def test_extend_volume(self):
device_path = '/dev/fake-dev'
connection_info = {'data': {'device_path': device_path}}
diff --git a/nova/tests/unit/virt/libvirt/volume/test_lightos.py b/nova/tests/unit/virt/libvirt/volume/test_lightos.py
index 8a85d73059..f97a696a53 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_lightos.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_lightos.py
@@ -62,7 +62,13 @@ class LibvirtLightVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
connection_info = {'data': disk_info}
lightos_driver.disconnect_volume(connection_info, None)
lightos_driver.connector.disconnect_volume.assert_called_once_with(
- disk_info, None)
+ disk_info, None, force=False)
+
+ # Verify force=True
+ lightos_driver.connector.disconnect_volume.reset_mock()
+ lightos_driver.disconnect_volume(connection_info, None, force=True)
+ lightos_driver.connector.disconnect_volume.assert_called_once_with(
+ disk_info, None, force=True)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_nvme.py b/nova/tests/unit/virt/libvirt/volume/test_nvme.py
index 3f593841fa..42ef0adc8d 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_nvme.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_nvme.py
@@ -77,7 +77,13 @@ class LibvirtNVMEVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
connection_info = {'data': disk_info}
nvme_driver.disconnect_volume(connection_info, None)
nvme_driver.connector.disconnect_volume.assert_called_once_with(
- disk_info, None)
+ disk_info, None, force=False)
+
+ # Verify force=True
+ nvme_driver.connector.disconnect_volume.reset_mock()
+ nvme_driver.disconnect_volume(connection_info, None, force=True)
+ nvme_driver.connector.disconnect_volume.assert_called_once_with(
+ disk_info, None, force=True)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_scaleio.py b/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
index f0fcba1deb..7d93691d9d 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
@@ -49,7 +49,13 @@ class LibvirtScaleIOVolumeDriverTestCase(
conn = {'data': mock.sentinel.conn_data}
sio.disconnect_volume(conn, mock.sentinel.instance)
sio.connector.disconnect_volume.assert_called_once_with(
- mock.sentinel.conn_data, None)
+ mock.sentinel.conn_data, None, force=False)
+
+ # Verify force=True
+ sio.connector.disconnect_volume.reset_mock()
+ sio.disconnect_volume(conn, mock.sentinel.instance, force=True)
+ sio.connector.disconnect_volume.assert_called_once_with(
+ mock.sentinel.conn_data, None, force=True)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_storpool.py b/nova/tests/unit/virt/libvirt/volume/test_storpool.py
index 678d4f8eb4..a3252b8525 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_storpool.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_storpool.py
@@ -53,9 +53,11 @@ class MockStorPoolConnector(object):
}
return {'type': 'block', 'path': test_attached[v]['path']}
- def disconnect_volume(self, connection_info, device_info):
+ def disconnect_volume(self, connection_info, device_info, **kwargs):
self.inst.assertIn('client_id', connection_info)
self.inst.assertIn('volume', connection_info)
+ self.inst.assertIn('force', kwargs)
+ self.inst.assertEqual(self.inst.force, kwargs.get('force'))
v = connection_info['volume']
if v not in test_attached:
@@ -86,6 +88,11 @@ class MockStorPoolInitiator(object):
class LibvirtStorPoolVolumeDriverTestCase(
test_volume.LibvirtVolumeBaseTestCase):
+ def setUp(self):
+ super().setUp()
+ # This is for testing the force flag of disconnect_volume()
+ self.force = False
+
def mock_storpool(f):
def _config_inner_inner1(inst, *args, **kwargs):
@mock.patch(
@@ -175,3 +182,10 @@ class LibvirtStorPoolVolumeDriverTestCase(
libvirt_driver.disconnect_volume(ci_2, mock.sentinel.instance)
self.assertDictEqual({}, test_attached)
+
+ # Connect the volume again so we can detach it again
+ libvirt_driver.connect_volume(ci_2, mock.sentinel.instance)
+ # Verify force=True
+ self.force = True
+ libvirt_driver.disconnect_volume(
+ ci_2, mock.sentinel.instance, force=True)
diff --git a/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py b/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
index 168efee944..c9e455b193 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
@@ -95,7 +95,13 @@ class LibvirtVZStorageTestCase(test_volume.LibvirtVolumeBaseTestCase):
conn = {'data': mock.sentinel.conn_data}
drv.disconnect_volume(conn, mock.sentinel.instance)
drv.connector.disconnect_volume.assert_called_once_with(
- mock.sentinel.conn_data, None)
+ mock.sentinel.conn_data, None, force=False)
+
+ # Verify force=True
+ drv.connector.disconnect_volume.reset_mock()
+ drv.disconnect_volume(conn, mock.sentinel.instance, force=True)
+ drv.connector.disconnect_volume.assert_called_once_with(
+ mock.sentinel.conn_data, None, force=True)
def test_libvirt_vzstorage_driver_get_config(self):
libvirt_driver = vzstorage.LibvirtVZStorageVolumeDriver(self.fake_host)
diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py
index 753ee41550..ab51a3e26c 100644
--- a/nova/tests/unit/virt/test_hardware.py
+++ b/nova/tests/unit/virt/test_hardware.py
@@ -2023,6 +2023,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=256,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=32768, used=0),
@@ -2036,6 +2037,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=256,
cpu_usage=0,
memory_usage=0,
+ socket=1,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=32768, used=64),
@@ -2049,6 +2051,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=2,
cpu_usage=0,
memory_usage=0,
+ socket=2,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=16)],
@@ -2130,6 +2133,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=160,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=32768, used=32),
@@ -2170,6 +2174,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=1024,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2181,6 +2186,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2192,6 +2198,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2258,6 +2265,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=1024,
cpu_usage=2,
memory_usage=512,
+ socket=0,
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
siblings=[set([0]), set([1]), set([2]), set([3])],
@@ -2269,6 +2277,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=1,
memory_usage=512,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2280,6 +2289,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=256,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2330,6 +2340,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=2048, total=512, used=128,
@@ -2342,6 +2353,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=1048576, total=5, used=2,
@@ -2606,6 +2618,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=2,
memory_usage=2048,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)],
@@ -2616,6 +2629,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=2,
memory_usage=2048,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)],
@@ -4162,6 +4176,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4191,6 +4206,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 3]),
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)],
@@ -4220,6 +4236,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4248,6 +4265,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0, 2]), set([1, 3])],
mempages=[objects.NUMAPagesTopology(
@@ -4274,6 +4292,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 3]),
siblings=[set([0, 2]), set([1, 3])],
mempages=[objects.NUMAPagesTopology(
@@ -4300,6 +4319,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4326,6 +4346,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 3]),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4355,6 +4376,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([2]),
siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
mempages=[objects.NUMAPagesTopology(
@@ -4385,6 +4407,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([2, 6, 7]),
siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
mempages=[objects.NUMAPagesTopology(
@@ -4417,6 +4440,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
cpu_usage=2,
memory_usage=0,
pinned_cpus=set(),
+ socket=0,
siblings=[{cpu} for cpu in range(8)],
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)]
@@ -4450,6 +4474,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 3]),
siblings=[{cpu} for cpu in range(8)],
mempages=[objects.NUMAPagesTopology(
@@ -4492,6 +4517,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0, 5]), set([1, 6]), set([2, 7]), set([3, 8]),
set([4, 9])],
@@ -4531,6 +4557,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 5, 6, 7]),
siblings=[set([0, 5]), set([1, 6]), set([2, 7]), set([3, 8]),
set([4, 9])],
@@ -4766,6 +4793,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1])],
mempages=[objects.NUMAPagesTopology(
@@ -4777,6 +4805,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
diff --git a/nova/tests/unit/virt/test_virt_drivers.py b/nova/tests/unit/virt/test_virt_drivers.py
index 58fa3d4c27..ed9f1e3822 100644
--- a/nova/tests/unit/virt/test_virt_drivers.py
+++ b/nova/tests/unit/virt/test_virt_drivers.py
@@ -832,6 +832,7 @@ class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
# This is needed for the live migration tests which spawn off the
# operation for monitoring.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
# When destroying an instance, os-vif will try to execute some commands
# which hang tests so let's just stub out the unplug call to os-vif
# since we don't care about it.
diff --git a/nova/tests/unit/volume/test_cinder.py b/nova/tests/unit/volume/test_cinder.py
index e53ebe3cb8..f9080726fb 100644
--- a/nova/tests/unit/volume/test_cinder.py
+++ b/nova/tests/unit/volume/test_cinder.py
@@ -1276,3 +1276,14 @@ class CinderClientTestCase(test.NoDBTestCase):
admin_ctx = context.get_admin_context()
params = cinder._get_cinderclient_parameters(admin_ctx)
self.assertEqual(params[0], mock_admin_auth)
+
+ @mock.patch('nova.service_auth._SERVICE_AUTH')
+ @mock.patch('nova.volume.cinder._ADMIN_AUTH')
+ def test_admin_context_without_user_token_but_with_service_token(
+ self, mock_admin_auth, mock_service_auth
+ ):
+ self.flags(send_service_user_token=True, group='service_user')
+ admin_ctx = context.get_admin_context()
+ params = cinder._get_cinderclient_parameters(admin_ctx)
+ self.assertEqual(mock_admin_auth, params[0].user_auth)
+ self.assertEqual(mock_service_auth, params[0].service_auth)
diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py
index c8f8bb2481..9693e405d3 100644
--- a/nova/virt/hardware.py
+++ b/nova/virt/hardware.py
@@ -869,7 +869,7 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
instance_cell.pcpuset)
cpuset_reserved = _get_reserved(
sibling_sets[1], pinning, num_cpu_reserved=num_cpu_reserved)
- if not pinning or (num_cpu_reserved and not cpuset_reserved):
+ if pinning is None or (num_cpu_reserved and not cpuset_reserved):
continue
break
@@ -895,7 +895,7 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
cpuset_reserved = _get_reserved(
sibling_set, pinning, num_cpu_reserved=num_cpu_reserved)
- if not pinning or (num_cpu_reserved and not cpuset_reserved):
+ if pinning is None or (num_cpu_reserved and not cpuset_reserved):
return
LOG.debug('Selected cores for pinning: %s, in cell %s', pinning,
host_cell.id)
@@ -2583,6 +2583,7 @@ def numa_usage_from_instance_numa(host_topology, instance_topology,
cpuset=host_cell.cpuset,
pcpuset=host_cell.pcpuset,
memory=host_cell.memory,
+ socket=host_cell.socket,
cpu_usage=0,
memory_usage=0,
mempages=host_cell.mempages,
@@ -2607,8 +2608,10 @@ def numa_usage_from_instance_numa(host_topology, instance_topology,
None, fields.CPUAllocationPolicy.SHARED,
):
continue
-
- pinned_cpus = set(instance_cell.cpu_pinning.values())
+ if instance_cell.cpu_pinning:
+ pinned_cpus = set(instance_cell.cpu_pinning.values())
+ else:
+ pinned_cpus = set()
if instance_cell.cpuset_reserved:
pinned_cpus |= instance_cell.cpuset_reserved
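The switch from `not pinning` to `pinning is None` matters because the packing helper distinguishes "no fit found" (None) from "fit found, nothing to pin" (an empty mapping). A toy check of the two predicates:

    def needs_retry_old(pinning):
        return not pinning            # treats {} like a failure

    def needs_retry_new(pinning):
        return pinning is None        # only a real "no fit" retries

    assert needs_retry_old({}) is True      # false positive before the fix
    assert needs_retry_new({}) is False     # empty pinning is a valid result
    assert needs_retry_new(None) is True    # genuine failure still retries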
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 1291f975ad..ba18c85cf7 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -146,6 +146,14 @@ class HyperVDriver(driver.ComputeDriver):
'in Rocky.')
def init_host(self, host):
+ LOG.warning(
+ 'The hyperv driver is not tested by the OpenStack project nor '
            'does it have clear maintainer(s) and thus its quality cannot be '
            'ensured. It should be considered experimental and may be removed '
            'in a future release. If you are using the driver in production, '
            'please let us know via the openstack-discuss mailing list.'
+ )
+
self._serialconsoleops.start_console_handlers()
event_handler = eventhandler.InstanceEventHandler(
state_change_callback=self.emit_event)
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index 3ec7e90c30..08adeada76 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -747,7 +747,7 @@ class VMOps(object):
# should be disconnected even if the VM doesn't exist anymore,
# so they are not leaked.
self.unplug_vifs(instance, network_info)
- self._volumeops.disconnect_volumes(block_device_info)
+ self._volumeops.disconnect_volumes(block_device_info, force=True)
if destroy_disks:
self._delete_disk_files(instance_name)
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index da5b40f375..d2bfed2441 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -59,10 +59,10 @@ class VolumeOps(object):
for vol in volumes:
self.attach_volume(vol['connection_info'], instance_name)
- def disconnect_volumes(self, block_device_info):
+ def disconnect_volumes(self, block_device_info, force=False):
mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in mapping:
- self.disconnect_volume(vol['connection_info'])
+ self.disconnect_volume(vol['connection_info'], force=force)
def attach_volume(self, connection_info, instance_name,
disk_bus=constants.CTRL_TYPE_SCSI):
@@ -116,9 +116,9 @@ class VolumeOps(object):
volume_driver.set_disk_qos_specs(connection_info,
qos_specs)
- def disconnect_volume(self, connection_info):
+ def disconnect_volume(self, connection_info, force=False):
volume_driver = self._get_volume_driver(connection_info)
- volume_driver.disconnect_volume(connection_info)
+ volume_driver.disconnect_volume(connection_info, force=force)
def detach_volume(self, connection_info, instance_name):
LOG.debug("Detaching volume: %(connection_info)s "
@@ -231,8 +231,8 @@ class BaseVolumeDriver(object):
def connect_volume(self, connection_info):
return self._connector.connect_volume(connection_info['data'])
- def disconnect_volume(self, connection_info):
- self._connector.disconnect_volume(connection_info['data'])
+ def disconnect_volume(self, connection_info, force=False):
+ self._connector.disconnect_volume(connection_info['data'], force=force)
def get_disk_resource_path(self, connection_info):
disk_paths = self._connector.get_volume_paths(connection_info['data'])
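All of these paths bottom out in os-brick, whose connectors accept force/ignore_errors on disconnect_volume. A sketch of the call shape with illustrative connection properties (the iSCSI target details are made up):

    from os_brick.initiator import connector

    conn = connector.InitiatorConnector.factory('ISCSI', 'sudo')
    connection_properties = {
        'target_iqn': 'iqn.2010-10.org.example:vol1',
        'target_portal': '192.0.2.10:3260',
        'target_lun': 0,
    }
    # force=True keeps tearing the device down even if flushing fails,
    # which is the behavior instance deletion wants.
    conn.disconnect_volume(connection_properties, None,
                           force=True, ignore_errors=True)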
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index fe48960296..ff70c9b744 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -1657,7 +1657,7 @@ class LibvirtDriver(driver.ComputeDriver):
try:
self._disconnect_volume(
context, connection_info, instance,
- destroy_secrets=destroy_secrets)
+ destroy_secrets=destroy_secrets, force=True)
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if cleanup_instance_disks:
@@ -1974,7 +1974,7 @@ class LibvirtDriver(driver.ComputeDriver):
return (False if connection_count > 1 else True)
def _disconnect_volume(self, context, connection_info, instance,
- encryption=None, destroy_secrets=True):
+ encryption=None, destroy_secrets=True, force=False):
self._detach_encryptor(
context,
connection_info,
@@ -1986,7 +1986,8 @@ class LibvirtDriver(driver.ComputeDriver):
multiattach = connection_info.get('multiattach', False)
if self._should_disconnect_target(
context, instance, multiattach, vol_driver, volume_id):
- vol_driver.disconnect_volume(connection_info, instance)
+ vol_driver.disconnect_volume(
+ connection_info, instance, force=force)
else:
LOG.info('Detected multiple connections on this host for '
'volume: %(volume)s, skipping target disconnect.',
diff --git a/nova/virt/libvirt/host.py b/nova/virt/libvirt/host.py
index 9658a5791d..1ae86d9f47 100644
--- a/nova/virt/libvirt/host.py
+++ b/nova/virt/libvirt/host.py
@@ -1643,15 +1643,44 @@ class Host(object):
CONFIG_CGROUP_SCHED may be disabled in some kernel configs to
improve scheduler latency.
"""
+ return self._has_cgroupsv1_cpu_controller() or \
+ self._has_cgroupsv2_cpu_controller()
+
+ def _has_cgroupsv1_cpu_controller(self):
+ LOG.debug(f"Searching host: '{self.get_hostname()}' "
+ "for CPU controller through CGroups V1...")
try:
with open("/proc/self/mounts", "r") as fd:
for line in fd.readlines():
# mount options and split options
bits = line.split()[3].split(",")
if "cpu" in bits:
+ LOG.debug("CPU controller found on host.")
+ return True
+ LOG.debug("CPU controller missing on host.")
+ return False
+ except IOError as ex:
+ LOG.debug(f"Search failed due to: '{ex}'. "
+ "Maybe the host is not running under CGroups V1. "
+ "Deemed host to be missing controller by this approach.")
+ return False
+
+ def _has_cgroupsv2_cpu_controller(self):
+ LOG.debug(f"Searching host: '{self.get_hostname()}' "
+ "for CPU controller through CGroups V2...")
+ try:
+ with open("/sys/fs/cgroup/cgroup.controllers", "r") as fd:
+ for line in fd.readlines():
+ bits = line.split()
+ if "cpu" in bits:
+ LOG.debug("CPU controller found on host.")
return True
+ LOG.debug("CPU controller missing on host.")
return False
- except IOError:
+ except IOError as ex:
+ LOG.debug(f"Search failed due to: '{ex}'. "
+ "Maybe the host is not running under CGroups V2. "
+ "Deemed host to be missing controller by this approach.")
return False
def get_canonical_machine_type(self, arch, machine) -> str:
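The two probes read standard kernel interfaces: /proc/self/mounts lists v1 controller mounts with the mount options in the fourth field, and /sys/fs/cgroup/cgroup.controllers lists the controllers enabled at the v2 root. Collapsed into a standalone helper (nova plumbing and logging omitted), the check looks roughly like:

    def host_has_cpu_controller():
        try:  # cgroups v2: one space-separated line of controllers
            with open("/sys/fs/cgroup/cgroup.controllers") as fd:
                if "cpu" in fd.read().split():
                    return True
        except IOError:
            pass
        try:  # cgroups v1: "cpu" among the mount options (4th field)
            with open("/proc/self/mounts") as fd:
                for line in fd:
                    fields = line.split()
                    if len(fields) > 3 and "cpu" in fields[3].split(","):
                        return True
        except IOError:
            pass
        return False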
diff --git a/nova/virt/libvirt/volume/fibrechannel.py b/nova/virt/libvirt/volume/fibrechannel.py
index 22c65e99c0..1752f6d0cc 100644
--- a/nova/virt/libvirt/volume/fibrechannel.py
+++ b/nova/virt/libvirt/volume/fibrechannel.py
@@ -59,7 +59,7 @@ class LibvirtFibreChannelVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
connection_info['data']['multipath_id'] = \
device_info['multipath_id']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach FC Volume", instance=instance)
@@ -69,11 +69,12 @@ class LibvirtFibreChannelVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
# the 2nd param of disconnect_volume and be consistent
# with the rest of the connectors.
self.connector.disconnect_volume(connection_info['data'],
- connection_info['data'])
+ connection_info['data'],
+ force=force)
LOG.debug("Disconnected FC Volume", instance=instance)
super(LibvirtFibreChannelVolumeDriver,
- self).disconnect_volume(connection_info, instance)
+ self).disconnect_volume(connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size):
"""Extend the volume."""
diff --git a/nova/virt/libvirt/volume/fs.py b/nova/virt/libvirt/volume/fs.py
index 5fb9af4a52..992ef45016 100644
--- a/nova/virt/libvirt/volume/fs.py
+++ b/nova/virt/libvirt/volume/fs.py
@@ -116,7 +116,7 @@ class LibvirtMountedFileSystemVolumeDriver(LibvirtBaseFileSystemVolumeDriver,
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Disconnect the volume."""
vol_name = connection_info['data']['name']
mountpoint = self._get_mount_path(connection_info)
diff --git a/nova/virt/libvirt/volume/iscsi.py b/nova/virt/libvirt/volume/iscsi.py
index 564bac14cc..2b25972a49 100644
--- a/nova/virt/libvirt/volume/iscsi.py
+++ b/nova/virt/libvirt/volume/iscsi.py
@@ -66,19 +66,20 @@ class LibvirtISCSIVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach iSCSI Volume", instance=instance)
try:
- self.connector.disconnect_volume(connection_info['data'], None)
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
except os_brick_exception.VolumeDeviceNotFound as exc:
LOG.warning('Ignoring VolumeDeviceNotFound: %s', exc)
return
LOG.debug("Disconnected iSCSI Volume", instance=instance)
super(LibvirtISCSIVolumeDriver,
- self).disconnect_volume(connection_info, instance)
+ self).disconnect_volume(connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size):
"""Extend the volume."""
diff --git a/nova/virt/libvirt/volume/lightos.py b/nova/virt/libvirt/volume/lightos.py
index d6d393994e..6a22bf6dc6 100644
--- a/nova/virt/libvirt/volume/lightos.py
+++ b/nova/virt/libvirt/volume/lightos.py
@@ -42,14 +42,15 @@ class LibvirtLightOSVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
LOG.debug("Connecting NVMe volume with device_info %s", device_info)
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from the instance."""
LOG.debug("Disconnecting NVMe disk. instance:%s, volume_id:%s",
connection_info.get("instance", ""),
connection_info.get("volume_id", ""))
- self.connector.disconnect_volume(connection_info['data'], None)
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
super(LibvirtLightOSVolumeDriver, self).disconnect_volume(
- connection_info, instance)
+ connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size=None):
"""Extend the volume."""
diff --git a/nova/virt/libvirt/volume/nvme.py b/nova/virt/libvirt/volume/nvme.py
index 7436552812..e2977c3572 100644
--- a/nova/virt/libvirt/volume/nvme.py
+++ b/nova/virt/libvirt/volume/nvme.py
@@ -45,13 +45,13 @@ class LibvirtNVMEVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from the instance."""
LOG.debug("Disconnecting NVMe disk", instance=instance)
self.connector.disconnect_volume(
- connection_info['data'], None)
+ connection_info['data'], None, force=force)
super(LibvirtNVMEVolumeDriver,
- self).disconnect_volume(connection_info, instance)
+ self).disconnect_volume(connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size):
"""Extend the volume."""
diff --git a/nova/virt/libvirt/volume/quobyte.py b/nova/virt/libvirt/volume/quobyte.py
index bb7a770e57..2eb4bcfb42 100644
--- a/nova/virt/libvirt/volume/quobyte.py
+++ b/nova/virt/libvirt/volume/quobyte.py
@@ -189,7 +189,7 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
instance=instance)
@utils.synchronized('connect_qb_volume')
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Disconnect the volume."""
mount_path = self._get_mount_path(connection_info)
diff --git a/nova/virt/libvirt/volume/scaleio.py b/nova/virt/libvirt/volume/scaleio.py
index 7c414c2870..04a9423e8e 100644
--- a/nova/virt/libvirt/volume/scaleio.py
+++ b/nova/virt/libvirt/volume/scaleio.py
@@ -57,12 +57,13 @@ class LibvirtScaleIOVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
instance=instance)
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
- self.connector.disconnect_volume(connection_info['data'], None)
+ def disconnect_volume(self, connection_info, instance, force=False):
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
LOG.debug("Disconnected volume", instance=instance)
super(LibvirtScaleIOVolumeDriver, self).disconnect_volume(
- connection_info, instance)
+ connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size):
LOG.debug("calling os-brick to extend ScaleIO Volume",
diff --git a/nova/virt/libvirt/volume/smbfs.py b/nova/virt/libvirt/volume/smbfs.py
index d112af750c..9de1ce23cd 100644
--- a/nova/virt/libvirt/volume/smbfs.py
+++ b/nova/virt/libvirt/volume/smbfs.py
@@ -52,7 +52,7 @@ class LibvirtSMBFSVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
device_path = self._get_device_path(connection_info)
connection_info['data']['device_path'] = device_path
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Disconnect the volume."""
smbfs_share = connection_info['data']['export']
mount_path = self._get_mount_path(connection_info)
diff --git a/nova/virt/libvirt/volume/storpool.py b/nova/virt/libvirt/volume/storpool.py
index 0e71221f5b..e6dffca39a 100644
--- a/nova/virt/libvirt/volume/storpool.py
+++ b/nova/virt/libvirt/volume/storpool.py
@@ -47,10 +47,11 @@ class LibvirtStorPoolVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
device_info, instance=instance)
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
LOG.debug("Detaching StorPool volume %s",
connection_info['data']['volume'], instance=instance)
- self.connector.disconnect_volume(connection_info['data'], None)
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
LOG.debug("Detached StorPool volume", instance=instance)
def extend_volume(self, connection_info, instance, requested_size):
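The same change repeats across the os-brick-backed drivers in this series (iSCSI, LightOS, NVMe, ScaleIO, StorPool, and VZStorage below): ``disconnect_volume`` grows a ``force`` keyword that defaults to ``False``, so existing callers keep their behavior, and forwards it to the os-brick connector. A minimal sketch of the connector-level call this enables, assuming os-brick's public factory API; the ``connection_info`` contents are placeholders for what Cinder returns from ``initialize_connection``:

    # Sketch only: driving an os-brick connector with force=True.
    from os_brick.initiator import connector

    conn = connector.InitiatorConnector.factory(
        'ISCSI', root_helper='sudo', use_multipath=False)

    # Placeholder dict; real contents come from Cinder's
    # initialize_connection and carry target/portal/LUN details.
    connection_info = {'data': {'target_iqn': '...', 'target_portal': '...'}}

    # force=True asks os-brick to remove the host device even if flushing
    # fails, trading any unwritten data for a guaranteed detach.
    conn.disconnect_volume(connection_info['data'], None, force=True)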
diff --git a/nova/virt/libvirt/volume/volume.py b/nova/virt/libvirt/volume/volume.py
index 6d650c80e6..f76c3618b2 100644
--- a/nova/virt/libvirt/volume/volume.py
+++ b/nova/virt/libvirt/volume/volume.py
@@ -135,7 +135,7 @@ class LibvirtBaseVolumeDriver(object):
"""Connect the volume."""
pass
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Disconnect the volume."""
pass
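Because the base class now accepts (and ignores) ``force``, filesystem-backed drivers such as quobyte and smbfs only need the signature update, and subclasses remain interface-compatible. A hypothetical out-of-tree driver (class name invented for illustration) would thread the flag through the same way the in-tree drivers do:

    from nova.virt.libvirt.volume import volume as libvirt_volume

    class VendorVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
        """Hypothetical driver adopting the new signature."""

        def disconnect_volume(self, connection_info, instance, force=False):
            # self.connector is assumed to be an os-brick connector set up
            # in __init__; forwarding force lets an error-path detach still
            # clean up the host device.
            self.connector.disconnect_volume(
                connection_info['data'], None, force=force)
            super().disconnect_volume(
                connection_info, instance, force=force)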
diff --git a/nova/virt/libvirt/volume/vzstorage.py b/nova/virt/libvirt/volume/vzstorage.py
index 85ffb45076..babfdef55c 100644
--- a/nova/virt/libvirt/volume/vzstorage.py
+++ b/nova/virt/libvirt/volume/vzstorage.py
@@ -126,9 +126,10 @@ class LibvirtVZStorageVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
return _connect_volume(connection_info, instance)
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach Vzstorage Volume",
instance=instance)
- self.connector.disconnect_volume(connection_info['data'], None)
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
LOG.debug("Disconnected Vzstorage Volume", instance=instance)
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 01efcfec19..f5328148d2 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -91,12 +91,14 @@ def _get_auth(context):
    # for them is generated from 'context.get_admin_context',
    # which only sets is_admin=True and carries no token.
    # So load the auth plugin when this condition appears.
+ user_auth = None
if context.is_admin and not context.auth_token:
if not _ADMIN_AUTH:
_ADMIN_AUTH = _load_auth_plugin(CONF)
- return _ADMIN_AUTH
- else:
- return service_auth.get_auth_plugin(context)
+ user_auth = _ADMIN_AUTH
+
+    # When user_auth is None, the auth plugin is extracted from the context.
+ return service_auth.get_auth_plugin(context, user_auth=user_auth)
# NOTE(efried): Bug #1752152
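Routing the admin case through ``service_auth.get_auth_plugin`` as well means a service user token (bug 2004555) is attached to Cinder requests whichever credential is in play. A simplified sketch of what the helper plausibly does with the new ``user_auth`` argument, using keystoneauth1's service-token wrapper; this is not the exact nova code, and ``_SERVICE_AUTH`` stands in for the plugin loaded from ``[service_user]``:

    from keystoneauth1 import service_token

    def get_auth_plugin(context, user_auth=None):
        # Fall back to the auth plugin carried by the request context.
        user_auth = user_auth or context.get_auth_plugin()
        if _SERVICE_AUTH:
            # Requests made through this wrapper send the user token and
            # the service token together, so Cinder can trust the caller.
            return service_token.ServiceTokenAuthWrapper(
                user_auth=user_auth, service_auth=_SERVICE_AUTH)
        return user_auth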
diff --git a/releasenotes/notes/hyperv-experimental-antelope-372e18a05cafc295.yaml b/releasenotes/notes/hyperv-experimental-antelope-372e18a05cafc295.yaml
new file mode 100644
index 0000000000..85b874fb69
--- /dev/null
+++ b/releasenotes/notes/hyperv-experimental-antelope-372e18a05cafc295.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+ - |
+ The hyperv driver is marked as experimental and may be removed in a
+ future release. The driver is not tested by the OpenStack project and
+ does not have a clear maintainer.
diff --git a/releasenotes/notes/hypervisor-version-weigher-d0bba77e720edafe.yaml b/releasenotes/notes/hypervisor-version-weigher-d0bba77e720edafe.yaml
new file mode 100644
index 0000000000..31f2c70926
--- /dev/null
+++ b/releasenotes/notes/hypervisor-version-weigher-d0bba77e720edafe.yaml
@@ -0,0 +1,20 @@
+---
+features:
+ - |
+    A new hypervisor version weigher has been added to prefer selecting hosts
+    with newer hypervisors installed. For the libvirt driver, this is the
+    version of libvirt on the compute node, not the version of QEMU. As with
+    all weighers, it is enabled by default and its behavior can be modified
+    using the new ``hypervisor_version_weight_multiplier`` config option in
+    the ``filter_scheduler`` section.
+upgrade:
+ - |
+    A new hypervisor version weigher has been added that prefers selecting
+    hosts with a newer hypervisor installed. This can help simplify rolling
+    upgrades by steering workloads towards already-upgraded hosts during
+    live or cold migration. To restore the old behavior, either remove the
+    weigher from the list of enabled weighers or set
+    ``[filter_scheduler] hypervisor_version_weight_multiplier=0``. The
+    default value of ``hypervisor_version_weight_multiplier`` is 1, so only
+    a mild preference is given to newer hosts; higher values make the effect
+    more pronounced, and negative values prefer older hosts.
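Tuning the weigher is a single scheduler-side nova.conf change; an illustrative (not prescriptive) value that strengthens the preference during a rolling upgrade:

    [filter_scheduler]
    # 1.0 is the default (mild preference for newer hypervisors);
    # 0 disables the effect and negative values invert it.
    hypervisor_version_weight_multiplier = 2.0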
diff --git a/releasenotes/notes/service-user-token-421d067c16257782.yaml b/releasenotes/notes/service-user-token-421d067c16257782.yaml
new file mode 100644
index 0000000000..d3af14fbb8
--- /dev/null
+++ b/releasenotes/notes/service-user-token-421d067c16257782.yaml
@@ -0,0 +1,11 @@
+upgrade:
+ - |
+    Configuration of service user tokens is now **required** for all Nova
+    services in order to secure block-storage volume data.
+
+    Every Nova configuration file must set up the ``[service_user]`` section
+    as described in the `documentation`__.
+
+ See https://bugs.launchpad.net/nova/+bug/2004555 for more details.
+
+ __ https://docs.openstack.org/nova/latest/admin/configuration/service-user-token.html
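In practice the linked documentation amounts to populating ``[service_user]`` in each service's nova.conf; a representative example in which the URL, names, and password are placeholders:

    [service_user]
    send_service_user_token = true
    auth_type = password
    auth_url = https://keystone.example.com/identity
    project_domain_name = Default
    project_name = service
    user_domain_name = Default
    username = nova
    password = REDACTED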
diff --git a/tools/check-cherry-picks.sh b/tools/check-cherry-picks.sh
index 46cef8c225..3042aa1659 100755
--- a/tools/check-cherry-picks.sh
+++ b/tools/check-cherry-picks.sh
@@ -23,7 +23,7 @@ hashes=$(git show --format='%b' --quiet $commit_hash | sed -nr 's/^.cherry picke
checked=0
branches+=""
for hash in $hashes; do
- branch=$(git branch -a --contains "$hash" 2>/dev/null| grep -oE '(master|stable/[a-z]+)')
+ branch=$(git branch -a --contains "$hash" 2>/dev/null| grep -oE '(master|stable/[a-z0-9.]+)')
if [ $? -ne 0 ]; then
echo "Cherry pick hash $hash not on any master or stable branches"
exit 1