-rw-r--r--  .zuul.yaml  3
-rw-r--r--  doc/source/admin/configuration/cross-cell-resize.rst  2
-rw-r--r--  doc/source/admin/configuration/index.rst  1
-rw-r--r--  doc/source/admin/configuration/service-user-token.rst  59
-rw-r--r--  doc/source/admin/live-migration-usage.rst  2
-rw-r--r--  doc/source/admin/migrate-instance-with-snapshot.rst  2
-rw-r--r--  doc/source/admin/support-compute.rst  64
-rw-r--r--  doc/source/install/compute-install-obs.rst  20
-rw-r--r--  doc/source/install/compute-install-rdo.rst  20
-rw-r--r--  doc/source/install/compute-install-ubuntu.rst  20
-rw-r--r--  doc/source/install/controller-install-obs.rst  20
-rw-r--r--  doc/source/install/controller-install-rdo.rst  20
-rw-r--r--  doc/source/install/controller-install-ubuntu.rst  20
-rw-r--r--  nova/api/openstack/wsgi.py  6
-rw-r--r--  nova/cmd/status.py  11
-rw-r--r--  nova/network/neutron.py  8
-rw-r--r--  nova/service_auth.py  6
-rw-r--r--  nova/tests/fixtures/nova.py  71
-rw-r--r--  nova/tests/functional/libvirt/base.py  1
-rw-r--r--  nova/tests/functional/libvirt/test_evacuate.py  1
-rw-r--r--  nova/tests/functional/libvirt/test_numa_servers.py  70
-rw-r--r--  nova/tests/functional/libvirt/test_vpmem.py  1
-rw-r--r--  nova/tests/functional/regressions/test_bug_1595962.py  1
-rw-r--r--  nova/tests/functional/regressions/test_bug_1995153.py  107
-rw-r--r--  nova/tests/unit/cmd/test_status.py  16
-rw-r--r--  nova/tests/unit/compute/test_compute.py  2
-rw-r--r--  nova/tests/unit/compute/test_resource_tracker.py  2
-rw-r--r--  nova/tests/unit/network/test_neutron.py  16
-rw-r--r--  nova/tests/unit/scheduler/fakes.py  2
-rw-r--r--  nova/tests/unit/test_service_auth.py  10
-rw-r--r--  nova/tests/unit/virt/hyperv/test_vmops.py  2
-rw-r--r--  nova/tests/unit/virt/hyperv/test_volumeops.py  26
-rw-r--r--  nova/tests/unit/virt/libvirt/test_driver.py  111
-rw-r--r--  nova/tests/unit/virt/libvirt/test_host.py  64
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py  20
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_iscsi.py  9
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_lightos.py  8
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_nvme.py  8
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_scaleio.py  8
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_storpool.py  16
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_vzstorage.py  8
-rw-r--r--  nova/tests/unit/virt/test_hardware.py  29
-rw-r--r--  nova/tests/unit/virt/test_virt_drivers.py  1
-rw-r--r--  nova/tests/unit/volume/test_cinder.py  11
-rw-r--r--  nova/virt/hardware.py  11
-rw-r--r--  nova/virt/hyperv/vmops.py  2
-rw-r--r--  nova/virt/hyperv/volumeops.py  12
-rw-r--r--  nova/virt/libvirt/driver.py  7
-rw-r--r--  nova/virt/libvirt/host.py  31
-rw-r--r--  nova/virt/libvirt/volume/fibrechannel.py  7
-rw-r--r--  nova/virt/libvirt/volume/fs.py  2
-rw-r--r--  nova/virt/libvirt/volume/iscsi.py  7
-rw-r--r--  nova/virt/libvirt/volume/lightos.py  7
-rw-r--r--  nova/virt/libvirt/volume/nvme.py  6
-rw-r--r--  nova/virt/libvirt/volume/quobyte.py  2
-rw-r--r--  nova/virt/libvirt/volume/scaleio.py  7
-rw-r--r--  nova/virt/libvirt/volume/smbfs.py  2
-rw-r--r--  nova/virt/libvirt/volume/storpool.py  5
-rw-r--r--  nova/virt/libvirt/volume/volume.py  2
-rw-r--r--  nova/virt/libvirt/volume/vzstorage.py  5
-rw-r--r--  nova/volume/cinder.py  8
-rw-r--r--  releasenotes/notes/service-user-token-421d067c16257782.yaml  11
-rwxr-xr-x  tools/check-cherry-picks.sh  2
63 files changed, 856 insertions(+), 185 deletions(-)
diff --git a/.zuul.yaml b/.zuul.yaml
index 29918cafc8..90d15f4d17 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -594,12 +594,13 @@
irrelevant-files: *nova-base-irrelevant-files
required-projects:
- openstack/nova
+ timeout: 9000
pre-run:
- playbooks/ceph/glance-setup.yaml
vars:
# NOTE(danms): Increase our swap size since we're dealing with
# larger images and trigger OOMs.
- configure_swap_size: 4096
+ configure_swap_size: 8192
# NOTE(danms): These tests create an empty non-raw image, which nova
# will refuse because we set never_download_image_if_on_rbd in this job.
# Just skip these tests for this case.
diff --git a/doc/source/admin/configuration/cross-cell-resize.rst b/doc/source/admin/configuration/cross-cell-resize.rst
index e51e425774..0c34fd13f5 100644
--- a/doc/source/admin/configuration/cross-cell-resize.rst
+++ b/doc/source/admin/configuration/cross-cell-resize.rst
@@ -284,7 +284,7 @@ Troubleshooting
Timeouts
~~~~~~~~
-Configure a :ref:`service user <user_token_timeout>` in case the user token
+Configure a :ref:`service user <service_user_token>` in case the user token
times out, e.g. during the snapshot and download of a large server image.
If RPC calls are timing out with a ``MessagingTimeout`` error in the logs,
diff --git a/doc/source/admin/configuration/index.rst b/doc/source/admin/configuration/index.rst
index 233597b1fe..f5b6fde9da 100644
--- a/doc/source/admin/configuration/index.rst
+++ b/doc/source/admin/configuration/index.rst
@@ -19,6 +19,7 @@ A list of config options based on different topics can be found below:
.. toctree::
:maxdepth: 1
+ /admin/configuration/service-user-token
/admin/configuration/api
/admin/configuration/resize
/admin/configuration/cross-cell-resize
diff --git a/doc/source/admin/configuration/service-user-token.rst b/doc/source/admin/configuration/service-user-token.rst
new file mode 100644
index 0000000000..740730af1d
--- /dev/null
+++ b/doc/source/admin/configuration/service-user-token.rst
@@ -0,0 +1,59 @@
+.. _service_user_token:
+
+===================
+Service User Tokens
+===================
+
+.. note::
+
+ Configuration of service user tokens is **required** for every Nova service
+ for security reasons. See https://bugs.launchpad.net/nova/+bug/2004555 for
+ details.
+
+Configure Nova to send service user tokens alongside regular user tokens when
+making REST API calls to other services. The identity service (Keystone) will
+authenticate a request using the service user token if the regular user token
+has expired.
+
+This is important when long-running operations such as live migration or
+snapshot take long enough to exceed the expiry of the user token. Without the
+service token, post operations such as cleanup after a live migration could
+fail when Nova calls other service APIs like block-storage (Cinder) or
+networking (Neutron).
+
+The service token is also used by services to validate whether the API caller
+is a service. Some service APIs are restricted to service users only.
+
+To set up service tokens, create a ``nova`` service user and ``service`` role
+in the identity service (Keystone) and assign the ``service`` role to the
+``nova`` service user.
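+
+For example, with the ``openstack`` command-line client (the domain and
+project names below are illustrative defaults; adjust them to your
+deployment):
+
+.. code-block:: console
+
+ $ openstack user create --domain default --password-prompt nova
+ $ openstack role create service
+ $ openstack role add --project service --user nova service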
+
+Then, configure the :oslo.config:group:`service_user` section of the Nova
+configuration file, for example:
+
+.. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://104.130.216.102/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = secretservice
+ ...
+
+And configure the other identity options as necessary for the service user,
+much like you would configure Nova to work with the image service (Glance) or
+networking service (Neutron).
+
+.. note::
+
+ Please note that the role assigned to the :oslo.config:group:`service_user`
+ needs to be in the configured
+ :oslo.config:option:`keystone_authtoken.service_token_roles` of other
+ services such as block-storage (Cinder), image (Glance), and networking
+ (Neutron).
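+
+For example, a minimal sketch of that option in another service's
+configuration file could look like the following (whether more options are
+needed depends on your deployment):
+
+.. code-block:: ini
+
+ [keystone_authtoken]
+ service_token_roles = service
+ service_token_roles_required = true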
diff --git a/doc/source/admin/live-migration-usage.rst b/doc/source/admin/live-migration-usage.rst
index 32c67c2b0a..dc27574f91 100644
--- a/doc/source/admin/live-migration-usage.rst
+++ b/doc/source/admin/live-migration-usage.rst
@@ -320,4 +320,4 @@ To make live-migration succeed, you have several options:
If live migrations routinely timeout or fail during cleanup operations due
to the user token timing out, consider configuring nova to use
-:ref:`service user tokens <user_token_timeout>`.
+:ref:`service user tokens <service_user_token>`.
diff --git a/doc/source/admin/migrate-instance-with-snapshot.rst b/doc/source/admin/migrate-instance-with-snapshot.rst
index 65059679ab..230431091e 100644
--- a/doc/source/admin/migrate-instance-with-snapshot.rst
+++ b/doc/source/admin/migrate-instance-with-snapshot.rst
@@ -67,7 +67,7 @@ Create a snapshot of the instance
If snapshot operations routinely fail because the user token times out
while uploading a large disk image, consider configuring nova to use
- :ref:`service user tokens <user_token_timeout>`.
+ :ref:`service user tokens <service_user_token>`.
#. Use the :command:`openstack image list` command to check the status
until the status is ``ACTIVE``:
diff --git a/doc/source/admin/support-compute.rst b/doc/source/admin/support-compute.rst
index 8522e51d79..31e32fd1dd 100644
--- a/doc/source/admin/support-compute.rst
+++ b/doc/source/admin/support-compute.rst
@@ -478,67 +478,3 @@ Ensure the ``compute`` endpoint in the identity service catalog is pointing
at ``/v2.1`` instead of ``/v2``. The former route supports microversions,
while the latter route is considered the legacy v2.0 compatibility-mode
route which renders all requests as if they were made on the legacy v2.0 API.
-
-
-.. _user_token_timeout:
-
-User token times out during long-running operations
----------------------------------------------------
-
-Problem
-~~~~~~~
-
-Long-running operations such as live migration or snapshot can sometimes
-overrun the expiry of the user token. In such cases, post operations such
-as cleaning up after a live migration can fail when the nova-compute service
-needs to cleanup resources in other services, such as in the block-storage
-(cinder) or networking (neutron) services.
-
-For example:
-
-.. code-block:: console
-
- 2018-12-17 13:47:29.591 16987 WARNING nova.virt.libvirt.migration [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Live migration not completed after 2400 sec
- 2018-12-17 13:47:30.097 16987 WARNING nova.virt.libvirt.driver [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Migration operation was cancelled
- 2018-12-17 13:47:30.299 16987 ERROR nova.virt.libvirt.driver [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Live Migration failure: operation aborted: migration job: canceled by client: libvirtError: operation aborted: migration job: canceled by client
- 2018-12-17 13:47:30.685 16987 INFO nova.compute.manager [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Swapping old allocation on 3e32d595-bd1f-4136-a7f4-c6703d2fbe18 held by migration 17bec61d-544d-47e0-a1c1-37f9d7385286 for instance
- 2018-12-17 13:47:32.450 16987 ERROR nova.volume.cinder [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] Delete attachment failed for attachment 58997d5b-24f0-4073-819e-97916fb1ee19. Error: The request you have made requires authentication. (HTTP 401) Code: 401: Unauthorized: The request you have made requires authentication. (HTTP 401)
-
-Solution
-~~~~~~~~
-
-Configure nova to use service user tokens to supplement the regular user token
-used to initiate the operation. The identity service (keystone) will then
-authenticate a request using the service user token if the user token has
-already expired.
-
-To use, create a service user in the identity service similar as you would when
-creating the ``nova`` service user.
-
-Then configure the :oslo.config:group:`service_user` section of the nova
-configuration file, for example:
-
-.. code-block:: ini
-
- [service_user]
- send_service_user_token = True
- auth_type = password
- project_domain_name = Default
- project_name = service
- user_domain_name = Default
- password = secretservice
- username = nova
- auth_url = https://104.130.216.102/identity
- ...
-
-And configure the other identity options as necessary for the service user,
-much like you would configure nova to work with the image service (glance)
-or networking service.
-
-.. note::
-
- Please note that the role of the :oslo.config:group:`service_user` you
- configure needs to be a superset of
- :oslo.config:option:`keystone_authtoken.service_token_roles` (The option
- :oslo.config:option:`keystone_authtoken.service_token_roles` is configured
- in cinder, glance and neutron).
diff --git a/doc/source/install/compute-install-obs.rst b/doc/source/install/compute-install-obs.rst
index c5c1d29fb3..c227b6eba4 100644
--- a/doc/source/install/compute-install-obs.rst
+++ b/doc/source/install/compute-install-obs.rst
@@ -92,6 +92,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
.. path /etc/nova/nova.conf
diff --git a/doc/source/install/compute-install-rdo.rst b/doc/source/install/compute-install-rdo.rst
index 0a5ad685a6..0c6203a667 100644
--- a/doc/source/install/compute-install-rdo.rst
+++ b/doc/source/install/compute-install-rdo.rst
@@ -84,6 +84,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
.. path /etc/nova/nova.conf
diff --git a/doc/source/install/compute-install-ubuntu.rst b/doc/source/install/compute-install-ubuntu.rst
index 8605c73316..baf0585e52 100644
--- a/doc/source/install/compute-install-ubuntu.rst
+++ b/doc/source/install/compute-install-ubuntu.rst
@@ -74,6 +74,26 @@ Install and configure components
Comment out or remove any other options in the
``[keystone_authtoken]`` section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
.. path /etc/nova/nova.conf
diff --git a/doc/source/install/controller-install-obs.rst b/doc/source/install/controller-install-obs.rst
index 18499612c3..01b7bb0f5a 100644
--- a/doc/source/install/controller-install-obs.rst
+++ b/doc/source/install/controller-install-obs.rst
@@ -260,6 +260,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the
management interface IP address of the controller node:
diff --git a/doc/source/install/controller-install-rdo.rst b/doc/source/install/controller-install-rdo.rst
index fd2419631e..b6098f1776 100644
--- a/doc/source/install/controller-install-rdo.rst
+++ b/doc/source/install/controller-install-rdo.rst
@@ -247,6 +247,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the
management interface IP address of the controller node:
diff --git a/doc/source/install/controller-install-ubuntu.rst b/doc/source/install/controller-install-ubuntu.rst
index 7282b0b2e2..1363a98ba8 100644
--- a/doc/source/install/controller-install-ubuntu.rst
+++ b/doc/source/install/controller-install-ubuntu.rst
@@ -237,6 +237,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the
management interface IP address of the controller node:
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index 1d17ce1c9f..e64b4a2016 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -538,12 +538,6 @@ class Resource(wsgi.Application):
with ResourceExceptionHandler():
action_result = self.dispatch(meth, request, action_args)
except Fault as ex:
- LOG.debug(f'Request method failure captured:\n'
- f' request: {request}\n'
- f' method: {meth}\n'
- f' exception: {ex}\n'
- f' action_args: {action_args}\n',
- exc_info=1)
response = ex
if not response:
diff --git a/nova/cmd/status.py b/nova/cmd/status.py
index 29e4a5d01e..4a4e28d7e8 100644
--- a/nova/cmd/status.py
+++ b/nova/cmd/status.py
@@ -271,6 +271,15 @@ https://docs.openstack.org/latest/nova/admin/hw_machine_type.html"""))
return upgradecheck.Result(upgradecheck.Code.SUCCESS)
+ def _check_service_user_token(self):
+ if not CONF.service_user.send_service_user_token:
+ msg = (_("""
+Service user token configuration is required for all Nova services.
+For more details see the following:
+https://docs.openstack.org/latest/nova/admin/configuration/service-user-token.html""")) # noqa
+ return upgradecheck.Result(upgradecheck.Code.FAILURE, msg)
+ return upgradecheck.Result(upgradecheck.Code.SUCCESS)
+
# The format of the check functions is to return an upgradecheck.Result
# object with the appropriate upgradecheck.Code and details set. If the
# check hits warnings or failures then those should be stored in the
@@ -294,6 +303,8 @@ https://docs.openstack.org/latest/nova/admin/hw_machine_type.html"""))
(_('Older than N-1 computes'), _check_old_computes),
# Added in Wallaby
(_('hw_machine_type unset'), _check_machine_type_set),
+ # Added in Bobcat
+ (_('Service User Token Configuration'), _check_service_user_token),
)
diff --git a/nova/network/neutron.py b/nova/network/neutron.py
index 163294f041..dd3e4de05d 100644
--- a/nova/network/neutron.py
+++ b/nova/network/neutron.py
@@ -222,13 +222,15 @@ def _get_auth_plugin(context, admin=False):
# support some services (metadata API) where an admin context is used
# without an auth token.
global _ADMIN_AUTH
+ user_auth = None
if admin or (context.is_admin and not context.auth_token):
if not _ADMIN_AUTH:
_ADMIN_AUTH = _load_auth_plugin(CONF)
- return _ADMIN_AUTH
+ user_auth = _ADMIN_AUTH
- if context.auth_token:
- return service_auth.get_auth_plugin(context)
+ if context.auth_token or user_auth:
+ # When user_auth = None, user_auth will be extracted from the context.
+ return service_auth.get_auth_plugin(context, user_auth=user_auth)
# We did not get a user token and we should not be using
# an admin token so log an error
diff --git a/nova/service_auth.py b/nova/service_auth.py
index f5ae0646d8..aa8fd8fa12 100644
--- a/nova/service_auth.py
+++ b/nova/service_auth.py
@@ -30,8 +30,10 @@ def reset_globals():
_SERVICE_AUTH = None
-def get_auth_plugin(context):
- user_auth = context.get_auth_plugin()
+def get_auth_plugin(context, user_auth=None):
+ # user_auth may be passed in when the RequestContext is anonymous, such as
+ # when get_admin_context() is used for API calls by nova-manage.
+ user_auth = user_auth or context.get_auth_plugin()
if CONF.service_user.send_service_user_token:
global _SERVICE_AUTH
diff --git a/nova/tests/fixtures/nova.py b/nova/tests/fixtures/nova.py
index 5fd893e7dc..abfc3ecc6c 100644
--- a/nova/tests/fixtures/nova.py
+++ b/nova/tests/fixtures/nova.py
@@ -1318,6 +1318,77 @@ class PrivsepFixture(fixtures.Fixture):
nova.privsep.sys_admin_pctxt, 'client_mode', False))
+class CGroupsFixture(fixtures.Fixture):
+ """Mocks checks made for available subsystems on the host's control group.
+
+ The fixture mocks all calls made on the host to verify the capabilities
+ provided by its kernel. Through this, one can simulate the underlying
+ system that hosts run on top of and have tests react to the expected
+ outcomes.
+
+ Use sample:
+ >>> cgroups = self.useFixture(CGroupsFixture())
+ >>> cgroups = self.useFixture(CGroupsFixture(version=2))
+ >>> cgroups = self.useFixture(CGroupsFixture())
+ ... cgroups.version = 2
+
+ :attr version: Arranges mocks to simulate a host interacting with nova
+ using the given version of cgroups.
+ Available values are:
+ - 0: All checks related to cgroups will return False.
+ - 1: Checks related to cgroups v1 will return True.
+ - 2: Checks related to cgroups v2 will return True.
+ Defaults to 1.
+ """
+
+ def __init__(self, version=1):
+ self._cpuv1 = None
+ self._cpuv2 = None
+
+ self._version = version
+
+ @property
+ def version(self):
+ return self._version
+
+ @version.setter
+ def version(self, value):
+ self._version = value
+ self._update_mocks()
+
+ def setUp(self):
+ super().setUp()
+ self._cpuv1 = self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.host.Host._has_cgroupsv1_cpu_controller')).mock
+ self._cpuv2 = self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.host.Host._has_cgroupsv2_cpu_controller')).mock
+ self._update_mocks()
+
+ def _update_mocks(self):
+ if not self._cpuv1:
+ return
+
+ if not self._cpuv2:
+ return
+
+ if self.version == 0:
+ self._cpuv1.return_value = False
+ self._cpuv2.return_value = False
+ return
+
+ if self.version == 1:
+ self._cpuv1.return_value = True
+ self._cpuv2.return_value = False
+ return
+
+ if self.version == 2:
+ self._cpuv1.return_value = False
+ self._cpuv2.return_value = True
+ return
+
+ raise ValueError(f"Unknown cgroups version: '{self.version}'.")
+
+
class NoopQuotaDriverFixture(fixtures.Fixture):
"""A fixture to run tests using the NoopQuotaDriver.
diff --git a/nova/tests/functional/libvirt/base.py b/nova/tests/functional/libvirt/base.py
index 7b6ee10631..1ee46a3217 100644
--- a/nova/tests/functional/libvirt/base.py
+++ b/nova/tests/functional/libvirt/base.py
@@ -43,6 +43,7 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
super(ServersTestBase, self).setUp()
self.useFixture(nova_fixtures.LibvirtImageBackendFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture())
self.useFixture(nova_fixtures.OSBrickFixture())
diff --git a/nova/tests/functional/libvirt/test_evacuate.py b/nova/tests/functional/libvirt/test_evacuate.py
index 92d7ffba29..0e89a3cdb6 100644
--- a/nova/tests/functional/libvirt/test_evacuate.py
+++ b/nova/tests/functional/libvirt/test_evacuate.py
@@ -429,6 +429,7 @@ class _LibvirtEvacuateTest(integrated_helpers.InstanceHelperMixin):
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.GlanceFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
fake_network.set_stub_network_methods(self)
api_fixture = self.useFixture(
diff --git a/nova/tests/functional/libvirt/test_numa_servers.py b/nova/tests/functional/libvirt/test_numa_servers.py
index bb428159ad..5b73e1b965 100644
--- a/nova/tests/functional/libvirt/test_numa_servers.py
+++ b/nova/tests/functional/libvirt/test_numa_servers.py
@@ -346,6 +346,76 @@ class NUMAServersTest(NUMAServersTestBase):
# There shouldn't be any hosts available to satisfy this request
self._run_build_test(flavor_id, end_status='ERROR')
+ def test_create_server_with_mixed_policy_asymmetric_multi_numa(self):
+ """Boot an instance stretched to two NUMA nodes requesting only
+ shared CPUs in one NUMA and only dedicated in the other NUMA node.
+ """
+ # shared dedicated
+ # NUMA0 pCPU | 0 | 2 3
+ # NUMA1 pCPU | | 6 7
+ self.flags(
+ cpu_shared_set='0',
+ cpu_dedicated_set='2,3,6,7',
+ group='compute',
+ )
+ self.flags(vcpu_pin_set=None)
+
+ host_info = fakelibvirt.HostInfo(
+ cpu_nodes=2, cpu_sockets=1, cpu_cores=4, cpu_threads=1)
+ self.start_compute(host_info=host_info, hostname='compute1')
+
+ # sanity check the created host topology object; this is really just a
+ # test of the fakelibvirt module
+ host_numa = objects.NUMATopology.obj_from_db_obj(
+ objects.ComputeNode.get_by_nodename(
+ self.ctxt, 'compute1',
+ ).numa_topology
+ )
+ self.assertEqual(2, len(host_numa.cells))
+ self.assertEqual({0}, host_numa.cells[0].cpuset)
+ self.assertEqual({2, 3}, host_numa.cells[0].pcpuset)
+
+ self.assertEqual(set(), host_numa.cells[1].cpuset)
+ self.assertEqual({6, 7}, host_numa.cells[1].pcpuset)
+
+ # create a flavor with 1 shared and 2 dedicated CPUs stretched to
+ # different NUMA nodes
+ extra_spec = {
+ 'hw:cpu_policy': 'mixed',
+ 'hw:cpu_dedicated_mask': '^0',
+ 'hw:numa_nodes': '2',
+ 'hw:numa_cpus.0': '0',
+ 'hw:numa_cpus.1': '1,2',
+ 'hw:numa_mem.0': '256',
+ 'hw:numa_mem.1': '768',
+ }
+ flavor_id = self._create_flavor(
+ vcpu=3, memory_mb=1024, extra_spec=extra_spec)
+ expected_usage = {
+ 'DISK_GB': 20, 'MEMORY_MB': 1024, 'PCPU': 2, 'VCPU': 1,
+ }
+ # The only possible solution (ignoring the order of vCPU1,2):
+ # vCPU 0 => pCPU 0, NUMA0, shared
+ # vCPU 1 => pCPU 6, NUMA1, dedicated
+ # vCPU 2 => pCPU 7, NUMA1, dedicated
+ server = self._run_build_test(
+ flavor_id, expected_usage=expected_usage)
+
+ # sanity check the instance topology
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ self.assertEqual(2, len(inst.numa_topology.cells))
+
+ self.assertEqual({0}, inst.numa_topology.cells[0].cpuset)
+ self.assertEqual(set(), inst.numa_topology.cells[0].pcpuset)
+ self.assertIsNone(inst.numa_topology.cells[0].cpu_pinning)
+
+ self.assertEqual(set(), inst.numa_topology.cells[1].cpuset)
+ self.assertEqual({1, 2}, inst.numa_topology.cells[1].pcpuset)
+ self.assertEqual(
+ {6, 7},
+ set(inst.numa_topology.cells[1].cpu_pinning.values())
+ )
+
def test_create_server_with_dedicated_policy_old_configuration(self):
"""Create a server using the legacy extra spec and configuration.
diff --git a/nova/tests/functional/libvirt/test_vpmem.py b/nova/tests/functional/libvirt/test_vpmem.py
index 1200f80357..cb524fe8b6 100644
--- a/nova/tests/functional/libvirt/test_vpmem.py
+++ b/nova/tests/functional/libvirt/test_vpmem.py
@@ -77,6 +77,7 @@ class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):
'nova.privsep.libvirt.get_pmem_namespaces',
return_value=self.fake_pmem_namespaces))
self.useFixture(nova_fixtures.LibvirtImageBackendFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
return_value={'total': 128,
diff --git a/nova/tests/functional/regressions/test_bug_1595962.py b/nova/tests/functional/regressions/test_bug_1595962.py
index 94421a81f9..9232eea335 100644
--- a/nova/tests/functional/regressions/test_bug_1595962.py
+++ b/nova/tests/functional/regressions/test_bug_1595962.py
@@ -47,6 +47,7 @@ class TestSerialConsoleLiveMigrate(test.TestCase):
'nova.virt.libvirt.guest.libvirt',
fakelibvirt))
self.useFixture(nova_fixtures.LibvirtFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
self.admin_api = api_fixture.admin_api
self.api = api_fixture.api
diff --git a/nova/tests/functional/regressions/test_bug_1995153.py b/nova/tests/functional/regressions/test_bug_1995153.py
new file mode 100644
index 0000000000..f4e61d06df
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1995153.py
@@ -0,0 +1,107 @@
+# Copyright (C) 2023 Red Hat, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import fixtures
+from unittest import mock
+
+from oslo_serialization import jsonutils
+from oslo_utils import units
+
+from nova.objects import fields
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional import integrated_helpers
+from nova.tests.functional.libvirt import base
+
+
+class Bug1995153RegressionTest(
+ base.ServersTestBase,
+ integrated_helpers.InstanceHelperMixin
+):
+
+ ADDITIONAL_FILTERS = ['NUMATopologyFilter', 'PciPassthroughFilter']
+
+ ALIAS_NAME = 'a1'
+ PCI_DEVICE_SPEC = [jsonutils.dumps(
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ }
+ )]
+ # we set the numa_affinity policy to required to ensure strict affinity
+ # between pci devices and the guest cpu and memory is enforced.
+ PCI_ALIAS = [jsonutils.dumps(
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ 'name': ALIAS_NAME,
+ 'device_type': fields.PciDeviceType.STANDARD,
+ 'numa_policy': fields.PCINUMAAffinityPolicy.REQUIRED,
+ }
+ )]
+
+ def setUp(self):
+ super(Bug1995153RegressionTest, self).setUp()
+ self.flags(
+ device_spec=self.PCI_DEVICE_SPEC,
+ alias=self.PCI_ALIAS,
+ group='pci'
+ )
+ host_manager = self.scheduler.manager.host_manager
+ pci_filter_class = host_manager.filter_cls_map['PciPassthroughFilter']
+ host_pass_mock = mock.Mock(wraps=pci_filter_class().host_passes)
+ self.mock_filter = self.useFixture(fixtures.MockPatch(
+ 'nova.scheduler.filters.pci_passthrough_filter'
+ '.PciPassthroughFilter.host_passes',
+ side_effect=host_pass_mock)).mock
+
+ def test_socket_policy_bug_1995153(self):
+ """Previously, the numa_usage_from_instance_numa() method in
+ hardware.py saved the host NUMAToplogy object with NUMACells that have
+ no `socket` set. This was an omission in the original implementation of
+ the `socket` PCI NUMA affinity policy. The consequence was that any
+ code path that called into numa_usage_from_instance_numa() would
+ clobber the host NUMA topology in the database with a socket-less
+ version. Booting an instance with NUMA toplogy would do that, for
+ example. If then a second instance was booted with the `socket` PCI
+ NUMA affinity policy, it would read the socket-less host NUMATopology
+ from the database, and error out with a NotImplementedError. This was
+ bug 1995153. Demonstrate that this is fixed.
+ """
+ host_info = fakelibvirt.HostInfo(
+ cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
+ kB_mem=(16 * units.Gi) // units.Ki)
+ self.flags(cpu_dedicated_set='0-3', group='compute')
+ pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=1)
+
+ self.start_compute(host_info=host_info, pci_info=pci_info)
+
+ extra_spec = {
+ 'hw:cpu_policy': 'dedicated',
+ 'pci_passthrough:alias': '%s:1' % self.ALIAS_NAME,
+ 'hw:pci_numa_affinity_policy': 'socket'
+ }
+ # Boot a first instance with a guest NUMA topology to run the
+ # numa_usage_from_instance_numa() and update the host NUMATopology in
+ # the database.
+ self._create_server(
+ flavor_id=self._create_flavor(
+ extra_spec={'hw:cpu_policy': 'dedicated'}))
+
+ # Boot an instance with the `socket` PCI NUMA affinity policy and
+ # assert that it boots correctly now.
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ self._create_server(flavor_id=flavor_id)
+ self.assertTrue(self.mock_filter.called)
diff --git a/nova/tests/unit/cmd/test_status.py b/nova/tests/unit/cmd/test_status.py
index f5fcc168ee..c6a0ab2d52 100644
--- a/nova/tests/unit/cmd/test_status.py
+++ b/nova/tests/unit/cmd/test_status.py
@@ -446,3 +446,19 @@ class TestCheckMachineTypeUnset(test.NoDBTestCase):
upgradecheck.Code.SUCCESS,
result.code
)
+
+
+class TestUpgradeCheckServiceUserToken(test.NoDBTestCase):
+
+ def setUp(self):
+ super().setUp()
+ self.cmd = status.UpgradeCommands()
+
+ def test_service_user_token_not_configured(self):
+ result = self.cmd._check_service_user_token()
+ self.assertEqual(upgradecheck.Code.FAILURE, result.code)
+
+ def test_service_user_token_configured(self):
+ self.flags(send_service_user_token=True, group='service_user')
+ result = self.cmd._check_service_user_token()
+ self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index 49cf15ec17..f62468544d 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -5672,6 +5672,7 @@ class ComputeTestCase(BaseTestCase,
pagesize=2048,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([1, 2]),
siblings=[set([1]), set([2])],
mempages=[objects.NUMAPagesTopology(
@@ -5687,6 +5688,7 @@ class ComputeTestCase(BaseTestCase,
pagesize=2048,
memory_usage=0,
cpu_usage=0,
+ socket=0,
siblings=[set([3]), set([4])],
mempages=[objects.NUMAPagesTopology(
size_kb=2048, total=256, used=0)])
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
index cd36b8987f..101e96f83f 100644
--- a/nova/tests/unit/compute/test_resource_tracker.py
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -181,6 +181,7 @@ _NUMA_HOST_TOPOLOGIES = {
memory=_2MB,
cpu_usage=0,
memory_usage=0,
+ socket=0,
mempages=[_NUMA_PAGE_TOPOLOGIES['2mb*1024']],
siblings=[set([1]), set([2])],
pinned_cpus=set()),
@@ -191,6 +192,7 @@ _NUMA_HOST_TOPOLOGIES = {
memory=_2MB,
cpu_usage=0,
memory_usage=0,
+ socket=0,
mempages=[_NUMA_PAGE_TOPOLOGIES['2mb*1024']],
siblings=[set([3]), set([4])],
pinned_cpus=set())]),
diff --git a/nova/tests/unit/network/test_neutron.py b/nova/tests/unit/network/test_neutron.py
index 9aa970aca1..c551191e4c 100644
--- a/nova/tests/unit/network/test_neutron.py
+++ b/nova/tests/unit/network/test_neutron.py
@@ -142,6 +142,22 @@ class TestNeutronClient(test.NoDBTestCase):
self.assertIsInstance(cl.httpclient.auth,
service_token.ServiceTokenAuthWrapper)
+ @mock.patch('nova.service_auth._SERVICE_AUTH')
+ @mock.patch('nova.network.neutron._ADMIN_AUTH')
+ @mock.patch.object(ks_loading, 'load_auth_from_conf_options')
+ def test_admin_with_service_token(
+ self, mock_load, mock_admin_auth, mock_service_auth
+ ):
+ self.flags(send_service_user_token=True, group='service_user')
+
+ admin_context = context.get_admin_context()
+
+ cl = neutronapi.get_client(admin_context)
+ self.assertIsInstance(cl.httpclient.auth,
+ service_token.ServiceTokenAuthWrapper)
+ self.assertEqual(mock_admin_auth, cl.httpclient.auth.user_auth)
+ self.assertEqual(mock_service_auth, cl.httpclient.auth.service_auth)
+
@mock.patch.object(client.Client, "list_networks",
side_effect=exceptions.Unauthorized())
def test_Unauthorized_user(self, mock_list_networks):
diff --git a/nova/tests/unit/scheduler/fakes.py b/nova/tests/unit/scheduler/fakes.py
index 658c82c20e..f5dcf87e4a 100644
--- a/nova/tests/unit/scheduler/fakes.py
+++ b/nova/tests/unit/scheduler/fakes.py
@@ -34,6 +34,7 @@ NUMA_TOPOLOGY = objects.NUMATopology(cells=[
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=16, total=387184, used=0),
@@ -46,6 +47,7 @@ NUMA_TOPOLOGY = objects.NUMATopology(cells=[
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=1548736, used=0),
diff --git a/nova/tests/unit/test_service_auth.py b/nova/tests/unit/test_service_auth.py
index 5f07515188..8966af3ce3 100644
--- a/nova/tests/unit/test_service_auth.py
+++ b/nova/tests/unit/test_service_auth.py
@@ -56,3 +56,13 @@ class ServiceAuthTestCase(test.NoDBTestCase):
result = service_auth.get_auth_plugin(self.ctx)
self.assertEqual(1, mock_load.call_count)
self.assertNotIsInstance(result, service_token.ServiceTokenAuthWrapper)
+
+ @mock.patch.object(ks_loading, 'load_auth_from_conf_options',
+ new=mock.Mock())
+ def test_get_auth_plugin_user_auth(self):
+ self.flags(send_service_user_token=True, group='service_user')
+ user_auth = mock.Mock()
+
+ result = service_auth.get_auth_plugin(self.ctx, user_auth=user_auth)
+
+ self.assertEqual(user_auth, result.user_auth)
diff --git a/nova/tests/unit/virt/hyperv/test_vmops.py b/nova/tests/unit/virt/hyperv/test_vmops.py
index 07e1774f9a..1e3e50f92b 100644
--- a/nova/tests/unit/virt/hyperv/test_vmops.py
+++ b/nova/tests/unit/virt/hyperv/test_vmops.py
@@ -1129,7 +1129,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
mock_unplug_vifs.assert_called_once_with(
mock_instance, mock.sentinel.fake_network_info)
mock_disconnect_volumes.assert_called_once_with(
- mock.sentinel.FAKE_BD_INFO)
+ mock.sentinel.FAKE_BD_INFO, force=True)
mock_delete_disk_files.assert_called_once_with(
mock_instance.name)
diff --git a/nova/tests/unit/virt/hyperv/test_volumeops.py b/nova/tests/unit/virt/hyperv/test_volumeops.py
index 66d2c2527f..f289d03632 100644
--- a/nova/tests/unit/virt/hyperv/test_volumeops.py
+++ b/nova/tests/unit/virt/hyperv/test_volumeops.py
@@ -141,7 +141,13 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
self._volumeops.disconnect_volumes(block_device_info)
fake_volume_driver.disconnect_volume.assert_called_once_with(
- block_device_mapping[0]['connection_info'])
+ block_device_mapping[0]['connection_info'], force=False)
+
+ # Verify force=True
+ fake_volume_driver.disconnect_volume.reset_mock()
+ self._volumeops.disconnect_volumes(block_device_info, force=True)
+ fake_volume_driver.disconnect_volume.assert_called_once_with(
+ block_device_mapping[0]['connection_info'], force=True)
@mock.patch('time.sleep')
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
@@ -181,7 +187,7 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
if attach_failed:
fake_volume_driver.disconnect_volume.assert_called_once_with(
- fake_conn_info)
+ fake_conn_info, force=False)
mock_sleep.assert_has_calls(
[mock.call(CONF.hyperv.volume_attach_retry_interval)] *
CONF.hyperv.volume_attach_retry_count)
@@ -203,7 +209,13 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
mock_get_volume_driver.assert_called_once_with(
mock.sentinel.conn_info)
fake_volume_driver.disconnect_volume.assert_called_once_with(
- mock.sentinel.conn_info)
+ mock.sentinel.conn_info, force=False)
+
+ # Verify force=True
+ fake_volume_driver.disconnect_volume.reset_mock()
+ self._volumeops.disconnect_volume(mock.sentinel.conn_info, force=True)
+ fake_volume_driver.disconnect_volume.assert_called_once_with(
+ mock.sentinel.conn_info, force=True)
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
def test_detach_volume(self, mock_get_volume_driver):
@@ -347,7 +359,13 @@ class BaseVolumeDriverTestCase(test_base.HyperVBaseTestCase):
self._base_vol_driver.disconnect_volume(conn_info)
self._conn.disconnect_volume.assert_called_once_with(
- conn_info['data'])
+ conn_info['data'], force=False)
+
+ # Verify force=True
+ self._conn.disconnect_volume.reset_mock()
+ self._base_vol_driver.disconnect_volume(conn_info, force=True)
+ self._conn.disconnect_volume.assert_called_once_with(
+ conn_info['data'], force=True)
@mock.patch.object(volumeops.BaseVolumeDriver, '_get_disk_res_path')
def _test_get_disk_resource_path_by_conn_info(self,
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 2b58c7df8b..774498b69f 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -740,6 +740,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
imagebackend.Image._get_driver_format)
self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture())
+ self.cgroups = self.useFixture(nova_fixtures.CGroupsFixture())
# ensure tests perform the same on all host architectures; this is
# already done by the fakelibvirt fixture but we want to change the
@@ -3093,9 +3094,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'fake-flavor', 'fake-image-meta').obj_to_primitive())
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_fits(self, is_able):
+ def test_get_guest_config_numa_host_instance_fits(self):
self.flags(cpu_shared_set=None, cpu_dedicated_set=None,
group='compute')
instance_ref = objects.Instance(**self.test_instance)
@@ -3133,9 +3132,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@mock.patch('nova.privsep.utils.supports_direct_io',
new=mock.Mock(return_value=True))
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_no_fit(self, is_able):
+ def test_get_guest_config_numa_host_instance_no_fit(self):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
@@ -3563,10 +3560,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
host_topology, inst_topology, numa_tune)
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_pci_no_numa_info(
- self, is_able):
+ def test_get_guest_config_numa_host_instance_pci_no_numa_info(self):
self.flags(cpu_shared_set='3', cpu_dedicated_set=None,
group='compute')
@@ -3620,10 +3614,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@mock.patch('nova.privsep.utils.supports_direct_io',
new=mock.Mock(return_value=True))
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_2pci_no_fit(
- self, is_able):
+ def test_get_guest_config_numa_host_instance_2pci_no_fit(self):
self.flags(cpu_shared_set='3', cpu_dedicated_set=None,
group='compute')
instance_ref = objects.Instance(**self.test_instance)
@@ -3740,10 +3731,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
None)
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(
- self, is_able):
+ def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(self):
self.flags(cpu_shared_set='2-3', cpu_dedicated_set=None,
group='compute')
@@ -3782,10 +3770,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertIsNone(cfg.cpu.numa)
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_non_numa_host_instance_topo(
- self, is_able):
+ def test_get_guest_config_non_numa_host_instance_topo(self):
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
@@ -3833,10 +3818,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
numa_cfg_cell.memory)
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_topo(
- self, is_able):
+ def test_get_guest_config_numa_host_instance_topo(self):
self.flags(cpu_shared_set='0-5', cpu_dedicated_set=None,
group='compute')
@@ -7310,9 +7292,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
[],
image_meta, disk_info)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_with_cpu_quota(self, is_able):
+ def test_get_guest_config_with_cpu_quota(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -7648,9 +7628,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.flags(images_type='rbd', group='libvirt')
self._test_get_guest_config_disk_cachemodes('rbd')
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_with_bogus_cpu_quota(self, is_able):
+ def test_get_guest_config_with_bogus_cpu_quota(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -7668,9 +7646,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr._get_guest_config,
instance_ref, [], image_meta, disk_info)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=False)
- def test_get_update_guest_cputune(self, is_able):
+ def test_get_update_guest_cputune(self):
+ # No CPU controller on the host
+ self.cgroups.version = 0
+
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
@@ -9743,7 +9722,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr._disconnect_volume(
self.context, fake_connection_info, fake_instance_1)
mock_volume_driver.disconnect_volume.assert_called_once_with(
- fake_connection_info, fake_instance_1)
+ fake_connection_info, fake_instance_1, force=False)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_detach_encryptor')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
@@ -10117,7 +10096,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
device_name='vdc',
),
mock.call.detach_encryptor(**encryption),
- mock.call.disconnect_volume(connection_info, instance)])
+ mock.call.disconnect_volume(
+ connection_info,
+ instance,
+ force=False,
+ )
+ ])
get_device_conf_func = mock_detach_with_retry.mock_calls[0][1][2]
self.assertEqual(mock_guest.get_disk, get_device_conf_func.func)
self.assertEqual(('vdc',), get_device_conf_func.args)
@@ -20495,16 +20479,64 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.context,
mock.sentinel.connection_info,
instance,
- destroy_secrets=False
+ destroy_secrets=False,
+ force=True
),
mock.call(
self.context,
mock.sentinel.connection_info,
instance,
- destroy_secrets=True
+ destroy_secrets=True,
+ force=True
)
])
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_driver')
+ @mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver._should_disconnect_target',
+ new=mock.Mock(return_value=True))
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._detach_encryptor',
+ new=mock.Mock())
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain',
+ new=mock.Mock())
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_vpmems',
+ new=mock.Mock(return_value=None))
+ def test_cleanup_disconnect_volume(self, mock_vol_driver):
+ """Verify that we call disconnect_volume() with force=True
+
+ cleanup() is called by destroy() when an instance is being deleted and
+ force=True should be passed down to os-brick's disconnect_volume()
+ call, which will ensure removal of devices regardless of errors.
+
+ We need to ensure that devices are removed when an instance is being
+ deleted to avoid leaving leftover devices that could later be
+ erroneously connected by external entities (example: multipathd) to
+ instances that should not have access to the volumes.
+
+ See https://bugs.launchpad.net/nova/+bug/2004555 for details.
+ """
+ connection_info = mock.MagicMock()
+ block_device_info = {
+ 'block_device_mapping': [
+ {
+ 'connection_info': connection_info
+ }
+ ]
+ }
+ instance = objects.Instance(self.context, **self.test_instance)
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ drvr.cleanup(
+ self.context,
+ instance,
+ network_info={},
+ block_device_info=block_device_info,
+ destroy_vifs=False,
+ destroy_disks=False,
+ )
+ mock_vol_driver.return_value.disconnect_volume.assert_called_once_with(
+ connection_info, instance, force=True)
+
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_allow_native_luksv1')
def test_swap_volume_native_luks_blocked(self, mock_allow_native_luksv1,
@@ -22348,6 +22380,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.flags(sysinfo_serial="none", group="libvirt")
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
self.useFixture(nova_fixtures.LibvirtFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
os_vif.initialize()
self.drvr = libvirt_driver.LibvirtDriver(
diff --git a/nova/tests/unit/virt/libvirt/test_host.py b/nova/tests/unit/virt/libvirt/test_host.py
index 631b10d81a..a76dc83105 100644
--- a/nova/tests/unit/virt/libvirt/test_host.py
+++ b/nova/tests/unit/virt/libvirt/test_host.py
@@ -1619,25 +1619,59 @@ Active: 8381604 kB
self.host.compare_cpu("cpuxml")
mock_compareCPU.assert_called_once_with("cpuxml", 0)
- def test_is_cpu_control_policy_capable_ok(self):
+ def test_is_cpu_control_policy_capable_via_neither(self):
+ self.useFixture(nova_fixtures.CGroupsFixture(version=0))
+ self.assertFalse(self.host.is_cpu_control_policy_capable())
+
+ def test_is_cpu_control_policy_capable_via_cgroupsv1(self):
+ self.useFixture(nova_fixtures.CGroupsFixture(version=1))
+ self.assertTrue(self.host.is_cpu_control_policy_capable())
+
+ def test_is_cpu_control_policy_capable_via_cgroupsv2(self):
+ self.useFixture(nova_fixtures.CGroupsFixture(version=2))
+ self.assertTrue(self.host.is_cpu_control_policy_capable())
+
+ def test_has_cgroupsv1_cpu_controller_ok(self):
m = mock.mock_open(
- read_data="""cg /cgroup/cpu,cpuacct cg opt1,cpu,opt3 0 0
-cg /cgroup/memory cg opt1,opt2 0 0
-""")
- with mock.patch('builtins.open', m, create=True):
- self.assertTrue(self.host.is_cpu_control_policy_capable())
+ read_data=(
+ "cg /cgroup/cpu,cpuacct cg opt1,cpu,opt3 0 0"
+ "cg /cgroup/memory cg opt1,opt2 0 0"
+ )
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertTrue(self.host._has_cgroupsv1_cpu_controller())
- def test_is_cpu_control_policy_capable_ko(self):
+ def test_has_cgroupsv1_cpu_controller_ko(self):
m = mock.mock_open(
- read_data="""cg /cgroup/cpu,cpuacct cg opt1,opt2,opt3 0 0
-cg /cgroup/memory cg opt1,opt2 0 0
-""")
- with mock.patch('builtins.open', m, create=True):
- self.assertFalse(self.host.is_cpu_control_policy_capable())
+ read_data=(
+ "cg /cgroup/cpu,cpuacct cg opt1,opt2,opt3 0 0"
+ "cg /cgroup/memory cg opt1,opt2 0 0"
+ )
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertFalse(self.host._has_cgroupsv1_cpu_controller())
- @mock.patch('builtins.open', side_effect=IOError)
- def test_is_cpu_control_policy_capable_ioerror(self, mock_open):
- self.assertFalse(self.host.is_cpu_control_policy_capable())
+ @mock.patch("builtins.open", side_effect=IOError)
+ def test_has_cgroupsv1_cpu_controller_ioerror(self, _):
+ self.assertFalse(self.host._has_cgroupsv1_cpu_controller())
+
+ def test_has_cgroupsv2_cpu_controller_ok(self):
+ m = mock.mock_open(
+ read_data="cpuset cpu io memory hugetlb pids rdma misc"
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertTrue(self.host._has_cgroupsv2_cpu_controller())
+
+ def test_has_cgroupsv2_cpu_controller_ko(self):
+ m = mock.mock_open(
+ read_data="memory pids"
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertFalse(self.host._has_cgroupsv2_cpu_controller())
+
+ @mock.patch("builtins.open", side_effect=IOError)
+ def test_has_cgroupsv2_cpu_controller_ioerror(self, _):
+ self.assertFalse(self.host._has_cgroupsv2_cpu_controller())
def test_get_canonical_machine_type(self):
# this test relies on configuration from the FakeLibvirtFixture
diff --git a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
index 06065322f6..55054652c3 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
@@ -81,3 +81,23 @@ class LibvirtFibreChannelVolumeDriverTestCase(
self.assertEqual(requested_size, new_size)
libvirt_driver.connector.extend_volume.assert_called_once_with(
connection_info['data'])
+
+ def test_disconnect_volume(self):
+ device_path = '/dev/fake-dev'
+ connection_info = {'data': {'device_path': device_path}}
+
+ libvirt_driver = fibrechannel.LibvirtFibreChannelVolumeDriver(
+ self.fake_host)
+ libvirt_driver.connector.disconnect_volume = mock.MagicMock()
+ libvirt_driver.disconnect_volume(
+ connection_info, mock.sentinel.instance)
+
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], connection_info['data'], force=False)
+
+ # Verify force=True
+ libvirt_driver.connector.disconnect_volume.reset_mock()
+ libvirt_driver.disconnect_volume(
+ connection_info, mock.sentinel.instance, force=True)
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], connection_info['data'], force=True)
diff --git a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
index bd516b1dd6..a1111e0d12 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
@@ -57,10 +57,19 @@ class LibvirtISCSIVolumeDriverTestCase(
device=device_path))
libvirt_driver.disconnect_volume(connection_info,
mock.sentinel.instance)
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], None, force=False)
msg = mock_LOG_warning.call_args_list[0]
self.assertIn('Ignoring VolumeDeviceNotFound', msg[0][0])
+ # Verify force=True
+ libvirt_driver.connector.disconnect_volume.reset_mock()
+ libvirt_driver.disconnect_volume(
+ connection_info, mock.sentinel.instance, force=True)
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], None, force=True)
+
def test_extend_volume(self):
device_path = '/dev/fake-dev'
connection_info = {'data': {'device_path': device_path}}
diff --git a/nova/tests/unit/virt/libvirt/volume/test_lightos.py b/nova/tests/unit/virt/libvirt/volume/test_lightos.py
index 8a85d73059..f97a696a53 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_lightos.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_lightos.py
@@ -62,7 +62,13 @@ class LibvirtLightVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
connection_info = {'data': disk_info}
lightos_driver.disconnect_volume(connection_info, None)
lightos_driver.connector.disconnect_volume.assert_called_once_with(
- disk_info, None)
+ disk_info, None, force=False)
+
+ # Verify force=True
+ lightos_driver.connector.disconnect_volume.reset_mock()
+ lightos_driver.disconnect_volume(connection_info, None, force=True)
+ lightos_driver.connector.disconnect_volume.assert_called_once_with(
+ disk_info, None, force=True)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_nvme.py b/nova/tests/unit/virt/libvirt/volume/test_nvme.py
index 3f593841fa..42ef0adc8d 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_nvme.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_nvme.py
@@ -77,7 +77,13 @@ class LibvirtNVMEVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
connection_info = {'data': disk_info}
nvme_driver.disconnect_volume(connection_info, None)
nvme_driver.connector.disconnect_volume.assert_called_once_with(
- disk_info, None)
+ disk_info, None, force=False)
+
+ # Verify force=True
+ nvme_driver.connector.disconnect_volume.reset_mock()
+ nvme_driver.disconnect_volume(connection_info, None, force=True)
+ nvme_driver.connector.disconnect_volume.assert_called_once_with(
+ disk_info, None, force=True)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_scaleio.py b/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
index f0fcba1deb..7d93691d9d 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
@@ -49,7 +49,13 @@ class LibvirtScaleIOVolumeDriverTestCase(
conn = {'data': mock.sentinel.conn_data}
sio.disconnect_volume(conn, mock.sentinel.instance)
sio.connector.disconnect_volume.assert_called_once_with(
- mock.sentinel.conn_data, None)
+ mock.sentinel.conn_data, None, force=False)
+
+ # Verify force=True
+ sio.connector.disconnect_volume.reset_mock()
+ sio.disconnect_volume(conn, mock.sentinel.instance, force=True)
+ sio.connector.disconnect_volume.assert_called_once_with(
+ mock.sentinel.conn_data, None, force=True)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_storpool.py b/nova/tests/unit/virt/libvirt/volume/test_storpool.py
index 678d4f8eb4..a3252b8525 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_storpool.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_storpool.py
@@ -53,9 +53,11 @@ class MockStorPoolConnector(object):
}
return {'type': 'block', 'path': test_attached[v]['path']}
- def disconnect_volume(self, connection_info, device_info):
+ def disconnect_volume(self, connection_info, device_info, **kwargs):
self.inst.assertIn('client_id', connection_info)
self.inst.assertIn('volume', connection_info)
+ self.inst.assertIn('force', kwargs)
+ self.inst.assertEqual(self.inst.force, kwargs.get('force'))
v = connection_info['volume']
if v not in test_attached:
@@ -86,6 +88,11 @@ class MockStorPoolInitiator(object):
class LibvirtStorPoolVolumeDriverTestCase(
test_volume.LibvirtVolumeBaseTestCase):
+ def setUp(self):
+ super().setUp()
+ # This is for testing the force flag of disconnect_volume()
+ self.force = False
+
def mock_storpool(f):
def _config_inner_inner1(inst, *args, **kwargs):
@mock.patch(
@@ -175,3 +182,10 @@ class LibvirtStorPoolVolumeDriverTestCase(
libvirt_driver.disconnect_volume(ci_2, mock.sentinel.instance)
self.assertDictEqual({}, test_attached)
+
+ # Connect the volume again so we can detach it again
+ libvirt_driver.connect_volume(ci_2, mock.sentinel.instance)
+ # Verify force=True
+ self.force = True
+ libvirt_driver.disconnect_volume(
+ ci_2, mock.sentinel.instance, force=True)
diff --git a/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py b/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
index 168efee944..c9e455b193 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
@@ -95,7 +95,13 @@ class LibvirtVZStorageTestCase(test_volume.LibvirtVolumeBaseTestCase):
conn = {'data': mock.sentinel.conn_data}
drv.disconnect_volume(conn, mock.sentinel.instance)
drv.connector.disconnect_volume.assert_called_once_with(
- mock.sentinel.conn_data, None)
+ mock.sentinel.conn_data, None, force=False)
+
+ # Verify force=True
+ drv.connector.disconnect_volume.reset_mock()
+ drv.disconnect_volume(conn, mock.sentinel.instance, force=True)
+ drv.connector.disconnect_volume.assert_called_once_with(
+ mock.sentinel.conn_data, None, force=True)
def test_libvirt_vzstorage_driver_get_config(self):
libvirt_driver = vzstorage.LibvirtVZStorageVolumeDriver(self.fake_host)
diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py
index 753ee41550..ab51a3e26c 100644
--- a/nova/tests/unit/virt/test_hardware.py
+++ b/nova/tests/unit/virt/test_hardware.py
@@ -2023,6 +2023,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=256,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=32768, used=0),
@@ -2036,6 +2037,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=256,
cpu_usage=0,
memory_usage=0,
+ socket=1,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=32768, used=64),
@@ -2049,6 +2051,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=2,
cpu_usage=0,
memory_usage=0,
+ socket=2,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=16)],
@@ -2130,6 +2133,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=160,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=32768, used=32),
@@ -2170,6 +2174,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=1024,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2181,6 +2186,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2192,6 +2198,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2258,6 +2265,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=1024,
cpu_usage=2,
memory_usage=512,
+ socket=0,
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
siblings=[set([0]), set([1]), set([2]), set([3])],
@@ -2269,6 +2277,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=1,
memory_usage=512,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2280,6 +2289,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=256,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2330,6 +2340,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=2048, total=512, used=128,
@@ -2342,6 +2353,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=1048576, total=5, used=2,
@@ -2606,6 +2618,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=2,
memory_usage=2048,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)],
@@ -2616,6 +2629,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=2,
memory_usage=2048,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)],
@@ -4162,6 +4176,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4191,6 +4206,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 3]),
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)],
@@ -4220,6 +4236,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4248,6 +4265,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0, 2]), set([1, 3])],
mempages=[objects.NUMAPagesTopology(
@@ -4274,6 +4292,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 3]),
siblings=[set([0, 2]), set([1, 3])],
mempages=[objects.NUMAPagesTopology(
@@ -4300,6 +4319,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4326,6 +4346,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 3]),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4355,6 +4376,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([2]),
siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
mempages=[objects.NUMAPagesTopology(
@@ -4385,6 +4407,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([2, 6, 7]),
siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
mempages=[objects.NUMAPagesTopology(
@@ -4417,6 +4440,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
cpu_usage=2,
memory_usage=0,
pinned_cpus=set(),
+ socket=0,
siblings=[{cpu} for cpu in range(8)],
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)]
@@ -4450,6 +4474,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 3]),
siblings=[{cpu} for cpu in range(8)],
mempages=[objects.NUMAPagesTopology(
@@ -4492,6 +4517,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0, 5]), set([1, 6]), set([2, 7]), set([3, 8]),
set([4, 9])],
@@ -4531,6 +4557,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 5, 6, 7]),
siblings=[set([0, 5]), set([1, 6]), set([2, 7]), set([3, 8]),
set([4, 9])],
@@ -4766,6 +4793,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1])],
mempages=[objects.NUMAPagesTopology(
@@ -4777,6 +4805,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
diff --git a/nova/tests/unit/virt/test_virt_drivers.py b/nova/tests/unit/virt/test_virt_drivers.py
index 58fa3d4c27..ed9f1e3822 100644
--- a/nova/tests/unit/virt/test_virt_drivers.py
+++ b/nova/tests/unit/virt/test_virt_drivers.py
@@ -832,6 +832,7 @@ class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
# This is needed for the live migration tests which spawn off the
# operation for monitoring.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
# When destroying an instance, os-vif will try to execute some commands
# which hang tests so let's just stub out the unplug call to os-vif
# since we don't care about it.
diff --git a/nova/tests/unit/volume/test_cinder.py b/nova/tests/unit/volume/test_cinder.py
index e53ebe3cb8..f9080726fb 100644
--- a/nova/tests/unit/volume/test_cinder.py
+++ b/nova/tests/unit/volume/test_cinder.py
@@ -1276,3 +1276,14 @@ class CinderClientTestCase(test.NoDBTestCase):
admin_ctx = context.get_admin_context()
params = cinder._get_cinderclient_parameters(admin_ctx)
self.assertEqual(params[0], mock_admin_auth)
+
+ @mock.patch('nova.service_auth._SERVICE_AUTH')
+ @mock.patch('nova.volume.cinder._ADMIN_AUTH')
+ def test_admin_context_without_user_token_but_with_service_token(
+ self, mock_admin_auth, mock_service_auth
+ ):
+ self.flags(send_service_user_token=True, group='service_user')
+ admin_ctx = context.get_admin_context()
+ params = cinder._get_cinderclient_parameters(admin_ctx)
+ self.assertEqual(mock_admin_auth, params[0].user_auth)
+ self.assertEqual(mock_service_auth, params[0].service_auth)
diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py
index c8f8bb2481..9693e405d3 100644
--- a/nova/virt/hardware.py
+++ b/nova/virt/hardware.py
@@ -869,7 +869,7 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
instance_cell.pcpuset)
cpuset_reserved = _get_reserved(
sibling_sets[1], pinning, num_cpu_reserved=num_cpu_reserved)
- if not pinning or (num_cpu_reserved and not cpuset_reserved):
+ if pinning is None or (num_cpu_reserved and not cpuset_reserved):
continue
break
@@ -895,7 +895,7 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
cpuset_reserved = _get_reserved(
sibling_set, pinning, num_cpu_reserved=num_cpu_reserved)
- if not pinning or (num_cpu_reserved and not cpuset_reserved):
+ if pinning is None or (num_cpu_reserved and not cpuset_reserved):
return
LOG.debug('Selected cores for pinning: %s, in cell %s', pinning,
host_cell.id)
@@ -2583,6 +2583,7 @@ def numa_usage_from_instance_numa(host_topology, instance_topology,
cpuset=host_cell.cpuset,
pcpuset=host_cell.pcpuset,
memory=host_cell.memory,
+ socket=host_cell.socket,
cpu_usage=0,
memory_usage=0,
mempages=host_cell.mempages,
@@ -2607,8 +2608,10 @@ def numa_usage_from_instance_numa(host_topology, instance_topology,
None, fields.CPUAllocationPolicy.SHARED,
):
continue
-
- pinned_cpus = set(instance_cell.cpu_pinning.values())
+ if instance_cell.cpu_pinning:
+ pinned_cpus = set(instance_cell.cpu_pinning.values())
+ else:
+ pinned_cpus = set()
if instance_cell.cpuset_reserved:
pinned_cpus |= instance_cell.cpuset_reserved
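
For illustration of the two `pinning is None` fixes above: as the `cpu_pinning` guard in the last hunk suggests, the computed pinning can legitimately be an empty mapping (nothing needed pinning), and an empty dict is falsy in Python, so the old `not pinning` test would abort placement even on success; only `None` should do that. A toy sketch, not Nova code:

    # Hypothetical values showing why the check must be "is None":
    pinning = {}                 # a valid result: nothing needed pinning
    assert not pinning           # falsy, so "not pinning" would wrongly give up
    assert pinning is not None   # "is None" lets the empty mapping through
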
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index 3ec7e90c30..08adeada76 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -747,7 +747,7 @@ class VMOps(object):
# should be disconnected even if the VM doesn't exist anymore,
# so they are not leaked.
self.unplug_vifs(instance, network_info)
- self._volumeops.disconnect_volumes(block_device_info)
+ self._volumeops.disconnect_volumes(block_device_info, force=True)
if destroy_disks:
self._delete_disk_files(instance_name)
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index da5b40f375..d2bfed2441 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -59,10 +59,10 @@ class VolumeOps(object):
for vol in volumes:
self.attach_volume(vol['connection_info'], instance_name)
- def disconnect_volumes(self, block_device_info):
+ def disconnect_volumes(self, block_device_info, force=False):
mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in mapping:
- self.disconnect_volume(vol['connection_info'])
+ self.disconnect_volume(vol['connection_info'], force=force)
def attach_volume(self, connection_info, instance_name,
disk_bus=constants.CTRL_TYPE_SCSI):
@@ -116,9 +116,9 @@ class VolumeOps(object):
volume_driver.set_disk_qos_specs(connection_info,
qos_specs)
- def disconnect_volume(self, connection_info):
+ def disconnect_volume(self, connection_info, force=False):
volume_driver = self._get_volume_driver(connection_info)
- volume_driver.disconnect_volume(connection_info)
+ volume_driver.disconnect_volume(connection_info, force=force)
def detach_volume(self, connection_info, instance_name):
LOG.debug("Detaching volume: %(connection_info)s "
@@ -231,8 +231,8 @@ class BaseVolumeDriver(object):
def connect_volume(self, connection_info):
return self._connector.connect_volume(connection_info['data'])
- def disconnect_volume(self, connection_info):
- self._connector.disconnect_volume(connection_info['data'])
+ def disconnect_volume(self, connection_info, force=False):
+ self._connector.disconnect_volume(connection_info['data'], force=force)
def get_disk_resource_path(self, connection_info):
disk_paths = self._connector.get_volume_paths(connection_info['data'])
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 510dfdf38f..7f02c8ed47 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -1657,7 +1657,7 @@ class LibvirtDriver(driver.ComputeDriver):
try:
self._disconnect_volume(
context, connection_info, instance,
- destroy_secrets=destroy_secrets)
+ destroy_secrets=destroy_secrets, force=True)
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if cleanup_instance_disks:
@@ -1974,7 +1974,7 @@ class LibvirtDriver(driver.ComputeDriver):
return (False if connection_count > 1 else True)
def _disconnect_volume(self, context, connection_info, instance,
- encryption=None, destroy_secrets=True):
+ encryption=None, destroy_secrets=True, force=False):
self._detach_encryptor(
context,
connection_info,
@@ -1986,7 +1986,8 @@ class LibvirtDriver(driver.ComputeDriver):
multiattach = connection_info.get('multiattach', False)
if self._should_disconnect_target(
context, instance, multiattach, vol_driver, volume_id):
- vol_driver.disconnect_volume(connection_info, instance)
+ vol_driver.disconnect_volume(
+ connection_info, instance, force=force)
else:
LOG.info('Detected multiple connections on this host for '
'volume: %(volume)s, skipping target disconnect.',
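
The force flag introduced above flows from instance cleanup down to os-brick: cleanup calls `_disconnect_volume(..., force=True)`, which hands `force=force` to the selected volume driver, which (as the per-driver hunks below show) forwards it to the connector, where it is understood to make the detach best-effort. A minimal sketch of the pattern with a stand-in connector, not the real driver classes:

    class ToyVolumeDriver:
        """Stand-in showing how force= threads down to os-brick (sketch)."""

        def __init__(self, connector):
            self.connector = connector  # an os-brick connector in real code

        def disconnect_volume(self, connection_info, instance, force=False):
            # Drivers forward force= so the connector may press on past
            # flush/detach errors.
            self.connector.disconnect_volume(
                connection_info['data'], None, force=force)

        def cleanup(self, connection_info, instance):
            # Instance cleanup always force-detaches so devices are not leaked.
            self.disconnect_volume(connection_info, instance, force=True)
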
diff --git a/nova/virt/libvirt/host.py b/nova/virt/libvirt/host.py
index 9658a5791d..1ae86d9f47 100644
--- a/nova/virt/libvirt/host.py
+++ b/nova/virt/libvirt/host.py
@@ -1643,15 +1643,44 @@ class Host(object):
CONFIG_CGROUP_SCHED may be disabled in some kernel configs to
improve scheduler latency.
"""
+        return (self._has_cgroupsv1_cpu_controller() or
+                self._has_cgroupsv2_cpu_controller())
+
+ def _has_cgroupsv1_cpu_controller(self):
+        LOG.debug(f"Searching host '{self.get_hostname()}' "
+                  "for a CPU controller via cgroups v1...")
try:
with open("/proc/self/mounts", "r") as fd:
for line in fd.readlines():
# mount options and split options
bits = line.split()[3].split(",")
if "cpu" in bits:
+ LOG.debug("CPU controller found on host.")
+ return True
+ LOG.debug("CPU controller missing on host.")
+ return False
+ except IOError as ex:
+            LOG.debug(f"Search failed due to: '{ex}'. "
+                      "The host may not be running under cgroups v1; "
+                      "treating the CPU controller as missing.")
+ return False
+
+ def _has_cgroupsv2_cpu_controller(self):
+        LOG.debug(f"Searching host '{self.get_hostname()}' "
+                  "for a CPU controller via cgroups v2...")
+ try:
+ with open("/sys/fs/cgroup/cgroup.controllers", "r") as fd:
+ for line in fd.readlines():
+ bits = line.split()
+ if "cpu" in bits:
+ LOG.debug("CPU controller found on host.")
return True
+ LOG.debug("CPU controller missing on host.")
return False
- except IOError:
+ except IOError as ex:
+            LOG.debug(f"Search failed due to: '{ex}'. "
+                      "The host may not be running under cgroups v2; "
+                      "treating the CPU controller as missing.")
return False
def get_canonical_machine_type(self, arch, machine) -> str:
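
For reference, the combined v1/v2 probe above reduces to two file reads. A standalone sketch, assuming the standard kernel paths are in place:

    def has_cpu_controller():
        """Best-effort probe for a host CPU cgroup controller (sketch)."""
        try:
            # cgroups v1: the controller appears in the mount options column.
            with open("/proc/self/mounts") as fd:
                for line in fd:
                    if "cpu" in line.split()[3].split(","):
                        return True
        except (IOError, IndexError):
            pass
        try:
            # cgroups v2: the unified hierarchy lists enabled controllers here.
            with open("/sys/fs/cgroup/cgroup.controllers") as fd:
                return "cpu" in fd.read().split()
        except IOError:
            return False
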
diff --git a/nova/virt/libvirt/volume/fibrechannel.py b/nova/virt/libvirt/volume/fibrechannel.py
index 22c65e99c0..1752f6d0cc 100644
--- a/nova/virt/libvirt/volume/fibrechannel.py
+++ b/nova/virt/libvirt/volume/fibrechannel.py
@@ -59,7 +59,7 @@ class LibvirtFibreChannelVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
connection_info['data']['multipath_id'] = \
device_info['multipath_id']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach FC Volume", instance=instance)
@@ -69,11 +69,12 @@ class LibvirtFibreChannelVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
# the 2nd param of disconnect_volume and be consistent
# with the rest of the connectors.
self.connector.disconnect_volume(connection_info['data'],
- connection_info['data'])
+ connection_info['data'],
+ force=force)
LOG.debug("Disconnected FC Volume", instance=instance)
super(LibvirtFibreChannelVolumeDriver,
- self).disconnect_volume(connection_info, instance)
+ self).disconnect_volume(connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size):
"""Extend the volume."""
diff --git a/nova/virt/libvirt/volume/fs.py b/nova/virt/libvirt/volume/fs.py
index 5fb9af4a52..992ef45016 100644
--- a/nova/virt/libvirt/volume/fs.py
+++ b/nova/virt/libvirt/volume/fs.py
@@ -116,7 +116,7 @@ class LibvirtMountedFileSystemVolumeDriver(LibvirtBaseFileSystemVolumeDriver,
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Disconnect the volume."""
vol_name = connection_info['data']['name']
mountpoint = self._get_mount_path(connection_info)
diff --git a/nova/virt/libvirt/volume/iscsi.py b/nova/virt/libvirt/volume/iscsi.py
index 564bac14cc..2b25972a49 100644
--- a/nova/virt/libvirt/volume/iscsi.py
+++ b/nova/virt/libvirt/volume/iscsi.py
@@ -66,19 +66,20 @@ class LibvirtISCSIVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach iSCSI Volume", instance=instance)
try:
- self.connector.disconnect_volume(connection_info['data'], None)
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
except os_brick_exception.VolumeDeviceNotFound as exc:
LOG.warning('Ignoring VolumeDeviceNotFound: %s', exc)
return
LOG.debug("Disconnected iSCSI Volume", instance=instance)
super(LibvirtISCSIVolumeDriver,
- self).disconnect_volume(connection_info, instance)
+ self).disconnect_volume(connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size):
"""Extend the volume."""
diff --git a/nova/virt/libvirt/volume/lightos.py b/nova/virt/libvirt/volume/lightos.py
index d6d393994e..6a22bf6dc6 100644
--- a/nova/virt/libvirt/volume/lightos.py
+++ b/nova/virt/libvirt/volume/lightos.py
@@ -42,14 +42,15 @@ class LibvirtLightOSVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
LOG.debug("Connecting NVMe volume with device_info %s", device_info)
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from the instance."""
LOG.debug("Disconnecting NVMe disk. instance:%s, volume_id:%s",
connection_info.get("instance", ""),
connection_info.get("volume_id", ""))
- self.connector.disconnect_volume(connection_info['data'], None)
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
super(LibvirtLightOSVolumeDriver, self).disconnect_volume(
- connection_info, instance)
+ connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size=None):
"""Extend the volume."""
diff --git a/nova/virt/libvirt/volume/nvme.py b/nova/virt/libvirt/volume/nvme.py
index 7436552812..e2977c3572 100644
--- a/nova/virt/libvirt/volume/nvme.py
+++ b/nova/virt/libvirt/volume/nvme.py
@@ -45,13 +45,13 @@ class LibvirtNVMEVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from the instance."""
LOG.debug("Disconnecting NVMe disk", instance=instance)
self.connector.disconnect_volume(
- connection_info['data'], None)
+ connection_info['data'], None, force=force)
super(LibvirtNVMEVolumeDriver,
- self).disconnect_volume(connection_info, instance)
+ self).disconnect_volume(connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size):
"""Extend the volume."""
diff --git a/nova/virt/libvirt/volume/quobyte.py b/nova/virt/libvirt/volume/quobyte.py
index bb7a770e57..2eb4bcfb42 100644
--- a/nova/virt/libvirt/volume/quobyte.py
+++ b/nova/virt/libvirt/volume/quobyte.py
@@ -189,7 +189,7 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
instance=instance)
@utils.synchronized('connect_qb_volume')
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Disconnect the volume."""
mount_path = self._get_mount_path(connection_info)
diff --git a/nova/virt/libvirt/volume/scaleio.py b/nova/virt/libvirt/volume/scaleio.py
index 7c414c2870..04a9423e8e 100644
--- a/nova/virt/libvirt/volume/scaleio.py
+++ b/nova/virt/libvirt/volume/scaleio.py
@@ -57,12 +57,13 @@ class LibvirtScaleIOVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
instance=instance)
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
- self.connector.disconnect_volume(connection_info['data'], None)
+ def disconnect_volume(self, connection_info, instance, force=False):
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
LOG.debug("Disconnected volume", instance=instance)
super(LibvirtScaleIOVolumeDriver, self).disconnect_volume(
- connection_info, instance)
+ connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size):
LOG.debug("calling os-brick to extend ScaleIO Volume",
diff --git a/nova/virt/libvirt/volume/smbfs.py b/nova/virt/libvirt/volume/smbfs.py
index d112af750c..9de1ce23cd 100644
--- a/nova/virt/libvirt/volume/smbfs.py
+++ b/nova/virt/libvirt/volume/smbfs.py
@@ -52,7 +52,7 @@ class LibvirtSMBFSVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
device_path = self._get_device_path(connection_info)
connection_info['data']['device_path'] = device_path
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Disconnect the volume."""
smbfs_share = connection_info['data']['export']
mount_path = self._get_mount_path(connection_info)
diff --git a/nova/virt/libvirt/volume/storpool.py b/nova/virt/libvirt/volume/storpool.py
index 0e71221f5b..e6dffca39a 100644
--- a/nova/virt/libvirt/volume/storpool.py
+++ b/nova/virt/libvirt/volume/storpool.py
@@ -47,10 +47,11 @@ class LibvirtStorPoolVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
device_info, instance=instance)
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
LOG.debug("Detaching StorPool volume %s",
connection_info['data']['volume'], instance=instance)
- self.connector.disconnect_volume(connection_info['data'], None)
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
LOG.debug("Detached StorPool volume", instance=instance)
def extend_volume(self, connection_info, instance, requested_size):
diff --git a/nova/virt/libvirt/volume/volume.py b/nova/virt/libvirt/volume/volume.py
index 6d650c80e6..f76c3618b2 100644
--- a/nova/virt/libvirt/volume/volume.py
+++ b/nova/virt/libvirt/volume/volume.py
@@ -135,7 +135,7 @@ class LibvirtBaseVolumeDriver(object):
"""Connect the volume."""
pass
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Disconnect the volume."""
pass
diff --git a/nova/virt/libvirt/volume/vzstorage.py b/nova/virt/libvirt/volume/vzstorage.py
index 85ffb45076..babfdef55c 100644
--- a/nova/virt/libvirt/volume/vzstorage.py
+++ b/nova/virt/libvirt/volume/vzstorage.py
@@ -126,9 +126,10 @@ class LibvirtVZStorageVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
return _connect_volume(connection_info, instance)
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach Vzstorage Volume",
instance=instance)
- self.connector.disconnect_volume(connection_info['data'], None)
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
LOG.debug("Disconnected Vzstorage Volume", instance=instance)
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 01efcfec19..f5328148d2 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -91,12 +91,14 @@ def _get_auth(context):
# from them generated from 'context.get_admin_context'
     # which only sets is_admin=True but carries no token.
     # So load the auth plugin when this condition appears.
+ user_auth = None
if context.is_admin and not context.auth_token:
if not _ADMIN_AUTH:
_ADMIN_AUTH = _load_auth_plugin(CONF)
- return _ADMIN_AUTH
- else:
- return service_auth.get_auth_plugin(context)
+ user_auth = _ADMIN_AUTH
+
+    # If user_auth is None, the auth plugin will be extracted from the context.
+ return service_auth.get_auth_plugin(context, user_auth=user_auth)
# NOTE(efried): Bug #1752152
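
The user_auth keyword added here lets the service-token machinery wrap the admin plugin instead of a token-less context. A simplified sketch of what `service_auth.get_auth_plugin()` plausibly does with it, using the real keystoneauth1 wrapper but eliding Nova's config loading:

    from keystoneauth1 import service_token

    def get_auth_plugin(context, user_auth=None):
        # Fall back to the plugin derived from the request context.
        user_auth = user_auth or context.get_auth_plugin()
        if CONF.service_user.send_service_user_token:
            # _SERVICE_AUTH is the module-global service credential
            # (the same global mocked in the cinder test above).
            return service_token.ServiceTokenAuthWrapper(
                user_auth=user_auth, service_auth=_SERVICE_AUTH)
        return user_auth
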
diff --git a/releasenotes/notes/service-user-token-421d067c16257782.yaml b/releasenotes/notes/service-user-token-421d067c16257782.yaml
new file mode 100644
index 0000000000..d3af14fbb8
--- /dev/null
+++ b/releasenotes/notes/service-user-token-421d067c16257782.yaml
@@ -0,0 +1,11 @@
+upgrade:
+ - |
+    Configuration of service user tokens is now **required** for all Nova
+    services to ensure the security of block-storage volume data.
+
+    Every Nova configuration file must include a ``[service_user]`` section,
+    configured as described in the `documentation`__.
+
+ See https://bugs.launchpad.net/nova/+bug/2004555 for more details.
+
+ __ https://docs.openstack.org/nova/latest/admin/configuration/service-user-token.html
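
For illustration, a populated ``[service_user]`` section typically looks like the following; all values here are placeholders, and the linked documentation is authoritative:

    [service_user]
    send_service_user_token = true
    auth_type = password
    auth_url = https://keystone.example.com/identity
    project_domain_name = Default
    project_name = service
    user_domain_name = Default
    username = nova
    password = secretservice
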
diff --git a/tools/check-cherry-picks.sh b/tools/check-cherry-picks.sh
index 46cef8c225..3042aa1659 100755
--- a/tools/check-cherry-picks.sh
+++ b/tools/check-cherry-picks.sh
@@ -23,7 +23,7 @@ hashes=$(git show --format='%b' --quiet $commit_hash | sed -nr 's/^.cherry picke
checked=0
branches+=""
for hash in $hashes; do
- branch=$(git branch -a --contains "$hash" 2>/dev/null| grep -oE '(master|stable/[a-z]+)')
+ branch=$(git branch -a --contains "$hash" 2>/dev/null| grep -oE '(master|stable/[a-z0-9.]+)')
if [ $? -ne 0 ]; then
echo "Cherry pick hash $hash not on any master or stable branches"
exit 1
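
The widened character class matters because newer stable branches are named after releases (for example stable/2023.1), which the old [a-z]+ pattern never matched. A quick check of the new expression against hypothetical branch refs:

    import re

    pattern = re.compile(r'(master|stable/[a-z0-9.]+)')
    assert pattern.search('remotes/origin/stable/2023.1')  # matches now
    assert pattern.search('remotes/origin/stable/zed')     # still matches
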