-rw-r--r--  doc/source/admin/configuration/cross-cell-resize.rst  2
-rw-r--r--  doc/source/admin/configuration/index.rst  1
-rw-r--r--  doc/source/admin/configuration/service-user-token.rst  59
-rw-r--r--  doc/source/admin/live-migration-usage.rst  2
-rw-r--r--  doc/source/admin/migrate-instance-with-snapshot.rst  2
-rw-r--r--  doc/source/admin/support-compute.rst  64
-rw-r--r--  doc/source/install/compute-install-obs.rst  20
-rw-r--r--  doc/source/install/compute-install-rdo.rst  20
-rw-r--r--  doc/source/install/compute-install-ubuntu.rst  20
-rw-r--r--  doc/source/install/controller-install-obs.rst  20
-rw-r--r--  doc/source/install/controller-install-rdo.rst  20
-rw-r--r--  doc/source/install/controller-install-ubuntu.rst  20
-rw-r--r--  nova/cmd/status.py  11
-rw-r--r--  nova/compute/manager.py  3
-rw-r--r--  nova/network/neutron.py  8
-rw-r--r--  nova/service_auth.py  6
-rw-r--r--  nova/tests/unit/cmd/test_status.py  16
-rw-r--r--  nova/tests/unit/compute/test_compute_mgr.py  36
-rw-r--r--  nova/tests/unit/network/test_neutron.py  16
-rw-r--r--  nova/tests/unit/test_service_auth.py  10
-rw-r--r--  nova/tests/unit/virt/hyperv/test_vmops.py  2
-rw-r--r--  nova/tests/unit/virt/hyperv/test_volumeops.py  26
-rw-r--r--  nova/tests/unit/virt/ironic/test_driver.py  22
-rw-r--r--  nova/tests/unit/virt/libvirt/test_driver.py  61
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py  20
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_iscsi.py  9
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_lightos.py  8
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_nvme.py  8
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_scaleio.py  8
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_storpool.py  16
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_vzstorage.py  8
-rw-r--r--  nova/tests/unit/volume/test_cinder.py  11
-rw-r--r--  nova/virt/hyperv/vmops.py  2
-rw-r--r--  nova/virt/hyperv/volumeops.py  12
-rw-r--r--  nova/virt/ironic/driver.py  12
-rw-r--r--  nova/virt/libvirt/driver.py  7
-rw-r--r--  nova/virt/libvirt/volume/fibrechannel.py  7
-rw-r--r--  nova/virt/libvirt/volume/fs.py  2
-rw-r--r--  nova/virt/libvirt/volume/iscsi.py  7
-rw-r--r--  nova/virt/libvirt/volume/lightos.py  7
-rw-r--r--  nova/virt/libvirt/volume/nvme.py  6
-rw-r--r--  nova/virt/libvirt/volume/quobyte.py  2
-rw-r--r--  nova/virt/libvirt/volume/scaleio.py  7
-rw-r--r--  nova/virt/libvirt/volume/smbfs.py  2
-rw-r--r--  nova/virt/libvirt/volume/storpool.py  5
-rw-r--r--  nova/virt/libvirt/volume/volume.py  2
-rw-r--r--  nova/virt/libvirt/volume/vzstorage.py  5
-rw-r--r--  nova/volume/cinder.py  8
-rw-r--r--  releasenotes/notes/service-user-token-421d067c16257782.yaml  11
-rwxr-xr-x  tools/check-cherry-picks.sh  2
50 files changed, 535 insertions(+), 126 deletions(-)
diff --git a/doc/source/admin/configuration/cross-cell-resize.rst b/doc/source/admin/configuration/cross-cell-resize.rst
index e51e425774..0c34fd13f5 100644
--- a/doc/source/admin/configuration/cross-cell-resize.rst
+++ b/doc/source/admin/configuration/cross-cell-resize.rst
@@ -284,7 +284,7 @@ Troubleshooting
Timeouts
~~~~~~~~
-Configure a :ref:`service user <user_token_timeout>` in case the user token
+Configure a :ref:`service user <service_user_token>` in case the user token
times out, e.g. during the snapshot and download of a large server image.
If RPC calls are timing out with a ``MessagingTimeout`` error in the logs,
diff --git a/doc/source/admin/configuration/index.rst b/doc/source/admin/configuration/index.rst
index 233597b1fe..f5b6fde9da 100644
--- a/doc/source/admin/configuration/index.rst
+++ b/doc/source/admin/configuration/index.rst
@@ -19,6 +19,7 @@ A list of config options based on different topics can be found below:
.. toctree::
:maxdepth: 1
+ /admin/configuration/service-user-token
/admin/configuration/api
/admin/configuration/resize
/admin/configuration/cross-cell-resize
diff --git a/doc/source/admin/configuration/service-user-token.rst b/doc/source/admin/configuration/service-user-token.rst
new file mode 100644
index 0000000000..740730af1d
--- /dev/null
+++ b/doc/source/admin/configuration/service-user-token.rst
@@ -0,0 +1,59 @@
+.. _service_user_token:
+
+===================
+Service User Tokens
+===================
+
+.. note::
+
+ Configuration of service user tokens is **required** for every Nova service
+ for security reasons. See https://bugs.launchpad.net/nova/+bug/2004555 for
+ details.
+
+Configure Nova to send service user tokens alongside regular user tokens when
+making REST API calls to other services. The identity service (Keystone) will
+authenticate a request using the service user token if the regular user token
+has expired.
+
+This is important because long-running operations such as live migration or
+snapshot can take long enough for the user token to expire while the
+operation is still in progress. Without the service token, post-operation
+steps such as cleanup after a live migration could fail when Nova calls
+other service APIs like block-storage (Cinder) or networking (Neutron)
+with the expired user token.
+
+The service token is also used by services to validate whether the API caller
+is a service. Some service APIs are restricted to service users only.
+
+To set up service tokens, create a ``nova`` service user and ``service`` role
+in the identity service (Keystone) and assign the ``service`` role to the
+``nova`` service user.
+
+Then, configure the :oslo.config:group:`service_user` section of the Nova
+configuration file, for example:
+
+.. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://104.130.216.102/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = secretservice
+ ...
+
+And configure the other identity options as necessary for the service user,
+much like you would configure nova to work with the image service (Glance) or
+networking service (Neutron).
+
+.. note::
+
+ Please note that the role assigned to the :oslo.config:group:`service_user`
+ needs to be in the configured
+ :oslo.config:option:`keystone_authtoken.service_token_roles` of other
+ services such as block-storage (Cinder), image (Glance), and networking
+ (Neutron).
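
As context for the new ``[service_user]`` section: enabling
``send_service_user_token`` makes nova wrap each user auth plugin in
keystoneauth1's ``ServiceTokenAuthWrapper``. A minimal sketch, assuming the
keystoneauth1 API nova uses (the ``wrap_with_service_token`` helper is
illustrative, not nova code):

.. code-block:: python

    from keystoneauth1 import loading as ks_loading
    from keystoneauth1 import service_token

    SERVICE_USER_GROUP = 'service_user'

    def wrap_with_service_token(conf, user_auth):
        # Assumes the [service_user] auth options are already registered
        # on conf, as nova does at startup.
        service_auth = ks_loading.load_auth_from_conf_options(
            conf, group=SERVICE_USER_GROUP)
        # Requests made through the wrapper carry the user token in
        # X-Auth-Token and the service token in X-Service-Token, so
        # keystone can still authenticate the call after the user token
        # expires.
        return service_token.ServiceTokenAuthWrapper(
            user_auth=user_auth, service_auth=service_auth)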
diff --git a/doc/source/admin/live-migration-usage.rst b/doc/source/admin/live-migration-usage.rst
index 783ab5e27c..a1e7f18756 100644
--- a/doc/source/admin/live-migration-usage.rst
+++ b/doc/source/admin/live-migration-usage.rst
@@ -320,4 +320,4 @@ To make live-migration succeed, you have several options:
If live migrations routinely timeout or fail during cleanup operations due
to the user token timing out, consider configuring nova to use
-:ref:`service user tokens <user_token_timeout>`.
+:ref:`service user tokens <service_user_token>`.
diff --git a/doc/source/admin/migrate-instance-with-snapshot.rst b/doc/source/admin/migrate-instance-with-snapshot.rst
index 65059679ab..230431091e 100644
--- a/doc/source/admin/migrate-instance-with-snapshot.rst
+++ b/doc/source/admin/migrate-instance-with-snapshot.rst
@@ -67,7 +67,7 @@ Create a snapshot of the instance
If snapshot operations routinely fail because the user token times out
while uploading a large disk image, consider configuring nova to use
- :ref:`service user tokens <user_token_timeout>`.
+ :ref:`service user tokens <service_user_token>`.
#. Use the :command:`openstack image list` command to check the status
until the status is ``ACTIVE``:
diff --git a/doc/source/admin/support-compute.rst b/doc/source/admin/support-compute.rst
index 8522e51d79..31e32fd1dd 100644
--- a/doc/source/admin/support-compute.rst
+++ b/doc/source/admin/support-compute.rst
@@ -478,67 +478,3 @@ Ensure the ``compute`` endpoint in the identity service catalog is pointing
at ``/v2.1`` instead of ``/v2``. The former route supports microversions,
while the latter route is considered the legacy v2.0 compatibility-mode
route which renders all requests as if they were made on the legacy v2.0 API.
-
-
-.. _user_token_timeout:
-
-User token times out during long-running operations
----------------------------------------------------
-
-Problem
-~~~~~~~
-
-Long-running operations such as live migration or snapshot can sometimes
-overrun the expiry of the user token. In such cases, post operations such
-as cleaning up after a live migration can fail when the nova-compute service
-needs to cleanup resources in other services, such as in the block-storage
-(cinder) or networking (neutron) services.
-
-For example:
-
-.. code-block:: console
-
- 2018-12-17 13:47:29.591 16987 WARNING nova.virt.libvirt.migration [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Live migration not completed after 2400 sec
- 2018-12-17 13:47:30.097 16987 WARNING nova.virt.libvirt.driver [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Migration operation was cancelled
- 2018-12-17 13:47:30.299 16987 ERROR nova.virt.libvirt.driver [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Live Migration failure: operation aborted: migration job: canceled by client: libvirtError: operation aborted: migration job: canceled by client
- 2018-12-17 13:47:30.685 16987 INFO nova.compute.manager [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Swapping old allocation on 3e32d595-bd1f-4136-a7f4-c6703d2fbe18 held by migration 17bec61d-544d-47e0-a1c1-37f9d7385286 for instance
- 2018-12-17 13:47:32.450 16987 ERROR nova.volume.cinder [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] Delete attachment failed for attachment 58997d5b-24f0-4073-819e-97916fb1ee19. Error: The request you have made requires authentication. (HTTP 401) Code: 401: Unauthorized: The request you have made requires authentication. (HTTP 401)
-
-Solution
-~~~~~~~~
-
-Configure nova to use service user tokens to supplement the regular user token
-used to initiate the operation. The identity service (keystone) will then
-authenticate a request using the service user token if the user token has
-already expired.
-
-To use, create a service user in the identity service similar as you would when
-creating the ``nova`` service user.
-
-Then configure the :oslo.config:group:`service_user` section of the nova
-configuration file, for example:
-
-.. code-block:: ini
-
- [service_user]
- send_service_user_token = True
- auth_type = password
- project_domain_name = Default
- project_name = service
- user_domain_name = Default
- password = secretservice
- username = nova
- auth_url = https://104.130.216.102/identity
- ...
-
-And configure the other identity options as necessary for the service user,
-much like you would configure nova to work with the image service (glance)
-or networking service.
-
-.. note::
-
- Please note that the role of the :oslo.config:group:`service_user` you
- configure needs to be a superset of
- :oslo.config:option:`keystone_authtoken.service_token_roles` (The option
- :oslo.config:option:`keystone_authtoken.service_token_roles` is configured
- in cinder, glance and neutron).
diff --git a/doc/source/install/compute-install-obs.rst b/doc/source/install/compute-install-obs.rst
index c5c1d29fb3..c227b6eba4 100644
--- a/doc/source/install/compute-install-obs.rst
+++ b/doc/source/install/compute-install-obs.rst
@@ -92,6 +92,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
.. path /etc/nova/nova.conf
diff --git a/doc/source/install/compute-install-rdo.rst b/doc/source/install/compute-install-rdo.rst
index 0a5ad685a6..0c6203a667 100644
--- a/doc/source/install/compute-install-rdo.rst
+++ b/doc/source/install/compute-install-rdo.rst
@@ -84,6 +84,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
.. path /etc/nova/nova.conf
diff --git a/doc/source/install/compute-install-ubuntu.rst b/doc/source/install/compute-install-ubuntu.rst
index 8605c73316..baf0585e52 100644
--- a/doc/source/install/compute-install-ubuntu.rst
+++ b/doc/source/install/compute-install-ubuntu.rst
@@ -74,6 +74,26 @@ Install and configure components
Comment out or remove any other options in the
``[keystone_authtoken]`` section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
.. path /etc/nova/nova.conf
diff --git a/doc/source/install/controller-install-obs.rst b/doc/source/install/controller-install-obs.rst
index 18499612c3..01b7bb0f5a 100644
--- a/doc/source/install/controller-install-obs.rst
+++ b/doc/source/install/controller-install-obs.rst
@@ -260,6 +260,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the
management interface IP address of the controller node:
diff --git a/doc/source/install/controller-install-rdo.rst b/doc/source/install/controller-install-rdo.rst
index fd2419631e..b6098f1776 100644
--- a/doc/source/install/controller-install-rdo.rst
+++ b/doc/source/install/controller-install-rdo.rst
@@ -247,6 +247,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the
management interface IP address of the controller node:
diff --git a/doc/source/install/controller-install-ubuntu.rst b/doc/source/install/controller-install-ubuntu.rst
index 7282b0b2e2..1363a98ba8 100644
--- a/doc/source/install/controller-install-ubuntu.rst
+++ b/doc/source/install/controller-install-ubuntu.rst
@@ -237,6 +237,26 @@ Install and configure components
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
+ * In the ``[service_user]`` section, configure :ref:`service user
+ tokens <service_user_token>`:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [service_user]
+ send_service_user_token = true
+ auth_url = https://controller/identity
+ auth_strategy = keystone
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ username = nova
+ password = NOVA_PASS
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in
+ the Identity service.
+
* In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the
management interface IP address of the controller node:
diff --git a/nova/cmd/status.py b/nova/cmd/status.py
index 29e4a5d01e..4a4e28d7e8 100644
--- a/nova/cmd/status.py
+++ b/nova/cmd/status.py
@@ -271,6 +271,15 @@ https://docs.openstack.org/latest/nova/admin/hw_machine_type.html"""))
return upgradecheck.Result(upgradecheck.Code.SUCCESS)
+ def _check_service_user_token(self):
+ if not CONF.service_user.send_service_user_token:
+ msg = (_("""
+Service user token configuration is required for all Nova services.
+For more details see the following:
+https://docs.openstack.org/latest/nova/admin/configuration/service-user-token.html""")) # noqa
+ return upgradecheck.Result(upgradecheck.Code.FAILURE, msg)
+ return upgradecheck.Result(upgradecheck.Code.SUCCESS)
+
# The format of the check functions is to return an upgradecheck.Result
# object with the appropriate upgradecheck.Code and details set. If the
# check hits warnings or failures then those should be stored in the
@@ -294,6 +303,8 @@ https://docs.openstack.org/latest/nova/admin/hw_machine_type.html"""))
(_('Older than N-1 computes'), _check_old_computes),
# Added in Wallaby
(_('hw_machine_type unset'), _check_machine_type_set),
+ # Added in Bobcat
+ (_('Service User Token Configuration'), _check_service_user_token),
)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index f25d037c50..d29348097f 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -2736,7 +2736,8 @@ class ComputeManager(manager.Manager):
block_device_mapping)
resources['block_device_info'] = block_device_info
except (exception.InstanceNotFound,
- exception.UnexpectedDeletingTaskStateError):
+ exception.UnexpectedDeletingTaskStateError,
+ exception.ComputeResourcesUnavailable):
with excutils.save_and_reraise_exception():
self._build_resources_cleanup(instance, network_info)
except (exception.UnexpectedTaskStateError,
diff --git a/nova/network/neutron.py b/nova/network/neutron.py
index 27e7d06455..affd76535f 100644
--- a/nova/network/neutron.py
+++ b/nova/network/neutron.py
@@ -222,13 +222,15 @@ def _get_auth_plugin(context, admin=False):
# support some services (metadata API) where an admin context is used
# without an auth token.
global _ADMIN_AUTH
+ user_auth = None
if admin or (context.is_admin and not context.auth_token):
if not _ADMIN_AUTH:
_ADMIN_AUTH = _load_auth_plugin(CONF)
- return _ADMIN_AUTH
+ user_auth = _ADMIN_AUTH
- if context.auth_token:
- return service_auth.get_auth_plugin(context)
+ if context.auth_token or user_auth:
+ # When user_auth = None, user_auth will be extracted from the context.
+ return service_auth.get_auth_plugin(context, user_auth=user_auth)
# We did not get a user token and we should not be using
# an admin token so log an error
diff --git a/nova/service_auth.py b/nova/service_auth.py
index f5ae0646d8..aa8fd8fa12 100644
--- a/nova/service_auth.py
+++ b/nova/service_auth.py
@@ -30,8 +30,10 @@ def reset_globals():
_SERVICE_AUTH = None
-def get_auth_plugin(context):
- user_auth = context.get_auth_plugin()
+def get_auth_plugin(context, user_auth=None):
+ # user_auth may be passed in when the RequestContext is anonymous, such as
+ # when get_admin_context() is used for API calls by nova-manage.
+ user_auth = user_auth or context.get_auth_plugin()
if CONF.service_user.send_service_user_token:
global _SERVICE_AUTH
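
A brief usage sketch of the new ``user_auth`` parameter, mirroring how the
neutron and cinder modules in this change call it for anonymous admin
contexts (``admin_auth`` stands in for the plugin those modules load from
their own config sections; ``get_plugin_for_admin`` is illustrative):

.. code-block:: python

    from nova import context as nova_context
    from nova import service_auth

    def get_plugin_for_admin(admin_auth):
        # get_admin_context() yields is_admin=True with no auth_token,
        # so there is no user token to extract from the context; pass
        # the pre-loaded admin plugin explicitly instead.
        ctx = nova_context.get_admin_context()
        # With [service_user] send_service_user_token = true this returns
        # a ServiceTokenAuthWrapper around admin_auth.
        return service_auth.get_auth_plugin(ctx, user_auth=admin_auth)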
diff --git a/nova/tests/unit/cmd/test_status.py b/nova/tests/unit/cmd/test_status.py
index f5fcc168ee..c6a0ab2d52 100644
--- a/nova/tests/unit/cmd/test_status.py
+++ b/nova/tests/unit/cmd/test_status.py
@@ -446,3 +446,19 @@ class TestCheckMachineTypeUnset(test.NoDBTestCase):
upgradecheck.Code.SUCCESS,
result.code
)
+
+
+class TestUpgradeCheckServiceUserToken(test.NoDBTestCase):
+
+ def setUp(self):
+ super().setUp()
+ self.cmd = status.UpgradeCommands()
+
+ def test_service_user_token_not_configured(self):
+ result = self.cmd._check_service_user_token()
+ self.assertEqual(upgradecheck.Code.FAILURE, result.code)
+
+ def test_service_user_token_configured(self):
+ self.flags(send_service_user_token=True, group='service_user')
+ result = self.cmd._check_service_user_token()
+ self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 1a4935f482..e521283acc 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -7927,6 +7927,42 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(virt_driver.ComputeDriver, 'failed_spawn_cleanup')
@mock.patch.object(virt_driver.ComputeDriver, 'prepare_for_spawn')
+ @mock.patch.object(virt_driver.ComputeDriver,
+ 'prepare_networks_before_block_device_mapping')
+ @mock.patch.object(virt_driver.ComputeDriver,
+ 'clean_networks_preparation')
+ def test_failed_prepare_for_spawn(self, mock_clean, mock_prepnet,
+ mock_prepspawn, mock_failedspawn):
+ mock_prepspawn.side_effect = exception.ComputeResourcesUnavailable(
+ reason="asdf")
+ with mock.patch.object(self.compute,
+ '_build_networks_for_instance',
+ return_value=self.network_info
+ ) as _build_networks_for_instance:
+
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping,
+ self.resource_provider_mapping, self.accel_uuids):
+ pass
+ except Exception as e:
+ self.assertIsInstance(e,
+ exception.ComputeResourcesUnavailable)
+
+ _build_networks_for_instance.assert_has_calls(
+ [mock.call(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.resource_provider_mapping,
+ self.network_arqs)])
+
+ mock_prepnet.assert_not_called()
+ mock_clean.assert_called_once_with(self.instance, self.network_info)
+ mock_prepspawn.assert_called_once_with(self.instance)
+ mock_failedspawn.assert_called_once_with(self.instance)
+
+ @mock.patch.object(virt_driver.ComputeDriver, 'failed_spawn_cleanup')
+ @mock.patch.object(virt_driver.ComputeDriver, 'prepare_for_spawn')
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
def test_build_resources_aborts_on_failed_network_alloc(self, mock_build,
mock_prepspawn,
diff --git a/nova/tests/unit/network/test_neutron.py b/nova/tests/unit/network/test_neutron.py
index eefa7b974f..fec66fb2d3 100644
--- a/nova/tests/unit/network/test_neutron.py
+++ b/nova/tests/unit/network/test_neutron.py
@@ -142,6 +142,22 @@ class TestNeutronClient(test.NoDBTestCase):
self.assertIsInstance(cl.httpclient.auth,
service_token.ServiceTokenAuthWrapper)
+ @mock.patch('nova.service_auth._SERVICE_AUTH')
+ @mock.patch('nova.network.neutron._ADMIN_AUTH')
+ @mock.patch.object(ks_loading, 'load_auth_from_conf_options')
+ def test_admin_with_service_token(
+ self, mock_load, mock_admin_auth, mock_service_auth
+ ):
+ self.flags(send_service_user_token=True, group='service_user')
+
+ admin_context = context.get_admin_context()
+
+ cl = neutronapi.get_client(admin_context)
+ self.assertIsInstance(cl.httpclient.auth,
+ service_token.ServiceTokenAuthWrapper)
+ self.assertEqual(mock_admin_auth, cl.httpclient.auth.user_auth)
+ self.assertEqual(mock_service_auth, cl.httpclient.auth.service_auth)
+
@mock.patch.object(client.Client, "list_networks",
side_effect=exceptions.Unauthorized())
def test_Unauthorized_user(self, mock_list_networks):
diff --git a/nova/tests/unit/test_service_auth.py b/nova/tests/unit/test_service_auth.py
index 5f07515188..8966af3ce3 100644
--- a/nova/tests/unit/test_service_auth.py
+++ b/nova/tests/unit/test_service_auth.py
@@ -56,3 +56,13 @@ class ServiceAuthTestCase(test.NoDBTestCase):
result = service_auth.get_auth_plugin(self.ctx)
self.assertEqual(1, mock_load.call_count)
self.assertNotIsInstance(result, service_token.ServiceTokenAuthWrapper)
+
+ @mock.patch.object(ks_loading, 'load_auth_from_conf_options',
+ new=mock.Mock())
+ def test_get_auth_plugin_user_auth(self):
+ self.flags(send_service_user_token=True, group='service_user')
+ user_auth = mock.Mock()
+
+ result = service_auth.get_auth_plugin(self.ctx, user_auth=user_auth)
+
+ self.assertEqual(user_auth, result.user_auth)
diff --git a/nova/tests/unit/virt/hyperv/test_vmops.py b/nova/tests/unit/virt/hyperv/test_vmops.py
index 07e1774f9a..1e3e50f92b 100644
--- a/nova/tests/unit/virt/hyperv/test_vmops.py
+++ b/nova/tests/unit/virt/hyperv/test_vmops.py
@@ -1129,7 +1129,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
mock_unplug_vifs.assert_called_once_with(
mock_instance, mock.sentinel.fake_network_info)
mock_disconnect_volumes.assert_called_once_with(
- mock.sentinel.FAKE_BD_INFO)
+ mock.sentinel.FAKE_BD_INFO, force=True)
mock_delete_disk_files.assert_called_once_with(
mock_instance.name)
diff --git a/nova/tests/unit/virt/hyperv/test_volumeops.py b/nova/tests/unit/virt/hyperv/test_volumeops.py
index 66d2c2527f..f289d03632 100644
--- a/nova/tests/unit/virt/hyperv/test_volumeops.py
+++ b/nova/tests/unit/virt/hyperv/test_volumeops.py
@@ -141,7 +141,13 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
self._volumeops.disconnect_volumes(block_device_info)
fake_volume_driver.disconnect_volume.assert_called_once_with(
- block_device_mapping[0]['connection_info'])
+ block_device_mapping[0]['connection_info'], force=False)
+
+ # Verify force=True
+ fake_volume_driver.disconnect_volume.reset_mock()
+ self._volumeops.disconnect_volumes(block_device_info, force=True)
+ fake_volume_driver.disconnect_volume.assert_called_once_with(
+ block_device_mapping[0]['connection_info'], force=True)
@mock.patch('time.sleep')
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
@@ -181,7 +187,7 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
if attach_failed:
fake_volume_driver.disconnect_volume.assert_called_once_with(
- fake_conn_info)
+ fake_conn_info, force=False)
mock_sleep.assert_has_calls(
[mock.call(CONF.hyperv.volume_attach_retry_interval)] *
CONF.hyperv.volume_attach_retry_count)
@@ -203,7 +209,13 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
mock_get_volume_driver.assert_called_once_with(
mock.sentinel.conn_info)
fake_volume_driver.disconnect_volume.assert_called_once_with(
- mock.sentinel.conn_info)
+ mock.sentinel.conn_info, force=False)
+
+ # Verify force=True
+ fake_volume_driver.disconnect_volume.reset_mock()
+ self._volumeops.disconnect_volume(mock.sentinel.conn_info, force=True)
+ fake_volume_driver.disconnect_volume.assert_called_once_with(
+ mock.sentinel.conn_info, force=True)
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
def test_detach_volume(self, mock_get_volume_driver):
@@ -347,7 +359,13 @@ class BaseVolumeDriverTestCase(test_base.HyperVBaseTestCase):
self._base_vol_driver.disconnect_volume(conn_info)
self._conn.disconnect_volume.assert_called_once_with(
- conn_info['data'])
+ conn_info['data'], force=False)
+
+ # Verify force=True
+ self._conn.disconnect_volume.reset_mock()
+ self._base_vol_driver.disconnect_volume(conn_info, force=True)
+ self._conn.disconnect_volume.assert_called_once_with(
+ conn_info['data'], force=True)
@mock.patch.object(volumeops.BaseVolumeDriver, '_get_disk_res_path')
def _test_get_disk_resource_path_by_conn_info(self,
diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py
index 958623f31a..52aa37ac13 100644
--- a/nova/tests/unit/virt/ironic/test_driver.py
+++ b/nova/tests/unit/virt/ironic/test_driver.py
@@ -2542,7 +2542,10 @@ class IronicDriverTestCase(test.NoDBTestCase):
@mock.patch.object(cw.IronicClientWrapper, 'call')
def test_prepare_for_spawn(self, mock_call):
- node = ironic_utils.get_test_node(driver='fake')
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.AVAILABLE,
+ power_state=ironic_states.POWER_OFF)
self.mock_conn.get_node.return_value = node
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
@@ -2574,7 +2577,10 @@ class IronicDriverTestCase(test.NoDBTestCase):
instance)
def test_prepare_for_spawn_conflict(self):
- node = ironic_utils.get_test_node(driver='fake')
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.AVAILABLE,
+ power_state=ironic_states.POWER_OFF)
self.mock_conn.get_node.return_value = node
self.mock_conn.update_node.side_effect = sdk_exc.ConflictException
instance = fake_instance.fake_instance_obj(self.ctx, node=node.id)
@@ -2582,6 +2588,18 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.driver.prepare_for_spawn,
instance)
+ def test_prepare_for_spawn_not_available(self):
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.CLEANWAIT,
+ power_state=ironic_states.POWER_OFF)
+ self.mock_conn.get_node.return_value = node
+ self.mock_conn.update_node.side_effect = sdk_exc.ConflictException
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node.id)
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.driver.prepare_for_spawn,
+ instance)
+
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def test_failed_spawn_cleanup(self, mock_cleanup):
node = ironic_utils.get_test_node(driver='fake')
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 86e3661a34..9d0a8709a4 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -9584,7 +9584,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr._disconnect_volume(
self.context, fake_connection_info, fake_instance_1)
mock_volume_driver.disconnect_volume.assert_called_once_with(
- fake_connection_info, fake_instance_1)
+ fake_connection_info, fake_instance_1, force=False)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_detach_encryptor')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
@@ -9958,7 +9958,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
device_name='vdc',
),
mock.call.detach_encryptor(**encryption),
- mock.call.disconnect_volume(connection_info, instance)])
+ mock.call.disconnect_volume(
+ connection_info,
+ instance,
+ force=False,
+ )
+ ])
get_device_conf_func = mock_detach_with_retry.mock_calls[0][1][2]
self.assertEqual(mock_guest.get_disk, get_device_conf_func.func)
self.assertEqual(('vdc',), get_device_conf_func.args)
@@ -20257,16 +20262,64 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.context,
mock.sentinel.connection_info,
instance,
- destroy_secrets=False
+ destroy_secrets=False,
+ force=True
),
mock.call(
self.context,
mock.sentinel.connection_info,
instance,
- destroy_secrets=True
+ destroy_secrets=True,
+ force=True
)
])
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_driver')
+ @mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver._should_disconnect_target',
+ new=mock.Mock(return_value=True))
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._detach_encryptor',
+ new=mock.Mock())
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain',
+ new=mock.Mock())
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_vpmems',
+ new=mock.Mock(return_value=None))
+ def test_cleanup_disconnect_volume(self, mock_vol_driver):
+ """Verify that we call disconnect_volume() with force=True
+
+ cleanup() is called by destroy() when an instance is being deleted and
+ force=True should be passed down to os-brick's disconnect_volume()
+ call, which will ensure removal of devices regardless of errors.
+
+ We need to ensure that devices are removed when an instance is being
+ deleted to avoid leaving leftover devices that could later be
+ erroneously connected by external entities (example: multipathd) to
+ instances that should not have access to the volumes.
+
+ See https://bugs.launchpad.net/nova/+bug/2004555 for details.
+ """
+ connection_info = mock.MagicMock()
+ block_device_info = {
+ 'block_device_mapping': [
+ {
+ 'connection_info': connection_info
+ }
+ ]
+ }
+ instance = objects.Instance(self.context, **self.test_instance)
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ drvr.cleanup(
+ self.context,
+ instance,
+ network_info={},
+ block_device_info=block_device_info,
+ destroy_vifs=False,
+ destroy_disks=False,
+ )
+ mock_vol_driver.return_value.disconnect_volume.assert_called_once_with(
+ connection_info, instance, force=True)
+
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_allow_native_luksv1')
def test_swap_volume_native_luks_blocked(self, mock_allow_native_luksv1,
diff --git a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
index 06065322f6..55054652c3 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
@@ -81,3 +81,23 @@ class LibvirtFibreChannelVolumeDriverTestCase(
self.assertEqual(requested_size, new_size)
libvirt_driver.connector.extend_volume.assert_called_once_with(
connection_info['data'])
+
+ def test_disconnect_volume(self):
+ device_path = '/dev/fake-dev'
+ connection_info = {'data': {'device_path': device_path}}
+
+ libvirt_driver = fibrechannel.LibvirtFibreChannelVolumeDriver(
+ self.fake_host)
+ libvirt_driver.connector.disconnect_volume = mock.MagicMock()
+ libvirt_driver.disconnect_volume(
+ connection_info, mock.sentinel.instance)
+
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], connection_info['data'], force=False)
+
+ # Verify force=True
+ libvirt_driver.connector.disconnect_volume.reset_mock()
+ libvirt_driver.disconnect_volume(
+ connection_info, mock.sentinel.instance, force=True)
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], connection_info['data'], force=True)
diff --git a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
index bd516b1dd6..a1111e0d12 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
@@ -57,10 +57,19 @@ class LibvirtISCSIVolumeDriverTestCase(
device=device_path))
libvirt_driver.disconnect_volume(connection_info,
mock.sentinel.instance)
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], None, force=False)
msg = mock_LOG_warning.call_args_list[0]
self.assertIn('Ignoring VolumeDeviceNotFound', msg[0][0])
+ # Verify force=True
+ libvirt_driver.connector.disconnect_volume.reset_mock()
+ libvirt_driver.disconnect_volume(
+ connection_info, mock.sentinel.instance, force=True)
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], None, force=True)
+
def test_extend_volume(self):
device_path = '/dev/fake-dev'
connection_info = {'data': {'device_path': device_path}}
diff --git a/nova/tests/unit/virt/libvirt/volume/test_lightos.py b/nova/tests/unit/virt/libvirt/volume/test_lightos.py
index 8a85d73059..f97a696a53 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_lightos.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_lightos.py
@@ -62,7 +62,13 @@ class LibvirtLightVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
connection_info = {'data': disk_info}
lightos_driver.disconnect_volume(connection_info, None)
lightos_driver.connector.disconnect_volume.assert_called_once_with(
- disk_info, None)
+ disk_info, None, force=False)
+
+ # Verify force=True
+ lightos_driver.connector.disconnect_volume.reset_mock()
+ lightos_driver.disconnect_volume(connection_info, None, force=True)
+ lightos_driver.connector.disconnect_volume.assert_called_once_with(
+ disk_info, None, force=True)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_nvme.py b/nova/tests/unit/virt/libvirt/volume/test_nvme.py
index 3f593841fa..42ef0adc8d 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_nvme.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_nvme.py
@@ -77,7 +77,13 @@ class LibvirtNVMEVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
connection_info = {'data': disk_info}
nvme_driver.disconnect_volume(connection_info, None)
nvme_driver.connector.disconnect_volume.assert_called_once_with(
- disk_info, None)
+ disk_info, None, force=False)
+
+ # Verify force=True
+ nvme_driver.connector.disconnect_volume.reset_mock()
+ nvme_driver.disconnect_volume(connection_info, None, force=True)
+ nvme_driver.connector.disconnect_volume.assert_called_once_with(
+ disk_info, None, force=True)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_scaleio.py b/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
index f0fcba1deb..7d93691d9d 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
@@ -49,7 +49,13 @@ class LibvirtScaleIOVolumeDriverTestCase(
conn = {'data': mock.sentinel.conn_data}
sio.disconnect_volume(conn, mock.sentinel.instance)
sio.connector.disconnect_volume.assert_called_once_with(
- mock.sentinel.conn_data, None)
+ mock.sentinel.conn_data, None, force=False)
+
+ # Verify force=True
+ sio.connector.disconnect_volume.reset_mock()
+ sio.disconnect_volume(conn, mock.sentinel.instance, force=True)
+ sio.connector.disconnect_volume.assert_called_once_with(
+ mock.sentinel.conn_data, None, force=True)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_storpool.py b/nova/tests/unit/virt/libvirt/volume/test_storpool.py
index 678d4f8eb4..a3252b8525 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_storpool.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_storpool.py
@@ -53,9 +53,11 @@ class MockStorPoolConnector(object):
}
return {'type': 'block', 'path': test_attached[v]['path']}
- def disconnect_volume(self, connection_info, device_info):
+ def disconnect_volume(self, connection_info, device_info, **kwargs):
self.inst.assertIn('client_id', connection_info)
self.inst.assertIn('volume', connection_info)
+ self.inst.assertIn('force', kwargs)
+ self.inst.assertEqual(self.inst.force, kwargs.get('force'))
v = connection_info['volume']
if v not in test_attached:
@@ -86,6 +88,11 @@ class MockStorPoolInitiator(object):
class LibvirtStorPoolVolumeDriverTestCase(
test_volume.LibvirtVolumeBaseTestCase):
+ def setUp(self):
+ super().setUp()
+ # This is for testing the force flag of disconnect_volume()
+ self.force = False
+
def mock_storpool(f):
def _config_inner_inner1(inst, *args, **kwargs):
@mock.patch(
@@ -175,3 +182,10 @@ class LibvirtStorPoolVolumeDriverTestCase(
libvirt_driver.disconnect_volume(ci_2, mock.sentinel.instance)
self.assertDictEqual({}, test_attached)
+
+ # Connect the volume again so we can detach it again
+ libvirt_driver.connect_volume(ci_2, mock.sentinel.instance)
+ # Verify force=True
+ self.force = True
+ libvirt_driver.disconnect_volume(
+ ci_2, mock.sentinel.instance, force=True)
diff --git a/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py b/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
index 168efee944..c9e455b193 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
@@ -95,7 +95,13 @@ class LibvirtVZStorageTestCase(test_volume.LibvirtVolumeBaseTestCase):
conn = {'data': mock.sentinel.conn_data}
drv.disconnect_volume(conn, mock.sentinel.instance)
drv.connector.disconnect_volume.assert_called_once_with(
- mock.sentinel.conn_data, None)
+ mock.sentinel.conn_data, None, force=False)
+
+ # Verify force=True
+ drv.connector.disconnect_volume.reset_mock()
+ drv.disconnect_volume(conn, mock.sentinel.instance, force=True)
+ drv.connector.disconnect_volume.assert_called_once_with(
+ mock.sentinel.conn_data, None, force=True)
def test_libvirt_vzstorage_driver_get_config(self):
libvirt_driver = vzstorage.LibvirtVZStorageVolumeDriver(self.fake_host)
diff --git a/nova/tests/unit/volume/test_cinder.py b/nova/tests/unit/volume/test_cinder.py
index e53ebe3cb8..f9080726fb 100644
--- a/nova/tests/unit/volume/test_cinder.py
+++ b/nova/tests/unit/volume/test_cinder.py
@@ -1276,3 +1276,14 @@ class CinderClientTestCase(test.NoDBTestCase):
admin_ctx = context.get_admin_context()
params = cinder._get_cinderclient_parameters(admin_ctx)
self.assertEqual(params[0], mock_admin_auth)
+
+ @mock.patch('nova.service_auth._SERVICE_AUTH')
+ @mock.patch('nova.volume.cinder._ADMIN_AUTH')
+ def test_admin_context_without_user_token_but_with_service_token(
+ self, mock_admin_auth, mock_service_auth
+ ):
+ self.flags(send_service_user_token=True, group='service_user')
+ admin_ctx = context.get_admin_context()
+ params = cinder._get_cinderclient_parameters(admin_ctx)
+ self.assertEqual(mock_admin_auth, params[0].user_auth)
+ self.assertEqual(mock_service_auth, params[0].service_auth)
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index 3ec7e90c30..08adeada76 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -747,7 +747,7 @@ class VMOps(object):
# should be disconnected even if the VM doesn't exist anymore,
# so they are not leaked.
self.unplug_vifs(instance, network_info)
- self._volumeops.disconnect_volumes(block_device_info)
+ self._volumeops.disconnect_volumes(block_device_info, force=True)
if destroy_disks:
self._delete_disk_files(instance_name)
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index da5b40f375..d2bfed2441 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -59,10 +59,10 @@ class VolumeOps(object):
for vol in volumes:
self.attach_volume(vol['connection_info'], instance_name)
- def disconnect_volumes(self, block_device_info):
+ def disconnect_volumes(self, block_device_info, force=False):
mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in mapping:
- self.disconnect_volume(vol['connection_info'])
+ self.disconnect_volume(vol['connection_info'], force=force)
def attach_volume(self, connection_info, instance_name,
disk_bus=constants.CTRL_TYPE_SCSI):
@@ -116,9 +116,9 @@ class VolumeOps(object):
volume_driver.set_disk_qos_specs(connection_info,
qos_specs)
- def disconnect_volume(self, connection_info):
+ def disconnect_volume(self, connection_info, force=False):
volume_driver = self._get_volume_driver(connection_info)
- volume_driver.disconnect_volume(connection_info)
+ volume_driver.disconnect_volume(connection_info, force=force)
def detach_volume(self, connection_info, instance_name):
LOG.debug("Detaching volume: %(connection_info)s "
@@ -231,8 +231,8 @@ class BaseVolumeDriver(object):
def connect_volume(self, connection_info):
return self._connector.connect_volume(connection_info['data'])
- def disconnect_volume(self, connection_info):
- self._connector.disconnect_volume(connection_info['data'])
+ def disconnect_volume(self, connection_info, force=False):
+ self._connector.disconnect_volume(connection_info['data'], force=force)
def get_disk_resource_path(self, connection_info):
disk_paths = self._connector.get_volume_paths(connection_info['data'])
diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py
index 5583ac5236..117294ff89 100644
--- a/nova/virt/ironic/driver.py
+++ b/nova/virt/ironic/driver.py
@@ -397,6 +397,18 @@ class IronicDriver(virt_driver.ComputeDriver):
_("Ironic node uuid not supplied to "
"driver for instance %s.") % instance.uuid)
node = self._get_node(node_uuid)
+
+ # It's possible this node has just moved from deleting
+ # to cleaning. Placement will update the inventory
+ # as all reserved, but this instance might have gotten here
+ # before that happened, after the previous allocation
+ # was deleted. We trigger a re-schedule to another node.
+ if (self._node_resources_used(node) or
+ self._node_resources_unavailable(node)):
+ msg = "Chosen ironic node %s is not available" % node_uuid
+ LOG.info(msg, instance=instance)
+ raise exception.ComputeResourcesUnavailable(reason=msg)
+
self._set_instance_id(node, instance)
def failed_spawn_cleanup(self, instance):
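
For context, a rough sketch of what the two predicates consulted above
check; the real helpers live elsewhere in ``nova/virt/ironic/driver.py``,
and the exact provision states listed here are an assumption:

.. code-block:: python

    def _node_resources_used(node):
        # A node already bound to an instance is in use and cannot be
        # claimed by a new build request.
        return node.instance_uuid is not None

    def _node_resources_unavailable(node):
        # A node in maintenance, or still cleaning up after a previous
        # instance, must not accept a new one; raising
        # ComputeResourcesUnavailable triggers a reschedule.
        bad_states = ('cleaning', 'clean wait', 'clean failed', 'error')
        return node.maintenance or node.provision_state in bad_states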
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index e247107437..1cbeda426c 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -1644,7 +1644,7 @@ class LibvirtDriver(driver.ComputeDriver):
try:
self._disconnect_volume(
context, connection_info, instance,
- destroy_secrets=destroy_secrets)
+ destroy_secrets=destroy_secrets, force=True)
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if cleanup_instance_disks:
@@ -1961,7 +1961,7 @@ class LibvirtDriver(driver.ComputeDriver):
return (False if connection_count > 1 else True)
def _disconnect_volume(self, context, connection_info, instance,
- encryption=None, destroy_secrets=True):
+ encryption=None, destroy_secrets=True, force=False):
self._detach_encryptor(
context,
connection_info,
@@ -1973,7 +1973,8 @@ class LibvirtDriver(driver.ComputeDriver):
multiattach = connection_info.get('multiattach', False)
if self._should_disconnect_target(
context, instance, multiattach, vol_driver, volume_id):
- vol_driver.disconnect_volume(connection_info, instance)
+ vol_driver.disconnect_volume(
+ connection_info, instance, force=force)
else:
LOG.info('Detected multiple connections on this host for '
'volume: %(volume)s, skipping target disconnect.',
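
The ``force`` flag introduced here ultimately reaches os-brick. A sketch of
the connector call the volume drivers below forward to, assuming the
os-brick connector API that the unit tests in this change mock
(``force_disconnect`` is illustrative):

.. code-block:: python

    from os_brick.initiator import connector

    def force_disconnect(connection_info):
        # The unit tests in this change mock this exact factory.
        conn = connector.InitiatorConnector.factory(
            'ISCSI', root_helper='sudo')
        # force=True tells os-brick to proceed with device removal even
        # when flushing fails, so a deleted instance never leaves stale
        # devices behind that could later be reattached (e.g. by
        # multipathd) to another instance's volume.
        conn.disconnect_volume(connection_info['data'], None, force=True)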
diff --git a/nova/virt/libvirt/volume/fibrechannel.py b/nova/virt/libvirt/volume/fibrechannel.py
index b50db3aa1c..1f890c95c1 100644
--- a/nova/virt/libvirt/volume/fibrechannel.py
+++ b/nova/virt/libvirt/volume/fibrechannel.py
@@ -59,7 +59,7 @@ class LibvirtFibreChannelVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
connection_info['data']['multipath_id'] = \
device_info['multipath_id']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach FC Volume", instance=instance)
@@ -69,11 +69,12 @@ class LibvirtFibreChannelVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
# the 2nd param of disconnect_volume and be consistent
# with the rest of the connectors.
self.connector.disconnect_volume(connection_info['data'],
- connection_info['data'])
+ connection_info['data'],
+ force=force)
LOG.debug("Disconnected FC Volume", instance=instance)
super(LibvirtFibreChannelVolumeDriver,
- self).disconnect_volume(connection_info, instance)
+ self).disconnect_volume(connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size):
"""Extend the volume."""
diff --git a/nova/virt/libvirt/volume/fs.py b/nova/virt/libvirt/volume/fs.py
index 5fb9af4a52..992ef45016 100644
--- a/nova/virt/libvirt/volume/fs.py
+++ b/nova/virt/libvirt/volume/fs.py
@@ -116,7 +116,7 @@ class LibvirtMountedFileSystemVolumeDriver(LibvirtBaseFileSystemVolumeDriver,
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Disconnect the volume."""
vol_name = connection_info['data']['name']
mountpoint = self._get_mount_path(connection_info)
diff --git a/nova/virt/libvirt/volume/iscsi.py b/nova/virt/libvirt/volume/iscsi.py
index 564bac14cc..2b25972a49 100644
--- a/nova/virt/libvirt/volume/iscsi.py
+++ b/nova/virt/libvirt/volume/iscsi.py
@@ -66,19 +66,20 @@ class LibvirtISCSIVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach iSCSI Volume", instance=instance)
try:
- self.connector.disconnect_volume(connection_info['data'], None)
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
except os_brick_exception.VolumeDeviceNotFound as exc:
LOG.warning('Ignoring VolumeDeviceNotFound: %s', exc)
return
LOG.debug("Disconnected iSCSI Volume", instance=instance)
super(LibvirtISCSIVolumeDriver,
- self).disconnect_volume(connection_info, instance)
+ self).disconnect_volume(connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size):
"""Extend the volume."""
diff --git a/nova/virt/libvirt/volume/lightos.py b/nova/virt/libvirt/volume/lightos.py
index d6d393994e..6a22bf6dc6 100644
--- a/nova/virt/libvirt/volume/lightos.py
+++ b/nova/virt/libvirt/volume/lightos.py
@@ -42,14 +42,15 @@ class LibvirtLightOSVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
LOG.debug("Connecting NVMe volume with device_info %s", device_info)
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from the instance."""
LOG.debug("Disconnecting NVMe disk. instance:%s, volume_id:%s",
connection_info.get("instance", ""),
connection_info.get("volume_id", ""))
- self.connector.disconnect_volume(connection_info['data'], None)
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
super(LibvirtLightOSVolumeDriver, self).disconnect_volume(
- connection_info, instance)
+ connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size=None):
"""Extend the volume."""
diff --git a/nova/virt/libvirt/volume/nvme.py b/nova/virt/libvirt/volume/nvme.py
index 7436552812..e2977c3572 100644
--- a/nova/virt/libvirt/volume/nvme.py
+++ b/nova/virt/libvirt/volume/nvme.py
@@ -45,13 +45,13 @@ class LibvirtNVMEVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from the instance."""
LOG.debug("Disconnecting NVMe disk", instance=instance)
self.connector.disconnect_volume(
- connection_info['data'], None)
+ connection_info['data'], None, force=force)
super(LibvirtNVMEVolumeDriver,
- self).disconnect_volume(connection_info, instance)
+ self).disconnect_volume(connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size):
"""Extend the volume."""
diff --git a/nova/virt/libvirt/volume/quobyte.py b/nova/virt/libvirt/volume/quobyte.py
index bb7a770e57..2eb4bcfb42 100644
--- a/nova/virt/libvirt/volume/quobyte.py
+++ b/nova/virt/libvirt/volume/quobyte.py
@@ -189,7 +189,7 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
instance=instance)
@utils.synchronized('connect_qb_volume')
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Disconnect the volume."""
mount_path = self._get_mount_path(connection_info)
diff --git a/nova/virt/libvirt/volume/scaleio.py b/nova/virt/libvirt/volume/scaleio.py
index 7c414c2870..04a9423e8e 100644
--- a/nova/virt/libvirt/volume/scaleio.py
+++ b/nova/virt/libvirt/volume/scaleio.py
@@ -57,12 +57,13 @@ class LibvirtScaleIOVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
instance=instance)
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
- self.connector.disconnect_volume(connection_info['data'], None)
+ def disconnect_volume(self, connection_info, instance, force=False):
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
LOG.debug("Disconnected volume", instance=instance)
super(LibvirtScaleIOVolumeDriver, self).disconnect_volume(
- connection_info, instance)
+ connection_info, instance, force=force)
def extend_volume(self, connection_info, instance, requested_size):
LOG.debug("calling os-brick to extend ScaleIO Volume",
diff --git a/nova/virt/libvirt/volume/smbfs.py b/nova/virt/libvirt/volume/smbfs.py
index d112af750c..9de1ce23cd 100644
--- a/nova/virt/libvirt/volume/smbfs.py
+++ b/nova/virt/libvirt/volume/smbfs.py
@@ -52,7 +52,7 @@ class LibvirtSMBFSVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
device_path = self._get_device_path(connection_info)
connection_info['data']['device_path'] = device_path
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Disconnect the volume."""
smbfs_share = connection_info['data']['export']
mount_path = self._get_mount_path(connection_info)
diff --git a/nova/virt/libvirt/volume/storpool.py b/nova/virt/libvirt/volume/storpool.py
index 0e71221f5b..e6dffca39a 100644
--- a/nova/virt/libvirt/volume/storpool.py
+++ b/nova/virt/libvirt/volume/storpool.py
@@ -47,10 +47,11 @@ class LibvirtStorPoolVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
device_info, instance=instance)
connection_info['data']['device_path'] = device_info['path']
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
LOG.debug("Detaching StorPool volume %s",
connection_info['data']['volume'], instance=instance)
- self.connector.disconnect_volume(connection_info['data'], None)
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
LOG.debug("Detached StorPool volume", instance=instance)
def extend_volume(self, connection_info, instance, requested_size):
diff --git a/nova/virt/libvirt/volume/volume.py b/nova/virt/libvirt/volume/volume.py
index 6d650c80e6..f76c3618b2 100644
--- a/nova/virt/libvirt/volume/volume.py
+++ b/nova/virt/libvirt/volume/volume.py
@@ -135,7 +135,7 @@ class LibvirtBaseVolumeDriver(object):
"""Connect the volume."""
pass
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Disconnect the volume."""
pass
diff --git a/nova/virt/libvirt/volume/vzstorage.py b/nova/virt/libvirt/volume/vzstorage.py
index 85ffb45076..babfdef55c 100644
--- a/nova/virt/libvirt/volume/vzstorage.py
+++ b/nova/virt/libvirt/volume/vzstorage.py
@@ -126,9 +126,10 @@ class LibvirtVZStorageVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
return _connect_volume(connection_info, instance)
- def disconnect_volume(self, connection_info, instance):
+ def disconnect_volume(self, connection_info, instance, force=False):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach Vzstorage Volume",
instance=instance)
- self.connector.disconnect_volume(connection_info['data'], None)
+ self.connector.disconnect_volume(
+ connection_info['data'], None, force=force)
LOG.debug("Disconnected Vzstorage Volume", instance=instance)
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 01efcfec19..f5328148d2 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -91,12 +91,14 @@ def _get_auth(context):
# from them generated from 'context.get_admin_context'
# which only set is_admin=True but is without token.
# So add load_auth_plugin when this condition appear.
+ user_auth = None
if context.is_admin and not context.auth_token:
if not _ADMIN_AUTH:
_ADMIN_AUTH = _load_auth_plugin(CONF)
- return _ADMIN_AUTH
- else:
- return service_auth.get_auth_plugin(context)
+ user_auth = _ADMIN_AUTH
+
+ # When user_auth = None, user_auth will be extracted from the context.
+ return service_auth.get_auth_plugin(context, user_auth=user_auth)
# NOTE(efried): Bug #1752152
diff --git a/releasenotes/notes/service-user-token-421d067c16257782.yaml b/releasenotes/notes/service-user-token-421d067c16257782.yaml
new file mode 100644
index 0000000000..d3af14fbb8
--- /dev/null
+++ b/releasenotes/notes/service-user-token-421d067c16257782.yaml
@@ -0,0 +1,11 @@
+upgrade:
+ - |
+ Configuration of service user tokens is now **required** for all Nova services
+ to ensure security of block-storage volume data.
+
+ All Nova configuration files must configure the ``[service_user]`` section as
+ described in the `documentation`__.
+
+ See https://bugs.launchpad.net/nova/+bug/2004555 for more details.
+
+ __ https://docs.openstack.org/nova/latest/admin/configuration/service-user-token.html
diff --git a/tools/check-cherry-picks.sh b/tools/check-cherry-picks.sh
index 46cef8c225..3042aa1659 100755
--- a/tools/check-cherry-picks.sh
+++ b/tools/check-cherry-picks.sh
@@ -23,7 +23,7 @@ hashes=$(git show --format='%b' --quiet $commit_hash | sed -nr 's/^.cherry picke
checked=0
branches+=""
for hash in $hashes; do
- branch=$(git branch -a --contains "$hash" 2>/dev/null| grep -oE '(master|stable/[a-z]+)')
+ branch=$(git branch -a --contains "$hash" 2>/dev/null| grep -oE '(master|stable/[a-z0-9.]+)')
if [ $? -ne 0 ]; then
echo "Cherry pick hash $hash not on any master or stable branches"
exit 1
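
For illustration, the widened character class now matches release-versioned
stable branches as well as named series (a quick check using Python's
``re``):

.. code-block:: python

    import re

    pattern = re.compile(r'(master|stable/[a-z0-9.]+)')

    # Old-style named series still match...
    assert pattern.search('remotes/origin/stable/zed')
    # ...and release-versioned branches like stable/2023.1 now match
    # too; the previous [a-z]+ class rejected the digits and dot.
    assert pattern.search('remotes/origin/stable/2023.1')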