-rw-r--r--  api-ref/source/parameters.yaml | 17
-rw-r--r--  api-ref/source/servers-actions.inc | 14
-rw-r--r--  devstack/nova-multi-cell-exclude-list.txt | 4
-rw-r--r--  doc/api_samples/versions/v21-version-get-resp.json | 2
-rw-r--r--  doc/api_samples/versions/versions-get-resp.json | 2
-rw-r--r--  doc/notification_samples/common_payloads/ImageMetaPropsPayload.json | 2
-rw-r--r--  doc/source/admin/libvirt-misc.rst | 30
-rw-r--r--  doc/source/cli/nova-status.rst | 2
-rw-r--r--  doc/source/configuration/policy-concepts.rst | 308
-rw-r--r--  doc/source/install/verify.rst | 4
-rw-r--r--  doc/source/user/support-matrix.ini | 20
-rw-r--r--  nova/api/openstack/api_version_request.py | 3
-rw-r--r--  nova/api/openstack/compute/rest_api_version_history.rst | 8
-rw-r--r--  nova/api/openstack/compute/schemas/server_external_events.py | 4
-rw-r--r--  nova/api/openstack/compute/server_external_events.py | 3
-rw-r--r--  nova/api/openstack/compute/servers.py | 3
-rw-r--r--  nova/api/validation/extra_specs/hw.py | 12
-rw-r--r--  nova/cmd/status.py | 67
-rw-r--r--  nova/compute/api.py | 24
-rw-r--r--  nova/compute/manager.py | 187
-rw-r--r--  nova/compute/rpcapi.py | 18
-rw-r--r--  nova/conductor/api.py | 5
-rw-r--r--  nova/conductor/manager.py | 7
-rw-r--r--  nova/conductor/rpcapi.py | 15
-rw-r--r--  nova/conf/compute.py | 15
-rw-r--r--  nova/exception.py | 15
-rw-r--r--  nova/notifications/objects/image.py | 3
-rw-r--r--  nova/objects/external_event.py | 6
-rw-r--r--  nova/objects/image_meta.py | 9
-rw-r--r--  nova/objects/service.py | 5
-rw-r--r--  nova/policies/admin_actions.py | 4
-rw-r--r--  nova/policies/admin_password.py | 2
-rw-r--r--  nova/policies/assisted_volume_snapshots.py | 4
-rw-r--r--  nova/policies/attach_interfaces.py | 8
-rw-r--r--  nova/policies/base.py | 44
-rw-r--r--  nova/policies/console_auth_tokens.py | 2
-rw-r--r--  nova/policies/console_output.py | 2
-rw-r--r--  nova/policies/create_backup.py | 2
-rw-r--r--  nova/policies/deferred_delete.py | 4
-rw-r--r--  nova/policies/evacuate.py | 2
-rw-r--r--  nova/policies/extended_server_attributes.py | 2
-rw-r--r--  nova/policies/floating_ips.py | 12
-rw-r--r--  nova/policies/instance_actions.py | 8
-rw-r--r--  nova/policies/ips.py | 4
-rw-r--r--  nova/policies/limits.py | 2
-rw-r--r--  nova/policies/lock_server.py | 6
-rw-r--r--  nova/policies/migrate_server.py | 4
-rw-r--r--  nova/policies/migrations.py | 2
-rw-r--r--  nova/policies/multinic.py | 4
-rw-r--r--  nova/policies/networks.py | 4
-rw-r--r--  nova/policies/pause_server.py | 4
-rw-r--r--  nova/policies/quota_sets.py | 14
-rw-r--r--  nova/policies/remote_consoles.py | 2
-rw-r--r--  nova/policies/rescue.py | 4
-rw-r--r--  nova/policies/security_groups.py | 20
-rw-r--r--  nova/policies/server_diagnostics.py | 2
-rw-r--r--  nova/policies/server_external_events.py | 2
-rw-r--r--  nova/policies/server_groups.py | 10
-rw-r--r--  nova/policies/server_metadata.py | 12
-rw-r--r--  nova/policies/server_password.py | 4
-rw-r--r--  nova/policies/server_tags.py | 12
-rw-r--r--  nova/policies/server_topology.py | 4
-rw-r--r--  nova/policies/servers.py | 60
-rw-r--r--  nova/policies/servers_migrations.py | 8
-rw-r--r--  nova/policies/shelve.py | 8
-rw-r--r--  nova/policies/simple_tenant_usage.py | 4
-rw-r--r--  nova/policies/suspend_server.py | 4
-rw-r--r--  nova/policies/volumes.py | 20
-rw-r--r--  nova/policies/volumes_attachments.py | 12
-rw-r--r--  nova/tests/fixtures/cinder.py | 9
-rw-r--r--  nova/tests/fixtures/libvirt_imagebackend.py | 13
-rw-r--r--  nova/tests/functional/api_sample_tests/test_evacuate.py | 21
-rw-r--r--  nova/tests/functional/notification_sample_tests/test_instance.py | 4
-rw-r--r--  nova/tests/functional/regressions/test_bug_1732947.py | 4
-rw-r--r--  nova/tests/functional/regressions/test_bug_1902925.py | 5
-rw-r--r--  nova/tests/functional/test_boot_from_volume.py | 40
-rw-r--r--  nova/tests/functional/test_servers.py | 85
-rw-r--r--  nova/tests/unit/cmd/test_policy.py | 4
-rw-r--r--  nova/tests/unit/cmd/test_status.py | 55
-rw-r--r--  nova/tests/unit/compute/test_api.py | 159
-rw-r--r--  nova/tests/unit/compute/test_compute.py | 24
-rw-r--r--  nova/tests/unit/compute/test_compute_mgr.py | 189
-rw-r--r--  nova/tests/unit/compute/test_rpcapi.py | 43
-rw-r--r--  nova/tests/unit/conductor/test_conductor.py | 39
-rw-r--r--  nova/tests/unit/console/test_websocketproxy.py | 61
-rw-r--r--  nova/tests/unit/notifications/objects/test_notification.py | 2
-rw-r--r--  nova/tests/unit/objects/test_image_meta.py | 24
-rw-r--r--  nova/tests/unit/objects/test_objects.py | 4
-rw-r--r--  nova/tests/unit/policies/base.py | 42
-rw-r--r--  nova/tests/unit/policies/test_admin_actions.py | 13
-rw-r--r--  nova/tests/unit/policies/test_admin_password.py | 14
-rw-r--r--  nova/tests/unit/policies/test_attach_interfaces.py | 42
-rw-r--r--  nova/tests/unit/policies/test_console_output.py | 14
-rw-r--r--  nova/tests/unit/policies/test_create_backup.py | 14
-rw-r--r--  nova/tests/unit/policies/test_deferred_delete.py | 22
-rw-r--r--  nova/tests/unit/policies/test_evacuate.py | 13
-rw-r--r--  nova/tests/unit/policies/test_floating_ips.py | 38
-rw-r--r--  nova/tests/unit/policies/test_instance_actions.py | 35
-rw-r--r--  nova/tests/unit/policies/test_limits.py | 4
-rw-r--r--  nova/tests/unit/policies/test_lock_server.py | 19
-rw-r--r--  nova/tests/unit/policies/test_migrate_server.py | 11
-rw-r--r--  nova/tests/unit/policies/test_multinic.py | 22
-rw-r--r--  nova/tests/unit/policies/test_networks.py | 8
-rw-r--r--  nova/tests/unit/policies/test_pause_server.py | 14
-rw-r--r--  nova/tests/unit/policies/test_remote_consoles.py | 14
-rw-r--r--  nova/tests/unit/policies/test_rescue.py | 22
-rw-r--r--  nova/tests/unit/policies/test_security_groups.py | 73
-rw-r--r--  nova/tests/unit/policies/test_server_diagnostics.py | 11
-rw-r--r--  nova/tests/unit/policies/test_server_groups.py | 33
-rw-r--r--  nova/tests/unit/policies/test_server_ips.py | 21
-rw-r--r--  nova/tests/unit/policies/test_server_metadata.py | 26
-rw-r--r--  nova/tests/unit/policies/test_server_migrations.py | 11
-rw-r--r--  nova/tests/unit/policies/test_server_password.py | 34
-rw-r--r--  nova/tests/unit/policies/test_server_tags.py | 26
-rw-r--r--  nova/tests/unit/policies/test_server_topology.py | 22
-rw-r--r--  nova/tests/unit/policies/test_servers.py | 32
-rw-r--r--  nova/tests/unit/policies/test_shelve.py | 16
-rw-r--r--  nova/tests/unit/policies/test_simple_tenant_usage.py | 19
-rw-r--r--  nova/tests/unit/policies/test_suspend_server.py | 14
-rw-r--r--  nova/tests/unit/policies/test_tenant_networks.py | 8
-rw-r--r--  nova/tests/unit/policies/test_volumes.py | 74
-rw-r--r--  nova/tests/unit/test_policy.py | 3
-rw-r--r--  nova/tests/unit/virt/libvirt/test_blockinfo.py | 11
-rw-r--r--  nova/tests/unit/virt/libvirt/test_driver.py | 98
-rw-r--r--  nova/tests/unit/virt/libvirt/test_imagebackend.py | 36
-rw-r--r--  nova/tests/unit/virt/libvirt/test_utils.py | 74
-rw-r--r--  nova/tests/unit/virt/test_hardware.py | 48
-rw-r--r--  nova/tests/unit/virt/test_virt.py | 27
-rw-r--r--  nova/virt/driver.py | 19
-rw-r--r--  nova/virt/hardware.py | 44
-rw-r--r--  nova/virt/ironic/driver.py | 9
-rw-r--r--  nova/virt/libvirt/blockinfo.py | 11
-rw-r--r--  nova/virt/libvirt/driver.py | 82
-rw-r--r--  nova/virt/libvirt/imagebackend.py | 92
-rw-r--r--  nova/virt/libvirt/utils.py | 70
-rw-r--r--  releasenotes/notes/add-volume-rebuild-b973562ea8f49347.yaml | 10
-rw-r--r--  releasenotes/notes/new_locked_memory_option-b68a031779366828.yaml | 13
-rw-r--r--  releasenotes/notes/project-reader-rbac-8a1d11b3b2e776fd.yaml | 36
138 files changed, 2189 insertions(+), 1202 deletions(-)
diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index 63f0f58963..ad5e72fb9b 100644
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -4019,14 +4019,15 @@ imageRef:
type: string
imageRef_rebuild:
description: |
- The UUID of the image to rebuild for your server instance.
- It must be a valid UUID otherwise API will return 400.
- If rebuilding a volume-backed server with a new image
- (an image different from the image used when creating the volume),
- the API will return 400.
- For non-volume-backed servers, specifying a new image will result
- in validating that the image is acceptable for the current compute host
- on which the server exists. If the new image is not valid,
+    The UUID of the image to rebuild for your server instance. It
+    must be a valid UUID, otherwise the API returns 400. To rebuild a
+    volume-backed server with a new image, microversion 2.93 or later
+    must be specified in the request; otherwise the request falls back
+    to the old behaviour, i.e. the API returns 400 (for an image
+    different from the image used when creating the volume). For
+    non-volume-backed servers, specifying a new image will result in
+    validating that the image is acceptable for the current compute
+    host on which the server exists. If the new image is not valid,
the server will go into ``ERROR`` status.
in: body
required: true
diff --git a/api-ref/source/servers-actions.inc b/api-ref/source/servers-actions.inc
index f480403a40..3b8b68d4ff 100644
--- a/api-ref/source/servers-actions.inc
+++ b/api-ref/source/servers-actions.inc
@@ -540,7 +540,13 @@ Rebuilds a server.
Specify the ``rebuild`` action in the request body.
This operation recreates the root disk of the server.
-For a volume-backed server, this operation keeps the contents of the volume.
+
+With microversion 2.93, we support rebuilding volume-backed
+instances, which will reimage the volume with the provided
+image. For microversions < 2.93, this operation keeps the
+contents of the volume only if the provided image is the same
+as the image with which the volume was created; otherwise the
+operation will error out.
**Preconditions**
@@ -552,8 +558,10 @@ If the server was in status ``SHUTOFF`` before the rebuild, it will be stopped
and in status ``SHUTOFF`` after the rebuild, otherwise it will be ``ACTIVE``
if the rebuild was successful or ``ERROR`` if the rebuild failed.
-.. note:: There is a `known limitation`_ where the root disk is not
- replaced for volume-backed instances during a rebuild.
+.. note:: With microversion 2.93, we support rebuilding volume backed
+ instances. If any microversion < 2.93 is specified, there is a
+ `known limitation`_ where the root disk is not replaced for
+ volume-backed instances during a rebuild.
.. _known limitation: https://bugs.launchpad.net/nova/+bug/1482040
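
For illustration, a volume-backed rebuild under the new microversion could be
requested as follows; the endpoint, token, and IDs are placeholders, while the
``rebuild`` action body and the 2.93 microversion header follow the API change
described above::

    import requests

    NOVA = "http://controller:8774/v2.1"  # placeholder endpoint
    TOKEN = "<keystone token>"            # placeholder credential
    SERVER_ID = "<server uuid>"
    IMAGE_ID = "<image uuid>"

    resp = requests.post(
        f"{NOVA}/servers/{SERVER_ID}/action",
        headers={
            "X-Auth-Token": TOKEN,
            # 2.93 or later is required to reimage the root volume
            "X-OpenStack-Nova-API-Version": "2.93",
        },
        json={"rebuild": {"imageRef": IMAGE_ID}},
    )
    resp.raise_for_status()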
diff --git a/devstack/nova-multi-cell-exclude-list.txt b/devstack/nova-multi-cell-exclude-list.txt
index a61229c906..0dbe383abf 100644
--- a/devstack/nova-multi-cell-exclude-list.txt
+++ b/devstack/nova-multi-cell-exclude-list.txt
@@ -10,3 +10,7 @@
# https://bugs.launchpad.net/nova/+bug/1907511 for details
test_migrate_with_qos_min_bw_allocation
test_resize_with_qos_min_bw_allocation
+
+# Also exclude unshelve to specific host test cases as unshelve cannot move VMs across cells
+# See https://bugs.launchpad.net/nova/+bug/1988316
+tempest.api.compute.admin.test_servers_on_multinodes.UnshelveToHostMultiNodesTest
diff --git a/doc/api_samples/versions/v21-version-get-resp.json b/doc/api_samples/versions/v21-version-get-resp.json
index 6e98517b61..78678556bf 100644
--- a/doc/api_samples/versions/v21-version-get-resp.json
+++ b/doc/api_samples/versions/v21-version-get-resp.json
@@ -19,7 +19,7 @@
}
],
"status": "CURRENT",
- "version": "2.92",
+ "version": "2.93",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/api_samples/versions/versions-get-resp.json b/doc/api_samples/versions/versions-get-resp.json
index 5fdd20ae61..59b67279b7 100644
--- a/doc/api_samples/versions/versions-get-resp.json
+++ b/doc/api_samples/versions/versions-get-resp.json
@@ -22,7 +22,7 @@
}
],
"status": "CURRENT",
- "version": "2.92",
+ "version": "2.93",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json b/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json
index c4af49022f..6aa4d9cbe5 100644
--- a/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json
+++ b/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json
@@ -4,5 +4,5 @@
"hw_architecture": "x86_64"
},
"nova_object.name": "ImageMetaPropsPayload",
- "nova_object.version": "1.10"
+ "nova_object.version": "1.11"
}
diff --git a/doc/source/admin/libvirt-misc.rst b/doc/source/admin/libvirt-misc.rst
index 87dbe18ea4..cf5c10c64e 100644
--- a/doc/source/admin/libvirt-misc.rst
+++ b/doc/source/admin/libvirt-misc.rst
@@ -138,3 +138,33 @@ For example, to hide your signature from the guest OS, run:
.. code:: console
$ openstack flavor set $FLAVOR --property hw:hide_hypervisor_id=true
+
+
+.. _extra-spec-locked_memory:
+
+Locked memory allocation
+------------------------
+
+.. versionadded:: 26.0.0 (Zed)
+
+Locking memory marks the guest memory allocations as unmovable and
+unswappable. It is implicitly enabled in a number of cases such as SEV or
+realtime guests, but can also be enabled explicitly using the
+``hw:locked_memory`` extra spec (or the ``hw_locked_memory`` image
+property). Both accept boolean values in string format, e.g. 'true' or
+'false'. A `FlavorImageLockedMemoryConflict` exception is raised if the
+flavor extra spec and image property are both specified but with
+conflicting boolean values. Locking memory is only allowed if you have
+also set ``hw:mem_page_size``, so we can ensure that the scheduler can
+actually account for this correctly and prevent out of memory events;
+otherwise a `LockMemoryForbidden` exception is raised.
+
+.. code:: console
+
+ $ openstack flavor set FLAVOR-NAME \
+ --property hw:locked_memory=BOOLEAN_VALUE
+
+.. note::
+
+ This is currently only supported by the libvirt driver.
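
The equivalent image-side setting, assuming the ``hw_locked_memory`` image
property described above is applied with the standard image CLI, would be::

    $ openstack image set IMAGE-NAME \
      --property hw_locked_memory=true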
diff --git a/doc/source/cli/nova-status.rst b/doc/source/cli/nova-status.rst
index a198159e17..5fbb23f388 100644
--- a/doc/source/cli/nova-status.rst
+++ b/doc/source/cli/nova-status.rst
@@ -137,7 +137,7 @@ Upgrade
* Checks for the Placement API are modified to require version 1.35.
* Checks for the policy files are not automatically overwritten with
- new defaults.
+  new defaults. This check has been dropped in the 26.0.0 (Zed) release.
**22.0.0 (Victoria)**
diff --git a/doc/source/configuration/policy-concepts.rst b/doc/source/configuration/policy-concepts.rst
index b2df1e4c1b..dd0c4686bd 100644
--- a/doc/source/configuration/policy-concepts.rst
+++ b/doc/source/configuration/policy-concepts.rst
@@ -65,36 +65,13 @@ represent the layer of authorization required to access an API.
.. note::
- The ``scope_type`` of each policy is hardcoded and is not
- overridable via the policy file.
+   The ``scope_type`` of each policy is hardcoded to ``project`` scope
+   and is not overridable via the policy file.
Nova policies have implemented the scope concept by defining the ``scope_type``
-in policies. To know each policy's ``scope_type``, please refer to the
-:doc:`Policy Reference </configuration/policy>` and look for ``Scope Types`` or
-``Intended scope(s)`` in :doc:`Policy Sample File </configuration/sample-policy>`
-as shown in below examples.
-
-.. rubric:: ``system`` scope
-
-Policies with a ``scope_type`` of ``system`` means a user with a
-``system-scoped`` token has permission to access the resource. This can be
-seen as a global role. All the system-level operation's policies
-have defaulted to ``scope_type`` of ``['system']``.
-
-For example, consider the ``GET /os-hypervisors`` API.
-
-.. code::
-
- # List all hypervisors.
- # GET /os-hypervisors
- # Intended scope(s): system
- #"os_compute_api:os-hypervisors:list": "rule:system_reader_api"
-
-.. rubric:: ``project`` scope
-
-Policies with a ``scope_type`` of ``project`` means a user with a
-``project-scoped`` token has permission to access the resource. Project-level
-only operation's policies are defaulted to ``scope_type`` of ``['project']``.
+for all of the policies as ``project`` scoped. This means that if a user
+tries to access nova APIs with a ``system``-scoped token, they will get a
+403 permission denied error.
For example, consider the ``POST /os-server-groups`` API.
@@ -105,28 +82,6 @@ For example, consider the ``POST /os-server-groups`` API.
# Intended scope(s): project
#"os_compute_api:os-server-groups:create": "rule:project_member_api"
-.. rubric:: ``system and project`` scope
-
-Policies with a ``scope_type`` of ``system and project`` means a user with a
-``system-scoped`` or ``project-scoped`` token has permission to access the
-resource. All the system and project level operation's policies have defaulted
-to ``scope_type`` of ``['system', 'project']``.
-
-For example, consider the ``GET /flavors/{flavor_id}/os-extra_specs/{flavor_extra_spec_key}``
-API.
-
-.. code::
-
- # Show an extra spec for a flavor
- # GET /flavors/{flavor_id}/os-extra_specs/{flavor_extra_spec_key}
- # Intended scope(s): system, project
- #"os_compute_api:os-flavor-extra-specs:show": "rule:project_reader_or_admin"
-
-These scope types provide a way to differentiate between system-level and
-project-level access roles. You can control the information with scope of the
-users. This means you can control that none of the project level role can get
-the hypervisor information.
-
Policy scope is disabled by default to allow operators to migrate from
the old policy enforcement system in a graceful way. This can be
enabled by configuring the :oslo.config:option:`oslo_policy.enforce_scope`
@@ -149,52 +104,129 @@ defaults for each policy.
.. rubric:: ``reader``
-This provides read-only access to the resources within the ``system`` or
-``project``. Nova policies are defaulted to below rules:
-
-.. code::
-
- system_reader_api
- Default
- role:reader and system_scope:all
-
- system_or_project_reader
- Default
- (rule:system_reader_api) or (role:reader and project_id:%(project_id)s)
+This provides read-only access to the resources. Nova policies default to
+the below rules:
+
+.. code-block:: python
+
+ policy.RuleDefault(
+ name="project_reader",
+ check_str="role:reader and project_id:%(project_id)s",
+ description="Default rule for Project level read only APIs."
+ )
+
+Using it in a policy rule (with admin + reader access; because we want to
+keep legacy admin behavior the same, the admin role is also given access to
+the reader APIs):
+
+.. code-block:: python
+
+ policy.DocumentedRuleDefault(
+ name='os_compute_api:servers:show',
+        check_str='role:admin or (role:reader and project_id:%(project_id)s)',
+ description="Show a server",
+ operations=[
+ {
+ 'method': 'GET',
+ 'path': '/servers/{server_id}'
+ }
+ ],
+ scope_types=['project'],
+ )
+
+OR
+
+.. code-block:: python
+
+ policy.RuleDefault(
+ name="admin_api",
+ check_str="role:admin",
+ description="Default rule for administrative APIs."
+ )
+
+ policy.DocumentedRuleDefault(
+ name='os_compute_api:servers:show',
+        check_str='rule:admin_api or rule:project_reader',
+ description='Show a server',
+ operations=[
+ {
+ 'method': 'GET',
+ 'path': '/servers/{server_id}'
+ }
+ ],
+ scope_types=['project'],
+ )
.. rubric:: ``member``
-This role is to perform the project level write operation with combination
-to the system admin. Nova policies are defaulted to below rules:
-
-.. code::
-
- project_member_api
- Default
- role:member and project_id:%(project_id)s
-
- system_admin_or_owner
- Default
- (role:admin and system_scope:all) or (role:member and project_id:%(project_id)s)
+A project-member is someone with the ``member`` role on a project. It is
+intended for end users who consume resources within a project and who need
+higher permissions than the reader role but less than the admin role.
+It inherits all the permissions of a project-reader.
+
+project-member persona in the policy check string:
+
+.. code-block:: python
+
+ policy.RuleDefault(
+ name="project_member",
+ check_str="role:member and project_id:%(project_id)s",
+ description="Default rule for Project level non admin APIs."
+ )
+
+Using it in a policy rule (with admin + member access; because we want to
+keep legacy admin behavior, the admin role gets access to the project-level
+member APIs):
+
+.. code-block:: python
+
+ policy.DocumentedRuleDefault(
+ name='os_compute_api:servers:create',
+        check_str='role:admin or (role:member and project_id:%(project_id)s)',
+ description='Create a server',
+ operations=[
+ {
+ 'method': 'POST',
+ 'path': '/servers'
+ }
+ ],
+ scope_types=['project'],
+ )
+
+OR
+
+.. code-block:: python
+
+ policy.RuleDefault(
+ name="admin_api",
+ check_str="role:admin",
+ description="Default rule for administrative APIs."
+ )
+
+ policy.DocumentedRuleDefault(
+ name='os_compute_api:servers:create',
+        check_str='rule:admin_api or rule:project_member',
+ description='Create a server',
+ operations=[
+ {
+ 'method': 'POST',
+ 'path': '/servers'
+ }
+ ],
+ scope_types=['project'],
+ )
+
+The 'project_id:%(project_id)s' part of the check_str is important: it
+restricts access to within the requested project.
.. rubric:: ``admin``
-This role is to perform the admin level write operation at system as well
-as at project-level operations. Nova policies are defaulted to below rules:
-
-.. code::
-
- system_admin_api
- Default
- role:admin and system_scope:all
+This role performs the admin-level write operations. Nova policies default
+to the below rule:
- project_admin_api
- Default
- role:admin and project_id:%(project_id)s
+.. code-block:: python
- system_admin_or_owner
- Default
- (role:admin and system_scope:all) or (role:member and project_id:%(project_id)s)
+    policy.DocumentedRuleDefault(
+        name='os_compute_api:os-hypervisors:list',
+        check_str='role:admin',
+        description='List all hypervisors.',
+        operations=[{'method': 'GET', 'path': '/os-hypervisors'}],
+        scope_types=['project'],
+    )
With these new defaults, you can solve the problem of:
@@ -203,8 +235,8 @@ With these new defaults, you can solve the problem of:
your deployment for security purposes.
#. Customize the policy in a better way. For example, you will be able
- to provide access to project level user to perform live migration for their
- server or any other project with their token.
+   to provide access to a project-level user to perform operations within
+   their project only.
Nova supported scope & Roles
-----------------------------
@@ -212,40 +244,21 @@ Nova supported scope & Roles
Nova supports the below combination of scopes and roles where roles can be
overridden in the policy.yaml file but scope is not override-able.
-#. ADMIN: ``admin`` role on ``system`` scope. This is System Administrator to
- perform the system level resource operations. Example: enable/disable compute
- services.
-
-#. PROJECT_ADMIN: ``admin`` role on ``project`` scope. This is used to perform
- admin level operation within project. For example: Live migrate server.
-
- .. note::
-
- PROJECT_ADMIN has the limitation for the below policies
-
- * ``os_compute_api:servers:create:forced_host``
- * ``os_compute_api:servers:compute:servers:create:requested_destination``
-
- To create a server on specific host via force host or requested
- destination, you need to pass the hostname in ``POST /servers``
- API request but there is no way for PROJECT_ADMIN to get the hostname
- via API. This limitation will be addressed in a future release.
-
+#. ADMIN: ``admin`` role on ``project`` scope. This is an administrator who
+   performs the admin-level operations. Examples: enable/disable a compute
+   service, live migrate a server, etc.
#. PROJECT_MEMBER: ``member`` role on ``project`` scope. This is used to perform
resource owner level operation within project. For example: Pause a server.
-
#. PROJECT_READER: ``reader`` role on ``project`` scope. This is used to perform
read-only operation within project. For example: Get server.
+#. PROJECT_MEMBER_OR_ADMIN: ``admin`` or ``member`` role on ``project``
+   scope. Such policy rules are the default for most of the owner-level
+   APIs and, aligned with the ``member`` role, legacy admin can continue
+   to access those APIs.
-#. PROJECT_READER_OR_ADMIN: ``admin`` role on ``system`` scope
- or ``reader`` role on ``project`` scope. Such policy rules are scoped
- as both ``system`` as well as ``project``. Example: to allow system
- admin and project reader to list flavor extra specs.
-
- .. note:: As of now, only ``system`` and ``project`` scopes are supported in Nova.
+#. PROJECT_READER_OR_ADMIN: ``admin`` or ``reader`` role on ``project``
+   scope. Such policy rules are the default for most of the read-only APIs
+   so that legacy admin can continue to access those APIs.
Backward Compatibility
----------------------
@@ -253,10 +266,10 @@ Backward Compatibility
Backward compatibility with versions prior to 21.0.0 (Ussuri) is maintained by
supporting the old defaults and disabling the ``scope_type`` feature by default.
This means the old defaults and deployments that use them will keep working
-as-is. However, we encourage every deployment to switch to new policy.
-Scope checks are disabled by default and will be enabled by default starting
-Nova 26.0.0 (OpenStack Zed release) and the old defaults will be removed
-starting in the Nova 27.0.0 release.
+as-is. However, we encourage every deployment to switch to the new policy. The
+new defaults will be enabled by default in the OpenStack 2023.1
+(Nova 27.0.0) release and the old defaults will be removed starting in the
+OpenStack 2023.2 (Nova 28.0.0) release.
To implement the new default reader roles, some policies needed to become
granular. They have been renamed, with the old names still supported for
@@ -275,7 +288,6 @@ Here is step wise guide for migration:
You need to create the new token with scope knowledge via below CLI:
- - :keystone-doc:`Create System Scoped Token </admin/tokens-overview.html#operation_create_system_token>`.
- :keystone-doc:`Create Project Scoped Token </admin/tokens-overview.html#operation_create_project_scoped_token>`.
#. Create new default roles in keystone if not done:
@@ -295,10 +307,6 @@ Here is step wise guide for migration:
(assuming the rest of the policy passes). The default value of this flag
is False.
- .. note:: Before you enable this flag, you need to audit your users and make
- sure everyone who needs system-level access has a system role
- assignment in keystone.
-
#. Enable new defaults
The :oslo.config:option:`oslo_policy.enforce_new_defaults` flag switches
@@ -311,7 +319,6 @@ Here is step wise guide for migration:
.. note:: Before you enable this flag, you need to educate users about the
different roles they need to use to continue using Nova APIs.
-
#. Check for deprecated policies
A few policies were made more granular to implement the reader roles. New
@@ -319,28 +326,31 @@ Here is step wise guide for migration:
are overwritten in policy file, then warning will be logged. Please migrate
those policies to new policy names.
+.. note::
+
+   We recommend enabling both scope and the new defaults together;
+   otherwise you may experience some late failures with unclear error
+   messages. For example, if you enable the new defaults but disable the
+   scope check, system users will be allowed to access the APIs but will
+   fail later due to the project check, which can be difficult to debug.
+
The below table shows how legacy rules are mapped to new rules:
-+--------------------+----------------------------------+-----------------+-------------------+
-| Legacy Rules | New Rules | | |
-+====================+==================================+=================+===================+
-| | | *Roles* | *Scope* |
-| +----------------------------------+-----------------+-------------------+
-| | ADMIN | admin | system |
-| Project Admin +----------------------------------+-----------------+ |
-| Role | PROJECT_ADMIN | admin | project |
-| | | | |
-+--------------------+----------------------------------+-----------------+-------------------+
-| | PROJECT_ADMIN | admin | project |
-| +----------------------------------+-----------------+ |
-| | PROJECT_MEMBER | member | |
-| +----------------------------------+-----------------+ |
-| Project admin or | PROJECT_READER | reader | |
-| owner role +----------------------------------+-----------------+-------------------+
-| | PROJECT_READER_OR_ADMIN | admin on system | system |
-| | | or reader on | OR |
-| | | project | project |
-+--------------------+----------------------------------+-----------------+-------------------+
-
-We expect all deployments to migrate to new policy by 27.0.0 release so that
-we can remove the support of old policies.
++--------------------+---------------------------+----------------+-----------+
+| Legacy Rule | New Rules |Operation |scope_type |
++====================+===========================+================+===========+
+| RULE_ADMIN_API |-> ADMIN |Global resource | [project] |
+| | |Write & Read | |
++--------------------+---------------------------+----------------+-----------+
+| |-> ADMIN |Project admin | [project] |
+| | |level operation | |
+| +---------------------------+----------------+-----------+
+| RULE_ADMIN_OR_OWNER|-> PROJECT_MEMBER_OR_ADMIN |Project resource| [project] |
+| | |Write | |
+| +---------------------------+----------------+-----------+
+| |-> PROJECT_READER_OR_ADMIN |Project resource| [project] |
+| | |Read | |
++--------------------+---------------------------+----------------+-----------+
+
+We expect all deployments to migrate to the new policy by OpenStack 2023.1
+(Nova 27.0.0) release so that we can remove the support of old policies.
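
As a hypothetical example of the customization described above, an operator
could widen the roles on a single rule in ``policy.yaml`` while the hardcoded
``project`` scope_type stays in effect (``role:auditor`` is an illustrative
custom role, not a Nova default)::

    # policy.yaml -- only the role portion is overridable
    "os_compute_api:os-hypervisors:list": "role:admin or role:auditor"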
diff --git a/doc/source/install/verify.rst b/doc/source/install/verify.rst
index 99936c1d9c..c4e0383af4 100644
--- a/doc/source/install/verify.rst
+++ b/doc/source/install/verify.rst
@@ -119,10 +119,6 @@ Verify operation of the Compute service.
| Result: Success |
| Details: None |
+--------------------------------------------------------------------+
- | Check: Policy Scope-based Defaults |
- | Result: Success |
- | Details: None |
- +--------------------------------------------------------------------+
| Check: Policy File JSON to YAML Migration |
| Result: Success |
| Details: None |
diff --git a/doc/source/user/support-matrix.ini b/doc/source/user/support-matrix.ini
index 412623b4a3..ae5bbde110 100644
--- a/doc/source/user/support-matrix.ini
+++ b/doc/source/user/support-matrix.ini
@@ -332,6 +332,26 @@ driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
driver.zvm=unknown
+[operation.rebuild-volume-backed]
+title=Rebuild volume backed instance
+status=optional
+notes=This will wipe out all existing data in the root volume
+  of a volume-backed instance. This is available from microversion
+  2.93 onwards.
+cli=openstack server rebuild --reimage-boot-volume --image <image> <server>
+driver.libvirt-kvm-x86=complete
+driver.libvirt-kvm-aarch64=complete
+driver.libvirt-kvm-ppc64=complete
+driver.libvirt-kvm-s390x=complete
+driver.libvirt-qemu-x86=complete
+driver.libvirt-lxc=unknown
+driver.vmware=missing
+driver.hyperv=missing
+driver.ironic=missing
+driver.libvirt-vz-vm=missing
+driver.libvirt-vz-ct=missing
+driver.zvm=missing
+
[operation.get-guest-info]
title=Guest instance status
status=mandatory
diff --git a/nova/api/openstack/api_version_request.py b/nova/api/openstack/api_version_request.py
index a3a8b1f41e..84d8872f9e 100644
--- a/nova/api/openstack/api_version_request.py
+++ b/nova/api/openstack/api_version_request.py
@@ -252,6 +252,7 @@ REST_API_VERSION_HISTORY = """REST API Version History:
* 2.92 - Drop generation of keypair, add keypair name validation on
``POST /os-keypairs`` and allow including @ and dot (.) characters
in keypair name.
+ * 2.93 - Add support for volume backed server rebuild.
"""
# The minimum and maximum versions of the API supported
@@ -260,7 +261,7 @@ REST_API_VERSION_HISTORY = """REST API Version History:
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
_MIN_API_VERSION = '2.1'
-_MAX_API_VERSION = '2.92'
+_MAX_API_VERSION = '2.93'
DEFAULT_API_VERSION = _MIN_API_VERSION
# Almost all proxy APIs which are related to network, images and baremetal
diff --git a/nova/api/openstack/compute/rest_api_version_history.rst b/nova/api/openstack/compute/rest_api_version_history.rst
index b65e50c62f..8f96a34e7d 100644
--- a/nova/api/openstack/compute/rest_api_version_history.rst
+++ b/nova/api/openstack/compute/rest_api_version_history.rst
@@ -1219,3 +1219,11 @@ Add support to pin a server to an availability zone or unpin a server from any a
The ``POST /os-keypairs`` API now forbids generating a keypair and allows new
safe characters, specifically '@' and '.' (dot character).
+
+2.93
+----
+
+Add support for volume-backed server rebuild. The end user provides the
+image with the rebuild command, and the volume is rebuilt with the new
+image, similar to the result of rebuilding an ephemeral disk.
+
diff --git a/nova/api/openstack/compute/schemas/server_external_events.py b/nova/api/openstack/compute/schemas/server_external_events.py
index b8a89e047d..6ac3f009ec 100644
--- a/nova/api/openstack/compute/schemas/server_external_events.py
+++ b/nova/api/openstack/compute/schemas/server_external_events.py
@@ -63,3 +63,7 @@ name['enum'].append('power-update')
create_v282 = copy.deepcopy(create_v276)
name = create_v282['properties']['events']['items']['properties']['name']
name['enum'].append('accelerator-request-bound')
+
+create_v293 = copy.deepcopy(create_v282)
+name = create_v293['properties']['events']['items']['properties']['name']
+name['enum'].append('volume-reimaged')
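
For reference, a sketch of the ``os-server-external-events`` payload that
would carry the new event once reimaging finishes; the field names follow the
existing external-events request format, and the UUIDs are placeholders::

    payload = {
        "events": [{
            "name": "volume-reimaged",
            "server_uuid": "<instance uuid>",
            # the tag carries the root volume id, matching the
            # ('volume-reimaged', root_bdm.volume_id) pair that the
            # compute manager waits on
            "tag": "<root volume uuid>",
            "status": "completed",
        }]
    }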
diff --git a/nova/api/openstack/compute/server_external_events.py b/nova/api/openstack/compute/server_external_events.py
index 55f17e3541..23813d5790 100644
--- a/nova/api/openstack/compute/server_external_events.py
+++ b/nova/api/openstack/compute/server_external_events.py
@@ -69,7 +69,8 @@ class ServerExternalEventsController(wsgi.Controller):
@validation.schema(server_external_events.create, '2.0', '2.50')
@validation.schema(server_external_events.create_v251, '2.51', '2.75')
@validation.schema(server_external_events.create_v276, '2.76', '2.81')
- @validation.schema(server_external_events.create_v282, '2.82')
+ @validation.schema(server_external_events.create_v282, '2.82', '2.92')
+ @validation.schema(server_external_events.create_v293, '2.93')
def create(self, req, body):
"""Creates a new instance event."""
context = req.environ['nova.context']
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index 88f5fd4f8e..6a9bf1fa92 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -1205,6 +1205,9 @@ class ServersController(wsgi.Controller):
):
kwargs['hostname'] = rebuild_dict['hostname']
+ if api_version_request.is_supported(req, min_version='2.93'):
+ kwargs['reimage_boot_volume'] = True
+
for request_attribute, instance_attribute in attr_map.items():
try:
if request_attribute == 'name':
diff --git a/nova/api/validation/extra_specs/hw.py b/nova/api/validation/extra_specs/hw.py
index bb23e7ce8e..02e8de9cf2 100644
--- a/nova/api/validation/extra_specs/hw.py
+++ b/nova/api/validation/extra_specs/hw.py
@@ -163,6 +163,18 @@ hugepage_validators = [
'pattern': r'(large|small|any|\d+([kKMGT]i?)?(b|bit|B)?)',
},
),
+ base.ExtraSpecValidator(
+ name='hw:locked_memory',
+ description=(
+            'Determine if **guest** (instance) memory should be locked, '
+            'preventing swapping. This is required in rare cases for '
+            'device DMA transfers. Only supported by the libvirt virt '
+            'driver.'
+ ),
+ value={
+ 'type': bool,
+ 'description': 'Whether to lock **guest** (instance) memory.',
+ },
+ ),
]
numa_validators = [
diff --git a/nova/cmd/status.py b/nova/cmd/status.py
index 048ca6d1d3..29e4a5d01e 100644
--- a/nova/cmd/status.py
+++ b/nova/cmd/status.py
@@ -41,7 +41,6 @@ from nova.objects import cell_mapping as cell_mapping_obj
# to be registered under nova.objects when called via _check_machine_type_set
from nova.objects import image_meta as image_meta_obj # noqa: F401
from nova.objects import instance as instance_obj # noqa: F401
-from nova import policy
from nova import utils
from nova import version
from nova.virt.libvirt import machine_type_utils
@@ -249,70 +248,6 @@ class UpgradeCommands(upgradecheck.UpgradeCommands):
str(ex))
return upgradecheck.Result(upgradecheck.Code.SUCCESS)
- def _check_policy(self):
- """Checks to see if policy file is overwritten with the new
- defaults.
- """
- msg = _("Your policy file contains rules which examine token scope, "
- "which may be due to generation with the new defaults. "
- "If that is done intentionally to migrate to the new rule "
- "format, then you are required to enable the flag "
- "'oslo_policy.enforce_scope=True' and educate end users on "
- "how to request scoped tokens from Keystone. Another easy "
- "and recommended way for you to achieve the same is via two "
- "flags, 'oslo_policy.enforce_scope=True' and "
- "'oslo_policy.enforce_new_defaults=True' and avoid "
- "overwriting the file. Please refer to this document to "
- "know the complete migration steps: "
- "https://docs.openstack.org/nova/latest/configuration"
- "/policy-concepts.html. If you did not intend to migrate "
- "to new defaults in this upgrade, then with your current "
- "policy file the scope checking rule will fail. A possible "
- "reason for such a policy file is that you generated it with "
- "'oslopolicy-sample-generator' in json format. "
- "Three ways to fix this until you are ready to migrate to "
- "scoped policies: 1. Generate the policy file with "
- "'oslopolicy-sample-generator' in yaml format, keep "
- "the generated content commented out, and update "
- "the generated policy.yaml location in "
- "``oslo_policy.policy_file``. "
- "2. Use a pre-existing sample config file from the Train "
- "release. 3. Use an empty or non-existent file to take all "
- "the defaults.")
- rule = "context_is_admin"
- rule_new_default = "role:admin and system_scope:all"
- status = upgradecheck.Result(upgradecheck.Code.SUCCESS)
- # NOTE(gmann): Initialise the policy if it not initialized.
- # We need policy enforcer with all the rules loaded to check
- # their value with defaults.
- try:
- if policy._ENFORCER is None:
- policy.init(suppress_deprecation_warnings=True)
-
- # For safer side, recheck that the enforcer is available before
- # upgrade checks. If something is wrong on oslo side and enforcer
- # is still not available the return warning to avoid any false
- # result.
- if policy._ENFORCER is not None:
- current_rule = str(policy._ENFORCER.rules[rule]).strip("()")
- if (current_rule == rule_new_default and
- not CONF.oslo_policy.enforce_scope):
- status = upgradecheck.Result(upgradecheck.Code.WARNING,
- msg)
- else:
- status = upgradecheck.Result(
- upgradecheck.Code.WARNING,
- _('Policy is not initialized to check the policy rules'))
- except Exception as ex:
- status = upgradecheck.Result(
- upgradecheck.Code.WARNING,
- _('Unable to perform policy checks due to error: %s') %
- str(ex))
- # reset the policy state so that it can be initialized from fresh if
- # operator changes policy file after running this upgrade checks.
- policy.reset()
- return status
-
def _check_old_computes(self):
# warn if there are computes in the system older than the previous
# major release
@@ -350,8 +285,6 @@ https://docs.openstack.org/latest/nova/admin/hw_machine_type.html"""))
(_('Placement API'), _check_placement),
# Added in Train
(_('Cinder API'), _check_cinder),
- # Added in Ussuri
- (_('Policy Scope-based Defaults'), _check_policy),
# Added in Victoria
(
_('Policy File JSON to YAML Migration'),
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 9fc4ca24a3..c06fefdd3c 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -3589,7 +3589,7 @@ class API:
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rebuild(self, context, instance, image_href, admin_password,
- files_to_inject=None, **kwargs):
+ files_to_inject=None, reimage_boot_volume=False, **kwargs):
"""Rebuild the given instance with the provided attributes."""
files_to_inject = files_to_inject or []
metadata = kwargs.get('metadata', {})
@@ -3670,15 +3670,16 @@ class API:
orig_image_ref = volume_image_metadata.get('image_id')
if orig_image_ref != image_href:
- # Leave a breadcrumb.
- LOG.debug('Requested to rebuild instance with a new image %s '
- 'for a volume-backed server with image %s in its '
- 'root volume which is not supported.', image_href,
- orig_image_ref, instance=instance)
- msg = _('Unable to rebuild with a different image for a '
- 'volume-backed server.')
- raise exception.ImageUnacceptable(
- image_id=image_href, reason=msg)
+ if not reimage_boot_volume:
+ # Leave a breadcrumb.
+ LOG.debug('Requested to rebuild instance with a new image '
+ '%s for a volume-backed server with image %s in '
+ 'its root volume which is not supported.',
+ image_href, orig_image_ref, instance=instance)
+ msg = _('Unable to rebuild with a different image for a '
+ 'volume-backed server.')
+ raise exception.ImageUnacceptable(
+ image_id=image_href, reason=msg)
else:
orig_image_ref = instance.image_ref
@@ -3793,7 +3794,8 @@ class API:
image_ref=image_href, orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
preserve_ephemeral=preserve_ephemeral, host=host,
- request_spec=request_spec)
+ request_spec=request_spec,
+ reimage_boot_volume=reimage_boot_volume)
def _check_volume_status(self, context, bdms):
"""Check whether the status of the volume is "in-use".
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 246cc92dd5..021c6f9d44 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -31,6 +31,7 @@ import contextlib
import copy
import functools
import inspect
+import math
import sys
import time
import traceback
@@ -615,7 +616,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
- target = messaging.Target(version='6.0')
+ target = messaging.Target(version='6.1')
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -3398,18 +3399,124 @@ class ComputeManager(manager.Manager):
migration.status = status
migration.save()
+ @staticmethod
+ def _reimage_failed_callback(event_name, instance):
+ msg = ('Cinder reported failure during reimaging '
+ 'with %(event)s for instance %(uuid)s')
+ msg_args = {'event': event_name, 'uuid': instance.uuid}
+ LOG.error(msg, msg_args)
+ raise exception.ReimageException(msg % msg_args)
+
+ def _detach_root_volume(self, context, instance, root_bdm):
+ volume_id = root_bdm.volume_id
+ mp = root_bdm.device_name
+ old_connection_info = jsonutils.loads(root_bdm.connection_info)
+ try:
+ self.driver.detach_volume(context, old_connection_info,
+ instance, root_bdm.device_name)
+ except exception.DiskNotFound as err:
+ LOG.warning('Ignoring DiskNotFound exception while '
+ 'detaching volume %(volume_id)s from '
+ '%(mp)s : %(err)s',
+ {'volume_id': volume_id, 'mp': mp,
+ 'err': err}, instance=instance)
+ except exception.DeviceDetachFailed:
+ with excutils.save_and_reraise_exception():
+ LOG.warning('Guest refused to detach volume %(vol)s',
+ {'vol': volume_id}, instance=instance)
+ self.volume_api.roll_detaching(context, volume_id)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception('Failed to detach volume '
+ '%(volume_id)s from %(mp)s',
+ {'volume_id': volume_id, 'mp': mp},
+ instance=instance)
+ self.volume_api.roll_detaching(context, volume_id)
+
+ def _rebuild_volume_backed_instance(self, context, instance, bdms,
+ image_id):
+ # Get root bdm and attachment ID associated to it
+ root_bdm = compute_utils.get_root_bdm(context, instance, bdms)
+ old_attachment_id = root_bdm.attachment_id
+
+ # Create a new attachment and delete the previous attachment
+ # We create a new attachment first to keep the volume in
+ # reserved state after old attachment is deleted and avoid any
+ # races in between the attachment create and delete.
+ attachment_id = None
+ try:
+ attachment_id = self.volume_api.attachment_create(
+ context, root_bdm.volume_id, instance.uuid)['id']
+ self._detach_root_volume(context, instance, root_bdm)
+ root_bdm.attachment_id = attachment_id
+ root_bdm.save()
+ self.volume_api.attachment_delete(context,
+ old_attachment_id)
+ except exception.InstanceNotFound:
+ # This means we failed to save the new attachment because
+ # the instance is deleted, so (try to) delete it and abort.
+ try:
+ self.volume_api.attachment_delete(context,
+ attachment_id)
+ except cinder_exception.ClientException:
+ LOG.error('Failed to delete new attachment %s',
+ attachment_id)
+ msg = _('Failed to rebuild volume backed instance.')
+ raise exception.BuildAbortException(
+ instance_uuid=instance.uuid, reason=msg)
+ except cinder_exception.ClientException:
+ if attachment_id:
+ LOG.error('Failed to delete old attachment %s',
+ old_attachment_id)
+ else:
+ LOG.error('Failed to create new attachment')
+ msg = _('Failed to rebuild volume backed instance.')
+ raise exception.BuildAbortException(
+ instance_uuid=instance.uuid, reason=msg)
+ events = [('volume-reimaged', root_bdm.volume_id)]
+
+ # Get the image requested for rebuild
+ try:
+ image = self.image_api.get(context, image_id)
+ except exception.ImageNotFound:
+ msg = _('Image %s not found.') % image_id
+ LOG.error(msg)
+ raise exception.BuildAbortException(
+ instance_uuid=instance.uuid, reason=msg)
+ image_size = int(math.ceil(float(image.get('size')) / units.Gi))
+ deadline = CONF.reimage_timeout_per_gb * image_size
+ error_cb = self._reimage_failed_callback
+
+ # Call cinder to perform reimage operation and wait until an
+ # external event is triggered.
+ try:
+ with self.virtapi.wait_for_instance_event(instance, events,
+ deadline=deadline,
+ error_callback=error_cb):
+ self.volume_api.reimage_volume(
+ context, root_bdm.volume_id, image_id,
+ reimage_reserved=True)
+
+ except Exception as ex:
+ LOG.error('Failed to rebuild volume backed instance: %s',
+ str(ex), instance=instance)
+ msg = _('Failed to rebuild volume backed instance.')
+ raise exception.BuildAbortException(
+ instance_uuid=instance.uuid, reason=msg)
+
def _rebuild_default_impl(
self, context, instance, image_meta, injected_files,
admin_password, allocations, bdms, detach_block_devices,
attach_block_devices, network_info=None, evacuate=False,
block_device_info=None, preserve_ephemeral=False,
- accel_uuids=None):
+ accel_uuids=None, reimage_boot_volume=False):
if preserve_ephemeral:
# The default code path does not support preserving ephemeral
# partitions.
raise exception.PreserveEphemeralNotSupported()
accel_info = []
+ detach_root_bdm = not reimage_boot_volume
if evacuate:
if instance.flavor.extra_specs.get('accel:device_profile'):
try:
@@ -3421,13 +3528,36 @@ class ComputeManager(manager.Manager):
msg = _('Failure getting accelerator resources.')
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=msg)
- detach_block_devices(context, bdms)
+ detach_block_devices(context, bdms,
+ detach_root_bdm=detach_root_bdm)
else:
self._power_off_instance(instance, clean_shutdown=True)
- detach_block_devices(context, bdms)
- self.driver.destroy(context, instance,
- network_info=network_info,
- block_device_info=block_device_info)
+ detach_block_devices(context, bdms,
+ detach_root_bdm=detach_root_bdm)
+ if reimage_boot_volume:
+                # Previously, the calls reaching here were for image-
+                # backed instance rebuilds and didn't have a root bdm,
+                # so now we need to handle the case of a root bdm.
+                # For the root BDM, we are doing attach/detach operations
+                # manually as we want to maintain a 'reserved' state
+                # throughout the reimage process from the cinder side, so
+                # we are excluding the root BDM from certain operations
+                # here, i.e. deleting its mapping before the destroy call.
+ block_device_info_copy = copy.deepcopy(block_device_info)
+ root_bdm = compute_utils.get_root_bdm(context, instance, bdms)
+ mapping = block_device_info_copy["block_device_mapping"]
+ # drop root bdm from the mapping
+ mapping = [
+ bdm for bdm in mapping
+ if bdm["volume_id"] != root_bdm.volume_id
+ ]
+ self.driver.destroy(context, instance,
+ network_info=network_info,
+ block_device_info=block_device_info_copy)
+ else:
+ self.driver.destroy(context, instance,
+ network_info=network_info,
+ block_device_info=block_device_info)
try:
accel_info = self._get_accel_info(context, instance)
except Exception as exc:
@@ -3436,6 +3566,12 @@ class ComputeManager(manager.Manager):
msg = _('Failure getting accelerator resources.')
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=msg)
+ if reimage_boot_volume:
+ is_volume_backed = compute_utils.is_volume_backed_instance(
+ context, instance, bdms)
+ if is_volume_backed:
+ self._rebuild_volume_backed_instance(
+ context, instance, bdms, image_meta.id)
instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
instance.save(expected_task_state=[task_states.REBUILDING])
@@ -3470,7 +3606,8 @@ class ComputeManager(manager.Manager):
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral, migration,
- scheduled_node, limits, request_spec, accel_uuids):
+ scheduled_node, limits, request_spec, accel_uuids,
+ reimage_boot_volume):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
@@ -3502,6 +3639,9 @@ class ComputeManager(manager.Manager):
specified by the user, this will be None
:param request_spec: a RequestSpec object used to schedule the instance
:param accel_uuids: a list of cyborg ARQ uuids
+ :param reimage_boot_volume: Boolean to specify whether the user has
+ explicitly requested to rebuild a boot
+ volume
"""
# recreate=True means the instance is being evacuated from a failed
@@ -3566,7 +3706,7 @@ class ComputeManager(manager.Manager):
image_meta, injected_files, new_pass, orig_sys_metadata,
bdms, evacuate, on_shared_storage, preserve_ephemeral,
migration, request_spec, allocs, rebuild_claim,
- scheduled_node, limits, accel_uuids)
+ scheduled_node, limits, accel_uuids, reimage_boot_volume)
except (exception.ComputeResourcesUnavailable,
exception.RescheduledException) as e:
if isinstance(e, exception.ComputeResourcesUnavailable):
@@ -3625,7 +3765,8 @@ class ComputeManager(manager.Manager):
self, context, instance, orig_image_ref, image_meta,
injected_files, new_pass, orig_sys_metadata, bdms, evacuate,
on_shared_storage, preserve_ephemeral, migration, request_spec,
- allocations, rebuild_claim, scheduled_node, limits, accel_uuids):
+ allocations, rebuild_claim, scheduled_node, limits, accel_uuids,
+ reimage_boot_volume):
"""Helper to avoid deep nesting in the top-level method."""
provider_mapping = None
@@ -3647,7 +3788,7 @@ class ComputeManager(manager.Manager):
context, instance, orig_image_ref, image_meta, injected_files,
new_pass, orig_sys_metadata, bdms, evacuate, on_shared_storage,
preserve_ephemeral, migration, request_spec, allocations,
- provider_mapping, accel_uuids)
+ provider_mapping, accel_uuids, reimage_boot_volume)
@staticmethod
def _get_image_name(image_meta):
@@ -3661,7 +3802,7 @@ class ComputeManager(manager.Manager):
injected_files, new_pass, orig_sys_metadata, bdms, evacuate,
on_shared_storage, preserve_ephemeral, migration, request_spec,
allocations, request_group_resource_providers_mapping,
- accel_uuids):
+ accel_uuids, reimage_boot_volume):
orig_vm_state = instance.vm_state
if evacuate:
@@ -3766,8 +3907,23 @@ class ComputeManager(manager.Manager):
self._get_instance_block_device_info(
context, instance, bdms=bdms)
- def detach_block_devices(context, bdms):
+ def detach_block_devices(context, bdms, detach_root_bdm=True):
for bdm in bdms:
+                # Previously, the calls made to this method by the
+                # rebuild instance operation were for image-backed
+                # instances, which assumed we only had attached volumes
+                # and no root BDM. Now we need to handle the case of a
+                # root BDM, which we do manually, so we skip the
+                # attachment create/delete calls here.
+                # The detach_root_bdm parameter is only passed while
+                # rebuilding a volume-backed instance, so we don't have
+                # to worry about other callers as they won't satisfy
+                # this condition.
+                # In the evacuate case, detach_root_bdm is always True
+                # since there is no reimage_boot_volume parameter, so
+                # this will not be executed.
+ if not detach_root_bdm and bdm.is_root:
+ continue
if bdm.is_volume:
# NOTE (ildikov): Having the attachment_id set in the BDM
# means that it's the new Cinder attach/detach flow
@@ -3803,7 +3959,8 @@ class ComputeManager(manager.Manager):
network_info=network_info,
preserve_ephemeral=preserve_ephemeral,
evacuate=evacuate,
- accel_uuids=accel_uuids)
+ accel_uuids=accel_uuids,
+ reimage_boot_volume=reimage_boot_volume)
try:
with instance.mutated_migration_context():
self.driver.rebuild(**kwargs)
@@ -11082,7 +11239,7 @@ class _ComputeV5Proxy(object):
bdms, recreate, on_shared_storage,
preserve_ephemeral, migration,
scheduled_node, limits, request_spec,
- accel_uuids)
+ accel_uuids, False)
# 5.13 support for optional accel_uuids argument
def shelve_instance(self, context, instance, image_id,
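
The ordering in ``_rebuild_volume_backed_instance`` above is deliberate:
creating the new attachment before deleting the old one keeps the volume
``reserved`` throughout the reimage. A condensed sketch of that sequence,
with illustrative names rather than Nova's real helpers::

    def swap_root_attachment(volume_api, ctxt, instance, root_bdm):
        old_id = root_bdm.attachment_id
        # reserve the volume under a fresh attachment first, so the
        # volume never leaves the 'reserved' state
        new_id = volume_api.attachment_create(
            ctxt, root_bdm.volume_id, instance.uuid)['id']
        # repoint the BDM at the new attachment before dropping the
        # old one
        root_bdm.attachment_id = new_id
        root_bdm.save()
        volume_api.attachment_delete(ctxt, old_id)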
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index fa5b0ee8d9..b370919e83 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -402,6 +402,7 @@ class ComputeAPI(object):
* ... - Rename the instance_type argument of prep_resize() to flavor
* ... - Rename the instance_type argument of resize_instance() to
flavor
+ * 6.1 - Add reimage_boot_volume parameter to rebuild_instance()
'''
VERSION_ALIASES = {
@@ -1080,7 +1081,8 @@ class ComputeAPI(object):
self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate, on_shared_storage, host, node,
- preserve_ephemeral, migration, limits, request_spec, accel_uuids):
+ preserve_ephemeral, migration, limits, request_spec, accel_uuids,
+ reimage_boot_volume):
# NOTE(edleafe): compute nodes can only use the dict form of limits.
if isinstance(limits, objects.SchedulerLimits):
@@ -1092,11 +1094,21 @@ class ComputeAPI(object):
'scheduled_node': node,
'limits': limits,
'request_spec': request_spec,
- 'accel_uuids': accel_uuids
+ 'accel_uuids': accel_uuids,
+ 'reimage_boot_volume': reimage_boot_volume
}
- version = self._ver(ctxt, '5.12')
+
+ version = '6.1'
client = self.router.client(ctxt)
if not client.can_send_version(version):
+ if msg_args['reimage_boot_volume']:
+ raise exception.NovaException(
+ 'Compute RPC version does not support '
+ 'reimage_boot_volume parameter.')
+ else:
+ del msg_args['reimage_boot_volume']
+ version = self._ver(ctxt, '5.12')
+ if not client.can_send_version(version):
del msg_args['accel_uuids']
version = '5.0'
cctxt = client.prepare(server=_compute_host(host, instance),
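
The version check above follows the usual rolling-upgrade pattern: refuse the
call when the new argument was explicitly requested but the cluster cannot
speak the RPC version that introduced it, otherwise drop the argument and
downgrade. A condensed, illustrative sketch of that logic::

    def pick_version(client, msg_args):
        version = '6.1'
        if not client.can_send_version(version):
            if msg_args['reimage_boot_volume']:
                # the caller asked for a capability old computes lack
                raise RuntimeError('Compute RPC version does not support '
                                   'reimage_boot_volume parameter.')
            del msg_args['reimage_boot_volume']
            version = '5.12'  # fall back to the pre-6.1 signature
        return version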
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 4d94b680a4..778fdd6c73 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -144,7 +144,7 @@ class ComputeTaskAPI(object):
injected_files, new_pass, orig_sys_metadata,
bdms, recreate=False, on_shared_storage=False,
preserve_ephemeral=False, host=None,
- request_spec=None):
+ request_spec=None, reimage_boot_volume=False):
self.conductor_compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
@@ -157,7 +157,8 @@ class ComputeTaskAPI(object):
on_shared_storage=on_shared_storage,
preserve_ephemeral=preserve_ephemeral,
host=host,
- request_spec=request_spec)
+ request_spec=request_spec,
+ reimage_boot_volume=reimage_boot_volume)
def cache_images(self, context, aggregate, image_ids):
"""Request images be pre-cached on hosts within an aggregate.
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 594c3dd61c..9e822db081 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -235,7 +235,7 @@ class ComputeTaskManager:
may involve coordinating activities on multiple compute nodes.
"""
- target = messaging.Target(namespace='compute_task', version='1.23')
+ target = messaging.Target(namespace='compute_task', version='1.24')
def __init__(self):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
@@ -1146,7 +1146,7 @@ class ComputeTaskManager:
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False, host=None,
- request_spec=None):
+ request_spec=None, reimage_boot_volume=False):
# recreate=True means the instance is being evacuated from a failed
# host to a new destination host. The 'recreate' variable name is
# confusing, so rename it to evacuate here at the top, which is simpler
@@ -1343,7 +1343,8 @@ class ComputeTaskManager:
node=node,
limits=limits,
request_spec=request_spec,
- accel_uuids=accel_uuids)
+ accel_uuids=accel_uuids,
+ reimage_boot_volume=reimage_boot_volume)
def _validate_image_traits_for_rebuild(self, context, instance, image_ref):
"""Validates that the traits specified in the image can be satisfied
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 03797bfff9..ffaecd2c95 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -286,6 +286,7 @@ class ComputeTaskAPI(object):
1.21 - Added cache_images()
1.22 - Added confirm_snapshot_based_resize()
1.23 - Added revert_snapshot_based_resize()
+ 1.24 - Add reimage_boot_volume parameter to rebuild_instance()
"""
def __init__(self):
@@ -426,8 +427,9 @@ class ComputeTaskAPI(object):
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None,
- preserve_ephemeral=False, request_spec=None):
- version = '1.12'
+ preserve_ephemeral=False, request_spec=None,
+ reimage_boot_volume=False):
+ version = '1.24'
kw = {'instance': instance,
'new_pass': new_pass,
'injected_files': injected_files,
@@ -440,8 +442,17 @@ class ComputeTaskAPI(object):
'preserve_ephemeral': preserve_ephemeral,
'host': host,
'request_spec': request_spec,
+ 'reimage_boot_volume': reimage_boot_volume
}
if not self.client.can_send_version(version):
+ if kw['reimage_boot_volume']:
+ raise exception.NovaException(
+ 'Conductor RPC version does not support '
+ 'reimage_boot_volume parameter.')
+ else:
+ del kw['reimage_boot_volume']
+ version = '1.12'
+ if not self.client.can_send_version(version):
version = '1.8'
del kw['request_spec']
cctxt = self.client.prepare(version=version)
diff --git a/nova/conf/compute.py b/nova/conf/compute.py
index 1a139e08d5..004dbb83b6 100644
--- a/nova/conf/compute.py
+++ b/nova/conf/compute.py
@@ -305,6 +305,21 @@ Related options:
agent disabled. When used with libvirt the instance mode should be
configured as HVM.
"""),
+ cfg.IntOpt('reimage_timeout_per_gb',
+ default=20,
+ min=1,
+ help="""
+Timeout for reimaging a volume.
+
+Number of seconds to wait for volume-reimaged events to arrive before
+continuing or failing.
+
+This is a per-gigabyte timeout which defaults to 20 seconds and is
+multiplied by the size of the image in GB, e.g. an image of 6 GB will
+have a timeout of 20 * 6 = 120 seconds.
+Try increasing the timeout if the image copy takes more time per GB and
+you are hitting timeout failures.
+"""),
]
resource_tracker_opts = [
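A quick check of the arithmetic the help text above describes; the function is a hypothetical illustration, not a helper that exists in Nova:

    # reimage_timeout_per_gb scales linearly with image size.
    def reimage_event_timeout(image_size_gb, per_gb_seconds=20):
        # e.g. a 6 GB image waits 20 * 6 = 120 seconds for the
        # volume-reimaged event before timing out.
        return per_gb_seconds * image_size_gb

    assert reimage_event_timeout(6) == 120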
diff --git a/nova/exception.py b/nova/exception.py
index 064c8d9a60..27a0676404 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -1861,6 +1861,17 @@ class MemoryPageSizeNotSupported(Invalid):
msg_fmt = _("Page size %(pagesize)s is not supported by the host.")
+class LockMemoryForbidden(Forbidden):
+ msg_fmt = _("locked_memory value in image or flavor is forbidden when "
+ "mem_page_size is not set.")
+
+
+class FlavorImageLockedMemoryConflict(NovaException):
+ msg_fmt = _("locked_memory value in image (%(image)s) and flavor "
+ "(%(flavor)s) conflict. A consistent value is expected if "
+ "both specified.")
+
+
class CPUPinningInvalid(Invalid):
msg_fmt = _("CPU set to pin %(requested)s must be a subset of "
"free CPU set %(available)s")
@@ -2466,3 +2477,7 @@ class PlacementPciMixedTraitsException(PlacementPciException):
"of 'traits' in [pci]device_spec. We got %(new_traits)s for "
"%(new_dev)s and %(current_traits)s for %(current_devs)s."
)
+
+
+class ReimageException(NovaException):
+ msg_fmt = _("Reimaging volume failed.")
diff --git a/nova/notifications/objects/image.py b/nova/notifications/objects/image.py
index 6f6e3b7c0f..a408b27eab 100644
--- a/nova/notifications/objects/image.py
+++ b/nova/notifications/objects/image.py
@@ -128,7 +128,8 @@ class ImageMetaPropsPayload(base.NotificationPayloadBase):
# Version 1.9: Added 'hw_emulation_architecture' field
# Version 1.10: Added 'hw_ephemeral_encryption' and
# 'hw_ephemeral_encryption_format' fields
- VERSION = '1.10'
+ # Version 1.11: Added 'hw_locked_memory' field
+ VERSION = '1.11'
SCHEMA = {
k: ('image_meta_props', k) for k in image_meta.ImageMetaProps.fields}
diff --git a/nova/objects/external_event.py b/nova/objects/external_event.py
index b1acfc4aa0..e17008dade 100644
--- a/nova/objects/external_event.py
+++ b/nova/objects/external_event.py
@@ -33,6 +33,9 @@ EVENT_NAMES = [
# Accelerator Request got bound, tag is ARQ uuid.
# Sent when an ARQ for an instance has been bound or failed to bind.
'accelerator-request-bound',
+
+ # The re-image operation has completed on the Cinder side.
+ 'volume-reimaged',
]
EVENT_STATUSES = ['failed', 'completed', 'in-progress']
@@ -50,7 +53,8 @@ class InstanceExternalEvent(obj_base.NovaObject):
# Version 1.2: adds volume-extended event
# Version 1.3: adds power-update event
# Version 1.4: adds accelerator-request-bound event
- VERSION = '1.4'
+ # Version 1.5: adds volume-reimaged event
+ VERSION = '1.5'
fields = {
'instance_uuid': fields.UUIDField(),
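With the new event name registered, the Cinder side can report completion through the existing os-server-external-events API. A hedged sketch of the request body such a notification would carry; the UUIDs are placeholders and the keys follow the InstanceExternalEvent fields above (the tag carrying the volume ID is an assumption, by analogy with the other volume-* events):

    # Illustrative payload for POST /os-server-external-events.
    event_body = {
        'events': [{
            'name': 'volume-reimaged',
            'server_uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
            'tag': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',  # volume ID
            'status': 'completed',  # one of EVENT_STATUSES
        }]
    }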
diff --git a/nova/objects/image_meta.py b/nova/objects/image_meta.py
index f17f145daf..0ca8ed571f 100644
--- a/nova/objects/image_meta.py
+++ b/nova/objects/image_meta.py
@@ -190,14 +190,17 @@ class ImageMetaProps(base.NovaObject):
# Version 1.31: Added 'hw_emulation_architecture' field
# Version 1.32: Added 'hw_ephemeral_encryption' and
# 'hw_ephemeral_encryption_format' fields
+ # Version 1.33: Added 'hw_locked_memory' field
# NOTE(efried): When bumping this version, the version of
# ImageMetaPropsPayload must also be bumped. See its docstring for details.
- VERSION = '1.32'
+ VERSION = '1.33'
def obj_make_compatible(self, primitive, target_version):
super(ImageMetaProps, self).obj_make_compatible(primitive,
target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
+ if target_version < (1, 33):
+ primitive.pop('hw_locked_memory', None)
if target_version < (1, 32):
primitive.pop('hw_ephemeral_encryption', None)
primitive.pop('hw_ephemeral_encryption_format', None)
@@ -368,6 +371,10 @@ class ImageMetaProps(base.NovaObject):
# image with a network boot image
'hw_ipxe_boot': fields.FlexibleBooleanField(),
+ # boolean - ensure the ``locked`` element is present in the libvirt
+ # guest's ``memoryBacking`` so instance memory is locked in host RAM
+ 'hw_locked_memory': fields.FlexibleBooleanField(),
+
# There are sooooooooooo many possible machine types in
# QEMU - several new ones with each new release - that it
# is not practical to enumerate them all. So we use a free
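The obj_make_compatible() change above means the new field is silently dropped when serializing for a pre-1.33 consumer. A standalone restatement of that back-level behaviour, using a plain dict in place of the real ImageMetaProps primitive:

    # Serializing for a 1.32 consumer drops the 1.33-only field.
    primitive = {'hw_locked_memory': True, 'hw_ipxe_boot': False}
    target_version = (1, 32)
    if target_version < (1, 33):
        primitive.pop('hw_locked_memory', None)
    assert 'hw_locked_memory' not in primitive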
diff --git a/nova/objects/service.py b/nova/objects/service.py
index 8885120ddd..05aeb1b538 100644
--- a/nova/objects/service.py
+++ b/nova/objects/service.py
@@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
-SERVICE_VERSION = 63
+SERVICE_VERSION = 64
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@@ -222,6 +222,9 @@ SERVICE_VERSION_HISTORY = (
# Version 63: Compute RPC v6.0:
# Add support for VDPA hotplug live migration and suspend/resume
{'compute_rpc': '6.0'},
+ # Version 64: Compute RPC v6.1:
+ # Add reimage_boot_volume parameter to rebuild_instance()
+ {'compute_rpc': '6.1'},
)
# This is used to raise an error at service startup if older than N-1 computes
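Bumping SERVICE_VERSION to 64 gives the control plane a way to refuse the new behaviour while older computes remain. A hedged sketch of such a gate; the constant mirrors the history entry above, while the check itself is illustrative rather than Nova's actual code path:

    # Version 64 is the first to carry compute RPC 6.1 (reimage support).
    REIMAGE_MIN_SERVICE_VERSION = 64

    def can_reimage(minimum_compute_service_version):
        # The minimum would come from scanning all nova-compute services;
        # any service still below 64 cannot honour reimage_boot_volume.
        return minimum_compute_service_version >= REIMAGE_MIN_SERVICE_VERSION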
diff --git a/nova/policies/admin_actions.py b/nova/policies/admin_actions.py
index 4db7d8e1c3..e07d66ee36 100644
--- a/nova/policies/admin_actions.py
+++ b/nova/policies/admin_actions.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-admin-actions:%s'
admin_actions_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'reset_state',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Reset the state of a given server",
operations=[
{
@@ -35,7 +35,7 @@ admin_actions_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'inject_network_info',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Inject network information into the server",
operations=[
{
diff --git a/nova/policies/admin_password.py b/nova/policies/admin_password.py
index 439966a9af..ad87aa7c96 100644
--- a/nova/policies/admin_password.py
+++ b/nova/policies/admin_password.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-admin-password'
admin_password_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Change the administrative password for a server",
operations=[
{
diff --git a/nova/policies/assisted_volume_snapshots.py b/nova/policies/assisted_volume_snapshots.py
index 0630ee7a50..98a67a8e37 100644
--- a/nova/policies/assisted_volume_snapshots.py
+++ b/nova/policies/assisted_volume_snapshots.py
@@ -29,7 +29,7 @@ assisted_volume_snapshots_policies = [
# can call it with user having 'service' role (not having
# correct project_id). That is for phase-2 of RBAC goal and until
# then, we keep it open for all admin in any project. We cannot
- # default it to PROJECT_ADMIN which has the project_id in
+ # default it to a project-scoped admin rule, which has the project_id in
# check_str and will fail if cinder call it with other project_id.
check_str=base.ADMIN,
description="Create an assisted volume snapshot",
@@ -47,7 +47,7 @@ assisted_volume_snapshots_policies = [
# can call it with user having 'service' role (not having
# correct project_id). That is for phase-2 of RBAC goal and until
# then, we keep it open for all admin in any project. We cannot
- # default it to PROJECT_ADMIN which has the project_id in
+ # default it to a project-scoped admin rule, which has the project_id in
# check_str and will fail if cinder call it with other project_id.
check_str=base.ADMIN,
description="Delete an assisted volume snapshot",
diff --git a/nova/policies/attach_interfaces.py b/nova/policies/attach_interfaces.py
index eb365fd99d..b996e8ae59 100644
--- a/nova/policies/attach_interfaces.py
+++ b/nova/policies/attach_interfaces.py
@@ -37,7 +37,7 @@ DEPRECATED_INTERFACES_POLICY = policy.DeprecatedRule(
attach_interfaces_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List port interfaces attached to a server",
operations=[
{
@@ -49,7 +49,7 @@ attach_interfaces_policies = [
deprecated_rule=DEPRECATED_INTERFACES_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show details of a port interface attached to a server",
operations=[
{
@@ -61,7 +61,7 @@ attach_interfaces_policies = [
deprecated_rule=DEPRECATED_INTERFACES_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Attach an interface to a server",
operations=[
{
@@ -73,7 +73,7 @@ attach_interfaces_policies = [
deprecated_rule=DEPRECATED_INTERFACES_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Detach an interface from a server",
operations=[
{
diff --git a/nova/policies/base.py b/nova/policies/base.py
index 2d60f4634a..ab0c319cdf 100644
--- a/nova/policies/base.py
+++ b/nova/policies/base.py
@@ -36,28 +36,26 @@ DEPRECATED_ADMIN_OR_OWNER_POLICY = policy.DeprecatedRule(
deprecated_reason=DEPRECATED_REASON,
deprecated_since='21.0.0'
)
-PROJECT_ADMIN = 'rule:project_admin_api'
+
+ADMIN = 'rule:context_is_admin'
PROJECT_MEMBER = 'rule:project_member_api'
PROJECT_READER = 'rule:project_reader_api'
+PROJECT_MEMBER_OR_ADMIN = 'rule:project_member_or_admin'
PROJECT_READER_OR_ADMIN = 'rule:project_reader_or_admin'
-ADMIN = 'rule:context_is_admin'
-# NOTE(gmann): Below is the mapping of new roles and scope_types
-# with legacy roles::
+# NOTE(gmann): Below is the mapping of new roles with legacy roles::
-# Legacy Rule        |    New Rules        |Operation       |scope_type|
-# -------------------+---------------------+----------------+-----------
-#                    |-> ADMIN             |Global resource | [system]
-# RULE_ADMIN_API     |                     |Write & Read    |
-#                    |-> PROJECT_ADMIN     |Project resource| [project]
-#                    |                     |Write           |
-# ----------------------------------------------------------------------
-#                    |-> PROJECT_ADMIN     |Project resource| [project]
-#                    |                     |Write           |
-#                    |-> PROJECT_MEMBER    |Project resource| [project]
-# RULE_ADMIN_OR_OWNER|                     |Write           |
-#                    |-> PROJECT_READER    |Project resource| [project]
-#                    |                     |Read            |
+# Legacy Rule        |    New Rules              |Operation       |scope_type|
+# -------------------+---------------------------+----------------+-----------
+# RULE_ADMIN_API     |-> ADMIN                   |Global resource | [project]
+#                    |                           |Write & Read    |
+# -------------------+---------------------------+----------------+-----------
+#                    |-> ADMIN                   |Project admin   | [project]
+#                    |                           |level operation |
+# RULE_ADMIN_OR_OWNER|-> PROJECT_MEMBER_OR_ADMIN |Project resource| [project]
+#                    |                           |Write           |
+#                    |-> PROJECT_READER_OR_ADMIN |Project resource| [project]
+#                    |                           |Read            |
# NOTE(johngarbutt) The base rules here affect so many APIs the list
# of related API operations has not been populated. It would be
@@ -92,11 +90,6 @@ rules = [
deprecated_reason=DEPRECATED_REASON,
deprecated_since='21.0.0'),
policy.RuleDefault(
- "project_admin_api",
- "role:admin and project_id:%(project_id)s",
- "Default rule for Project level admin APIs.",
- deprecated_rule=DEPRECATED_ADMIN_POLICY),
- policy.RuleDefault(
"project_member_api",
"role:member and project_id:%(project_id)s",
"Default rule for Project level non admin APIs.",
@@ -107,9 +100,14 @@ rules = [
"Default rule for Project level read only APIs.",
deprecated_rule=DEPRECATED_ADMIN_OR_OWNER_POLICY),
policy.RuleDefault(
+ "project_member_or_admin",
+ "rule:project_member_api or rule:context_is_admin",
+ "Default rule for Project Member or admin APIs.",
+ deprecated_rule=DEPRECATED_ADMIN_OR_OWNER_POLICY),
+ policy.RuleDefault(
"project_reader_or_admin",
"rule:project_reader_api or rule:context_is_admin",
- "Default rule for Project reader and admin APIs.",
+ "Default rule for Project reader or admin APIs.",
deprecated_rule=DEPRECATED_ADMIN_OR_OWNER_POLICY)
]
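The two composite rules added above are plain disjunctions, so an admin passes regardless of project while members and readers stay project-scoped. A plain-Python restatement of what 'project_member_or_admin' grants, deliberately avoiding the oslo.policy machinery; the credential dicts are illustrative:

    def project_member_or_admin(creds, target):
        # rule:context_is_admin
        is_admin = 'admin' in creds['roles']
        # rule:project_member_api
        is_member = ('member' in creds['roles'] and
                     creds['project_id'] == target['project_id'])
        return is_member or is_admin

    creds = {'roles': ['member'], 'project_id': 'p1'}
    assert project_member_or_admin(creds, {'project_id': 'p1'})
    assert not project_member_or_admin(creds, {'project_id': 'p2'})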
diff --git a/nova/policies/console_auth_tokens.py b/nova/policies/console_auth_tokens.py
index bad3130e78..5f784965cf 100644
--- a/nova/policies/console_auth_tokens.py
+++ b/nova/policies/console_auth_tokens.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-console-auth-tokens'
console_auth_tokens_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Show console connection information for a given console "
"authentication token",
operations=[
diff --git a/nova/policies/console_output.py b/nova/policies/console_output.py
index 4a5a21ef55..625971b5d7 100644
--- a/nova/policies/console_output.py
+++ b/nova/policies/console_output.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-console-output'
console_output_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description='Show console output for a server',
operations=[
{
diff --git a/nova/policies/create_backup.py b/nova/policies/create_backup.py
index 173ad3e36f..c18fa11e84 100644
--- a/nova/policies/create_backup.py
+++ b/nova/policies/create_backup.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-create-backup'
create_backup_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description='Create a back up of a server',
operations=[
{
diff --git a/nova/policies/deferred_delete.py b/nova/policies/deferred_delete.py
index a912966897..9c18aa02de 100644
--- a/nova/policies/deferred_delete.py
+++ b/nova/policies/deferred_delete.py
@@ -36,7 +36,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
deferred_delete_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'restore',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Restore a soft deleted server",
operations=[
{
@@ -48,7 +48,7 @@ deferred_delete_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'force',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Force delete a server before deferred cleanup",
operations=[
{
diff --git a/nova/policies/evacuate.py b/nova/policies/evacuate.py
index 4c66f90147..3a0fd502fd 100644
--- a/nova/policies/evacuate.py
+++ b/nova/policies/evacuate.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-evacuate'
evacuate_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Evacuate a server from a failed host to a new host",
operations=[
{
diff --git a/nova/policies/extended_server_attributes.py b/nova/policies/extended_server_attributes.py
index ce5c531a73..ba151a36cc 100644
--- a/nova/policies/extended_server_attributes.py
+++ b/nova/policies/extended_server_attributes.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-extended-server-attributes'
extended_server_attributes_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""Return extended attributes for server.
This rule will control the visibility for a set of servers attributes:
diff --git a/nova/policies/floating_ips.py b/nova/policies/floating_ips.py
index 2cb5b34679..48d60d7b89 100644
--- a/nova/policies/floating_ips.py
+++ b/nova/policies/floating_ips.py
@@ -38,7 +38,7 @@ DEPRECATED_FIP_POLICY = policy.DeprecatedRule(
floating_ips_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'add',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Associate floating IPs to server. "
" This API is deprecated.",
operations=[
@@ -51,7 +51,7 @@ floating_ips_policies = [
deprecated_rule=DEPRECATED_FIP_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'remove',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Disassociate floating IPs to server. "
" This API is deprecated.",
operations=[
@@ -64,7 +64,7 @@ floating_ips_policies = [
deprecated_rule=DEPRECATED_FIP_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List floating IPs. This API is deprecated.",
operations=[
{
@@ -76,7 +76,7 @@ floating_ips_policies = [
deprecated_rule=DEPRECATED_FIP_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create floating IPs. This API is deprecated.",
operations=[
{
@@ -88,7 +88,7 @@ floating_ips_policies = [
deprecated_rule=DEPRECATED_FIP_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show floating IPs. This API is deprecated.",
operations=[
{
@@ -100,7 +100,7 @@ floating_ips_policies = [
deprecated_rule=DEPRECATED_FIP_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete floating IPs. This API is deprecated.",
operations=[
{
diff --git a/nova/policies/instance_actions.py b/nova/policies/instance_actions.py
index 85e2f63244..e3e16a58f0 100644
--- a/nova/policies/instance_actions.py
+++ b/nova/policies/instance_actions.py
@@ -38,7 +38,7 @@ DEPRECATED_INSTANCE_ACTION_POLICY = policy.DeprecatedRule(
instance_actions_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'events:details',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""Add "details" key in action events for a server.
This check is performed only after the check
@@ -59,7 +59,7 @@ but in the other hand it might leak information about the deployment
scope_types=['project']),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'events',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""Add events details in action details for a server.
This check is performed only after the check
os_compute_api:os-instance-actions:show passes. Beginning with Microversion
@@ -76,7 +76,7 @@ passes, the name of the host.""",
scope_types=['project']),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List actions for a server.""",
operations=[
{
@@ -88,7 +88,7 @@ passes, the name of the host.""",
deprecated_rule=DEPRECATED_INSTANCE_ACTION_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show action details for a server.""",
operations=[
{
diff --git a/nova/policies/ips.py b/nova/policies/ips.py
index d63c345389..20cad2522a 100644
--- a/nova/policies/ips.py
+++ b/nova/policies/ips.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:ips:%s'
ips_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show IP addresses details for a network label of a "
" server",
operations=[
@@ -36,7 +36,7 @@ ips_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List IP addresses that are assigned to a server",
operations=[
{
diff --git a/nova/policies/limits.py b/nova/policies/limits.py
index 56bc0e830d..1216dd1995 100644
--- a/nova/policies/limits.py
+++ b/nova/policies/limits.py
@@ -49,7 +49,7 @@ limits_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=OTHER_PROJECT_LIMIT_POLICY_NAME,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""Show rate and absolute limits of other project.
This policy only checks if the user has access to the requested
diff --git a/nova/policies/lock_server.py b/nova/policies/lock_server.py
index ca65b1cf9b..f7a018803c 100644
--- a/nova/policies/lock_server.py
+++ b/nova/policies/lock_server.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-lock-server:%s'
lock_server_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'lock',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Lock a server",
operations=[
{
@@ -36,7 +36,7 @@ lock_server_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'unlock',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Unlock a server",
operations=[
{
@@ -48,7 +48,7 @@ lock_server_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'unlock:unlock_override',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""Unlock a server, regardless who locked the server.
This check is performed only after the check
diff --git a/nova/policies/migrate_server.py b/nova/policies/migrate_server.py
index d00fd562d2..0b3d7c8bd1 100644
--- a/nova/policies/migrate_server.py
+++ b/nova/policies/migrate_server.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-migrate-server:%s'
migrate_server_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'migrate',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Cold migrate a server to a host",
operations=[
{
@@ -35,7 +35,7 @@ migrate_server_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'migrate_live',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Live migrate a server to a new host without a reboot",
operations=[
{
diff --git a/nova/policies/migrations.py b/nova/policies/migrations.py
index 4647d53496..ce2aeaa564 100644
--- a/nova/policies/migrations.py
+++ b/nova/policies/migrations.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-migrations:%s'
migrations_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="List migrations",
operations=[
{
diff --git a/nova/policies/multinic.py b/nova/policies/multinic.py
index ff16cb5143..7119ec25b4 100644
--- a/nova/policies/multinic.py
+++ b/nova/policies/multinic.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
multinic_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'add',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Add a fixed IP address to a server.
This API is proxy calls to the Network service. This is
@@ -53,7 +53,7 @@ deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'remove',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Remove a fixed IP address from a server.
This API is proxy calls to the Network service. This is
diff --git a/nova/policies/networks.py b/nova/policies/networks.py
index ab0ce1512b..928705d8be 100644
--- a/nova/policies/networks.py
+++ b/nova/policies/networks.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
networks_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List networks for the project.
This API is proxy calls to the Network service. This is deprecated.""",
@@ -52,7 +52,7 @@ This API is proxy calls to the Network service. This is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show network details.
This API is proxy calls to the Network service. This is deprecated.""",
diff --git a/nova/policies/pause_server.py b/nova/policies/pause_server.py
index a7318b16f8..96a1ff4c0d 100644
--- a/nova/policies/pause_server.py
+++ b/nova/policies/pause_server.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-pause-server:%s'
pause_server_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'pause',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Pause a server",
operations=[
{
@@ -36,7 +36,7 @@ pause_server_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'unpause',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Unpause a paused server",
operations=[
{
diff --git a/nova/policies/quota_sets.py b/nova/policies/quota_sets.py
index a44c6fa918..ae8c471f56 100644
--- a/nova/policies/quota_sets.py
+++ b/nova/policies/quota_sets.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-quota-sets:%s'
quota_sets_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Update the quotas",
operations=[
{
@@ -46,13 +46,7 @@ quota_sets_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- # TODO(gmann): Until we have domain admin or so to get other project's
- # data, allow admin role(with scope check it will be project admin) to
- # get other project quota. We cannot use PROJECT_ADMIN here as
- # project_id passed in request url is used as policy targets which
- # would not match with context's project_id fetched for rule
- # PROJECT_ADMIN check.
- check_str='(' + base.PROJECT_READER + ') or role:admin',
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show a quota",
operations=[
{
@@ -63,7 +57,7 @@ quota_sets_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Revert quotas to defaults",
operations=[
{
@@ -77,7 +71,7 @@ quota_sets_policies = [
# TODO(gmann): Until we have domain admin or so to get other project's
# data, allow admin role(with scope check it will be project admin) to
# get other project quota.
- check_str='(' + base.PROJECT_READER + ') or role:admin',
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show the detail of quota",
operations=[
{
diff --git a/nova/policies/remote_consoles.py b/nova/policies/remote_consoles.py
index 4b217dc74c..e32dd33d4c 100644
--- a/nova/policies/remote_consoles.py
+++ b/nova/policies/remote_consoles.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-remote-consoles'
remote_consoles_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Generate a URL to access remove server console.
This policy is for ``POST /remote-consoles`` API and below Server actions APIs
diff --git a/nova/policies/rescue.py b/nova/policies/rescue.py
index 040caa4275..f9f72e92ef 100644
--- a/nova/policies/rescue.py
+++ b/nova/policies/rescue.py
@@ -37,7 +37,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
rescue_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Rescue a server",
operations=[
{
@@ -48,7 +48,7 @@ rescue_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=UNRESCUE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Unrescue a server",
operations=[
{
diff --git a/nova/policies/security_groups.py b/nova/policies/security_groups.py
index e5649d5da5..d6318bc724 100644
--- a/nova/policies/security_groups.py
+++ b/nova/policies/security_groups.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
security_groups_policies = [
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'get',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List security groups. This API is deprecated.",
operations=[
{
@@ -50,7 +50,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show security group. This API is deprecated.",
operations=[
{
@@ -62,7 +62,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create security group. This API is deprecated.",
operations=[
{
@@ -74,7 +74,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'update',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Update security group. This API is deprecated.",
operations=[
{
@@ -86,7 +86,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete security group. This API is deprecated.",
operations=[
{
@@ -98,7 +98,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'rule:create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create security group Rule. This API is deprecated.",
operations=[
{
@@ -110,7 +110,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'rule:delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete security group Rule. This API is deprecated.",
operations=[
{
@@ -122,7 +122,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List security groups of server.",
operations=[
{
@@ -134,7 +134,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'add',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Add security groups to server.",
operations=[
{
@@ -146,7 +146,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'remove',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Remove security groups from server.",
operations=[
{
diff --git a/nova/policies/server_diagnostics.py b/nova/policies/server_diagnostics.py
index ebafab4378..6774b7e862 100644
--- a/nova/policies/server_diagnostics.py
+++ b/nova/policies/server_diagnostics.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-server-diagnostics'
server_diagnostics_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Show the usage data for a server",
operations=[
{
diff --git a/nova/policies/server_external_events.py b/nova/policies/server_external_events.py
index da832eb94d..56034d0186 100644
--- a/nova/policies/server_external_events.py
+++ b/nova/policies/server_external_events.py
@@ -30,7 +30,7 @@ server_external_events_policies = [
# neutron can call it with user having 'service' role (not having
# server's project_id). That is for phase-2 of RBAC goal and until
# then, we keep it open for all admin in any project. We cannot
- # default it to PROJECT_ADMIN which has the project_id in
+ # default it to a project-scoped admin rule, which has the project_id in
# check_str and will fail if neutron call it with other project_id.
check_str=base.ADMIN,
description="Create one or more external events",
diff --git a/nova/policies/server_groups.py b/nova/policies/server_groups.py
index be1cb62835..8dfbe7c202 100644
--- a/nova/policies/server_groups.py
+++ b/nova/policies/server_groups.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-server-groups:%s'
server_groups_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create a new server group",
operations=[
{
@@ -36,7 +36,7 @@ server_groups_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete a server group",
operations=[
{
@@ -48,7 +48,7 @@ server_groups_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List all server groups",
operations=[
{
@@ -60,7 +60,7 @@ server_groups_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index:all_projects',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="List all server groups for all projects",
operations=[
{
@@ -72,7 +72,7 @@ server_groups_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show details of a server group",
operations=[
{
diff --git a/nova/policies/server_metadata.py b/nova/policies/server_metadata.py
index 1e6b525cb6..f136df8439 100644
--- a/nova/policies/server_metadata.py
+++ b/nova/policies/server_metadata.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:server-metadata:%s'
server_metadata_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List all metadata of a server",
operations=[
{
@@ -36,7 +36,7 @@ server_metadata_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show metadata for a server",
operations=[
{
@@ -48,7 +48,7 @@ server_metadata_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create metadata for a server",
operations=[
{
@@ -60,7 +60,7 @@ server_metadata_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update_all',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Replace metadata for a server",
operations=[
{
@@ -72,7 +72,7 @@ server_metadata_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Update metadata from a server",
operations=[
{
@@ -84,7 +84,7 @@ server_metadata_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete metadata from a server",
operations=[
{
diff --git a/nova/policies/server_password.py b/nova/policies/server_password.py
index 95fa95830c..1f9ddafd3c 100644
--- a/nova/policies/server_password.py
+++ b/nova/policies/server_password.py
@@ -37,7 +37,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
server_password_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show the encrypted administrative "
"password of a server",
operations=[
@@ -50,7 +50,7 @@ server_password_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'clear',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Clear the encrypted administrative "
"password of a server",
operations=[
diff --git a/nova/policies/server_tags.py b/nova/policies/server_tags.py
index 014c8d1488..baa1123987 100644
--- a/nova/policies/server_tags.py
+++ b/nova/policies/server_tags.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-server-tags:%s'
server_tags_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete_all',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete all the server tags",
operations=[
{
@@ -35,7 +35,7 @@ server_tags_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List all tags for given server",
operations=[
{
@@ -46,7 +46,7 @@ server_tags_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update_all',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Replace all tags on specified server with the new set "
"of tags.",
operations=[
@@ -59,7 +59,7 @@ server_tags_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete a single tag from the specified server",
operations=[
{
@@ -71,7 +71,7 @@ server_tags_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Add a single tag to the server if server has no "
"specified tag",
operations=[
@@ -84,7 +84,7 @@ server_tags_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Check tag existence on the server.",
operations=[
{
diff --git a/nova/policies/server_topology.py b/nova/policies/server_topology.py
index 7b68e67481..0e6c203e4f 100644
--- a/nova/policies/server_topology.py
+++ b/nova/policies/server_topology.py
@@ -21,7 +21,7 @@ BASE_POLICY_NAME = 'compute:server:topology:%s'
server_topology_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'index',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show the NUMA topology data for a server",
operations=[
{
@@ -33,7 +33,7 @@ server_topology_policies = [
policy.DocumentedRuleDefault(
# Control host NUMA node and cpu pinning information
name=BASE_POLICY_NAME % 'host:index',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Show the NUMA topology data for a server with host "
"NUMA ID and CPU pinning information",
operations=[
diff --git a/nova/policies/servers.py b/nova/policies/servers.py
index faa8f8d02c..1e41baa203 100644
--- a/nova/policies/servers.py
+++ b/nova/policies/servers.py
@@ -36,7 +36,7 @@ not for list extra specs and showing it in flavor API response.
rules = [
policy.DocumentedRuleDefault(
name=SERVERS % 'index',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List all servers",
operations=[
{
@@ -47,7 +47,7 @@ rules = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'detail',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List all servers with detailed information",
operations=[
{
@@ -58,7 +58,7 @@ rules = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'index:get_all_tenants',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="List all servers for all projects",
operations=[
{
@@ -70,7 +70,7 @@ rules = [
policy.DocumentedRuleDefault(
name=SERVERS % 'detail:get_all_tenants',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="List all servers with detailed information for "
" all projects",
operations=[
@@ -82,7 +82,7 @@ rules = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'allow_all_filters',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Allow all filters when listing servers",
operations=[
{
@@ -97,7 +97,7 @@ rules = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show a server",
operations=[
{
@@ -108,7 +108,7 @@ rules = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'show:flavor-extra-specs',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Starting with microversion 2.47, the flavor and its "
"extra specs used for a server is also returned in the response "
"when showing server details, updating a server or rebuilding a "
@@ -140,7 +140,7 @@ rules = [
# should do that by default.
policy.DocumentedRuleDefault(
name=SERVERS % 'show:host_status',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""
Show a server with additional host status information.
@@ -174,7 +174,7 @@ API responses which are also controlled by this policy rule, like the
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'show:host_status:unknown-only',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""
Show a server with additional host status information, only if host status is
UNKNOWN.
@@ -207,7 +207,7 @@ allow everyone.
scope_types=['project'],),
policy.DocumentedRuleDefault(
name=SERVERS % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create a server",
operations=[
{
@@ -218,7 +218,7 @@ allow everyone.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create:forced_host',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""
Create a server on the specified host and/or node.
@@ -235,7 +235,7 @@ host and/or node by bypassing the scheduler filters unlike the
scope_types=['project']),
policy.DocumentedRuleDefault(
name=REQUESTED_DESTINATION,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""
Create a server on the requested compute service host and/or
hypervisor_hostname.
@@ -253,7 +253,7 @@ validated by the scheduler filters unlike the
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create:attach_volume',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create a server with the requested volume attached to it",
operations=[
{
@@ -264,7 +264,7 @@ validated by the scheduler filters unlike the
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create:attach_network',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create a server with the requested network attached "
" to it",
operations=[
@@ -276,7 +276,7 @@ validated by the scheduler filters unlike the
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create:trusted_certs',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create a server with trusted image certificate IDs",
operations=[
{
@@ -287,7 +287,7 @@ validated by the scheduler filters unlike the
scope_types=['project']),
policy.DocumentedRuleDefault(
name=ZERO_DISK_FLAVOR,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""
This rule controls the compute API validation behavior of creating a server
with a flavor that has 0 disk, indicating the server should be volume-backed.
@@ -312,7 +312,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=NETWORK_ATTACH_EXTERNAL,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Attach an unshared external network to a server",
operations=[
# Create a server with a requested network or port.
@@ -329,7 +329,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete a server",
operations=[
{
@@ -340,7 +340,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'update',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Update a server",
operations=[
{
@@ -351,7 +351,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'confirm_resize',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Confirm a server resize",
operations=[
{
@@ -362,7 +362,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'revert_resize',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Revert a server resize",
operations=[
{
@@ -373,7 +373,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'reboot',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Reboot a server",
operations=[
{
@@ -384,7 +384,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'resize',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Resize a server",
operations=[
{
@@ -410,7 +410,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'rebuild',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Rebuild a server",
operations=[
{
@@ -421,7 +421,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'rebuild:trusted_certs',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Rebuild a server with trusted image certificate IDs",
operations=[
{
@@ -432,7 +432,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create_image',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create an image from a server",
operations=[
{
@@ -443,7 +443,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create_image:allow_volume_backed',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create an image from a volume backed server",
operations=[
{
@@ -454,7 +454,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'start',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Start a server",
operations=[
{
@@ -465,7 +465,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'stop',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Stop a server",
operations=[
{
@@ -476,7 +476,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'trigger_crash_dump',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Trigger crash dump in a server",
operations=[
{
diff --git a/nova/policies/servers_migrations.py b/nova/policies/servers_migrations.py
index 427da8bba2..21762fc575 100644
--- a/nova/policies/servers_migrations.py
+++ b/nova/policies/servers_migrations.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:servers:migrations:%s'
servers_migrations_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Show details for an in-progress live migration for a "
"given server",
operations=[
@@ -36,7 +36,7 @@ servers_migrations_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'force_complete',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Force an in-progress live migration for a given server "
"to complete",
operations=[
@@ -49,7 +49,7 @@ servers_migrations_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Delete(Abort) an in-progress live migration",
operations=[
{
@@ -60,7 +60,7 @@ servers_migrations_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Lists in-progress live migrations for a given server",
operations=[
{
diff --git a/nova/policies/shelve.py b/nova/policies/shelve.py
index eb06ffaa2f..476d212b04 100644
--- a/nova/policies/shelve.py
+++ b/nova/policies/shelve.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-shelve:%s'
shelve_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'shelve',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Shelve server",
operations=[
{
@@ -35,7 +35,7 @@ shelve_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'unshelve',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Unshelve (restore) shelved server",
operations=[
{
@@ -46,7 +46,7 @@ shelve_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'unshelve_to_host',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Unshelve (restore) shelve offloaded server to a "
"specific host",
operations=[
@@ -58,7 +58,7 @@ shelve_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'shelve_offload',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Shelf-offload (remove) server",
operations=[
{
diff --git a/nova/policies/simple_tenant_usage.py b/nova/policies/simple_tenant_usage.py
index d97d5909eb..41d87d1426 100644
--- a/nova/policies/simple_tenant_usage.py
+++ b/nova/policies/simple_tenant_usage.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-simple-tenant-usage:%s'
simple_tenant_usage_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show usage statistics for a specific tenant",
operations=[
{
@@ -35,7 +35,7 @@ simple_tenant_usage_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'list',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="List per tenant usage statistics for all tenants",
operations=[
{
diff --git a/nova/policies/suspend_server.py b/nova/policies/suspend_server.py
index 3a603903c8..5e889808fd 100644
--- a/nova/policies/suspend_server.py
+++ b/nova/policies/suspend_server.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-suspend-server:%s'
suspend_server_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'resume',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Resume suspended server",
operations=[
{
@@ -35,7 +35,7 @@ suspend_server_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'suspend',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Suspend server",
operations=[
{
diff --git a/nova/policies/volumes.py b/nova/policies/volumes.py
index 0ee941074d..129ced82c1 100644
--- a/nova/policies/volumes.py
+++ b/nova/policies/volumes.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
volumes_policies = [
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List volumes.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -52,7 +52,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Create volume.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -66,7 +66,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'detail',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List volumes detail.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -80,7 +80,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show volume.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -94,7 +94,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Delete volume.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -108,7 +108,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'snapshots:list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List snapshots.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -122,7 +122,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'snapshots:create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Create snapshots.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -136,7 +136,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'snapshots:detail',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List snapshots details.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -150,7 +150,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'snapshots:show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show snapshot.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -164,7 +164,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'snapshots:delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Delete snapshot.
This API is a proxy call to the Volume service. It is deprecated.""",
diff --git a/nova/policies/volumes_attachments.py b/nova/policies/volumes_attachments.py
index 20b3a2f3e6..68a1694c59 100644
--- a/nova/policies/volumes_attachments.py
+++ b/nova/policies/volumes_attachments.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-volumes-attachments:%s'
volumes_attachments_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List volume attachments for an instance",
operations=[
{'method': 'GET',
@@ -34,7 +34,7 @@ volumes_attachments_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Attach a volume to an instance",
operations=[
{
@@ -45,7 +45,7 @@ volumes_attachments_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show details of a volume attachment",
operations=[
{
@@ -57,7 +57,7 @@ volumes_attachments_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Update a volume attachment.
New 'update' policy about 'swap + update' request (which is possible
only >2.85) only <swap policy> is checked. We expect <swap policy> to be
@@ -78,7 +78,7 @@ always superset of this policy permission.
# can call it with a user having the 'service' role (not having the
# server's project_id). That is for phase 2 of the RBAC goal; until
# then, we keep it open to all admins in any project. We cannot
# default it to
- # PROJECT_ADMIN which has the project_id in check_str and will fail
+ # ADMIN which has the project_id in check_str and will fail
# if cinder calls it with another project_id.
check_str=base.ADMIN,
description="Update a volume attachment with a different volumeId",
@@ -92,7 +92,7 @@ always superset of this policy permission.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Detach a volume from an instance",
operations=[
{
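For orientation, a minimal sketch of what the new *_OR_ADMIN aliases are
assumed to expand to; the strings mirror the "project_member_or_admin" and
"project_reader_or_admin" override rules that BasePolicyTest registers later
in this diff (illustrative, not nova's verbatim base.py):

    # Assumed expansions of the new base policy aliases used throughout
    # the policy files above: the project-scoped rule OR'd with admin.
    PROJECT_MEMBER_OR_ADMIN = (
        'rule:project_member_api or rule:context_is_admin')
    PROJECT_READER_OR_ADMIN = (
        'rule:project_reader_api or rule:context_is_admin')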
diff --git a/nova/tests/fixtures/cinder.py b/nova/tests/fixtures/cinder.py
index 97b32d9b84..29889c784a 100644
--- a/nova/tests/fixtures/cinder.py
+++ b/nova/tests/fixtures/cinder.py
@@ -327,6 +327,12 @@ class CinderFixture(fixtures.Fixture):
_find_attachment(attachment_id)
LOG.info('Completing volume attachment: %s', attachment_id)
+ def fake_reimage_volume(*args, **kwargs):
+ if self.IMAGE_BACKED_VOL not in args:
+ raise exception.VolumeNotFound()
+ if 'reimage_reserved' not in kwargs:
+ raise exception.InvalidInput('reimage_reserved not specified')
+
self.test.stub_out(
'nova.volume.cinder.API.attachment_create', fake_attachment_create)
self.test.stub_out(
@@ -366,6 +372,9 @@ class CinderFixture(fixtures.Fixture):
self.test.stub_out(
'nova.volume.cinder.API.terminate_connection',
lambda *args, **kwargs: None)
+ self.test.stub_out(
+ 'nova.volume.cinder.API.reimage_volume',
+ fake_reimage_volume)
def volume_ids_for_instance(self, instance_uuid):
for volume_id, attachments in self.volume_to_attachment.items():
diff --git a/nova/tests/fixtures/libvirt_imagebackend.py b/nova/tests/fixtures/libvirt_imagebackend.py
index 3d6f2e81e9..ea32b6b34a 100644
--- a/nova/tests/fixtures/libvirt_imagebackend.py
+++ b/nova/tests/fixtures/libvirt_imagebackend.py
@@ -154,7 +154,9 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
# their construction. Tests can use this to assert that disks were
# created of the expected type.
- def image_init(instance=None, disk_name=None, path=None):
+ def image_init(
+ instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
# There's nothing special about this path except that it's
# predictable and unique for (instance, disk).
if path is None:
@@ -169,6 +171,7 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
# the real constructor.
setattr(disk, 'path', path)
setattr(disk, 'is_block_dev', mock.sentinel.is_block_dev)
+ setattr(disk, 'disk_info_mapping', disk_info_mapping)
# Used by tests. Note that image_init is a closure over image_type.
setattr(disk, 'image_type', image_type)
@@ -217,16 +220,16 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
self.imported_files.append((local_filename, remote_filename))
def _fake_libvirt_info(
- self, mock_disk, disk_info, cache_mode, extra_specs, disk_unit=None,
+ self, mock_disk, cache_mode, extra_specs, disk_unit=None,
boot_order=None,
):
# For tests in test_virt_drivers which expect libvirt_info to be
# functional
info = config.LibvirtConfigGuestDisk()
info.source_type = 'file'
- info.source_device = disk_info['type']
- info.target_bus = disk_info['bus']
- info.target_dev = disk_info['dev']
+ info.source_device = mock_disk.disk_info_mapping['type']
+ info.target_bus = mock_disk.disk_info_mapping['bus']
+ info.target_dev = mock_disk.disk_info_mapping['dev']
info.driver_cache = cache_mode
info.driver_format = 'raw'
info.source_path = mock_disk.path
diff --git a/nova/tests/functional/api_sample_tests/test_evacuate.py b/nova/tests/functional/api_sample_tests/test_evacuate.py
index 14b7b09cf0..ab3aa95ad8 100644
--- a/nova/tests/functional/api_sample_tests/test_evacuate.py
+++ b/nova/tests/functional/api_sample_tests/test_evacuate.py
@@ -79,7 +79,8 @@ class EvacuateJsonTest(test_servers.ServersSampleBase):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=False, preserve_ephemeral=mock.ANY,
- host='testHost', request_spec=mock.ANY)
+ host='testHost', request_spec=mock.ANY,
+ reimage_boot_volume=False)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
def test_server_evacuate_find_host(self, rebuild_mock):
@@ -95,7 +96,8 @@ class EvacuateJsonTest(test_servers.ServersSampleBase):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=False, preserve_ephemeral=mock.ANY,
- host=None, request_spec=mock.ANY)
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False)
class EvacuateJsonTestV214(EvacuateJsonTest):
@@ -116,7 +118,8 @@ class EvacuateJsonTestV214(EvacuateJsonTest):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host='testHost', request_spec=mock.ANY)
+ host='testHost', request_spec=mock.ANY,
+ reimage_boot_volume=False)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
def test_server_evacuate_find_host(self, rebuild_mock):
@@ -131,7 +134,8 @@ class EvacuateJsonTestV214(EvacuateJsonTest):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host=None, request_spec=mock.ANY)
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False)
class EvacuateJsonTestV229(EvacuateJsonTestV214):
@@ -158,7 +162,8 @@ class EvacuateJsonTestV229(EvacuateJsonTestV214):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host=None, request_spec=mock.ANY)
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
@mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
@@ -178,7 +183,8 @@ class EvacuateJsonTestV229(EvacuateJsonTestV214):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host='testHost', request_spec=mock.ANY)
+ host='testHost', request_spec=mock.ANY,
+ reimage_boot_volume=False)
class EvacuateJsonTestV268(EvacuateJsonTestV229):
@@ -204,7 +210,8 @@ class EvacuateJsonTestV268(EvacuateJsonTestV229):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host=None, request_spec=mock.ANY)
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False)
def test_server_evacuate_with_force(self):
# doesn't apply to v2.68+, which removed the ability to force migrate
diff --git a/nova/tests/functional/notification_sample_tests/test_instance.py b/nova/tests/functional/notification_sample_tests/test_instance.py
index 84c7246f67..e6c88be239 100644
--- a/nova/tests/functional/notification_sample_tests/test_instance.py
+++ b/nova/tests/functional/notification_sample_tests/test_instance.py
@@ -1231,7 +1231,7 @@ class TestInstanceNotificationSample(
'nova_object.data': {},
'nova_object.name': 'ImageMetaPropsPayload',
'nova_object.namespace': 'nova',
- 'nova_object.version': '1.10',
+ 'nova_object.version': '1.11',
},
'image.size': 58145823,
'image.tags': [],
@@ -1327,7 +1327,7 @@ class TestInstanceNotificationSample(
'nova_object.data': {},
'nova_object.name': 'ImageMetaPropsPayload',
'nova_object.namespace': 'nova',
- 'nova_object.version': '1.10',
+ 'nova_object.version': '1.11',
},
'image.size': 58145823,
'image.tags': [],
diff --git a/nova/tests/functional/regressions/test_bug_1732947.py b/nova/tests/functional/regressions/test_bug_1732947.py
index 3637f40bc2..db518fa8ce 100644
--- a/nova/tests/functional/regressions/test_bug_1732947.py
+++ b/nova/tests/functional/regressions/test_bug_1732947.py
@@ -28,7 +28,9 @@ class RebuildVolumeBackedSameImage(integrated_helpers._IntegratedTestBase):
original image.
"""
api_major_version = 'v2.1'
- microversion = 'latest'
+ # We need a microversion below 2.93 to get the old BFV rebuild
+ # behavior that was the environment for this regression.
+ microversion = '2.92'
def _setup_scheduler_service(self):
# Add the IsolatedHostsFilter to the list of enabled filters since it
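As a reading aid, a sketch of the behavioral split the pinned microversion
selects (request body as used elsewhere in this diff; the pre-2.93
rejection message is the 'volume-backed server' error asserted in
ServerRebuildTestCase below):

    # With a microversion < 2.93, rebuilding a volume-backed server with
    # a different image is rejected with a 400 mentioning the
    # 'volume-backed server'. From 2.93 on, the root volume is reimaged.
    self.api.microversion = '2.92'
    self.api.post_server_action(
        server_id, {'rebuild': {'imageRef': image_uuid}})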
diff --git a/nova/tests/functional/regressions/test_bug_1902925.py b/nova/tests/functional/regressions/test_bug_1902925.py
index f0e823e2a4..59105c6cc6 100644
--- a/nova/tests/functional/regressions/test_bug_1902925.py
+++ b/nova/tests/functional/regressions/test_bug_1902925.py
@@ -28,6 +28,11 @@ class ComputeVersion5xPinnedRpcTests(integrated_helpers._IntegratedTestBase):
self.compute1 = self._start_compute(host='host1')
def _test_rebuild_instance_with_compute_rpc_pin(self, version_cap):
+ # Passing the latest microversion (>= 2.93) sets the
+ # 'reimage_boot_volume' parameter to True, which the capped
+ # compute RPC version cannot accept (6.1 is required), so these
+ # tests would fail. Pin the microversion to 2.92 instead.
+ self.api.microversion = '2.92'
self.flags(compute=version_cap, group='upgrade_levels')
server_req = self._build_server(networks='none')
diff --git a/nova/tests/functional/test_boot_from_volume.py b/nova/tests/functional/test_boot_from_volume.py
index 0b963b5aa3..6396954bf4 100644
--- a/nova/tests/functional/test_boot_from_volume.py
+++ b/nova/tests/functional/test_boot_from_volume.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import fixtures
from unittest import mock
from nova import context
@@ -50,6 +51,9 @@ class BootFromVolumeTest(integrated_helpers._IntegratedTestBase):
self.flags(allow_resize_to_same_host=True)
super(BootFromVolumeTest, self).setUp()
self.admin_api = self.api_fixture.admin_api
+ self.useFixture(nova_fixtures.CinderFixture(self))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.compute.manager.ComputeVirtAPI.wait_for_instance_event'))
def test_boot_from_volume_larger_than_local_gb(self):
# Verify no local disk is being used currently
@@ -138,6 +142,42 @@ class BootFromVolumeTest(integrated_helpers._IntegratedTestBase):
image_uuid = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
post_data = {'rebuild': {'imageRef': image_uuid}}
self.api.post_server_action(server_id, post_data)
+
+ def test_rebuild_volume_backed_larger_than_local_gb(self):
+ # Verify no local disk is being used currently
+ self._verify_zero_local_gb_used()
+
+ # Create flavors with disk larger than available host local disk
+ flavor_id = self._create_flavor(memory_mb=64, vcpu=1, disk=8192,
+ ephemeral=0)
+
+ # Boot a server with a flavor disk larger than the available local
+ # disk. It should succeed for boot from volume.
+ server = self._build_server(image_uuid='', flavor_id=flavor_id)
+ volume_uuid = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
+ bdm = {'boot_index': 0,
+ 'uuid': volume_uuid,
+ 'source_type': 'volume',
+ 'destination_type': 'volume'}
+ server['block_device_mapping_v2'] = [bdm]
+ created_server = self.api.post_server({"server": server})
+ server_id = created_server['id']
+ self._wait_for_state_change(created_server, 'ACTIVE')
+
+ # Check that hypervisor local disk reporting is still 0
+ self._verify_zero_local_gb_used()
+ # Check that instance has not been saved with 0 root_gb
+ self._verify_instance_flavor_not_zero(server_id)
+ # Check that request spec has not been saved with 0 root_gb
+ self._verify_request_spec_flavor_not_zero(server_id)
+
+ # Rebuild
+ # The image_uuid is from CinderFixture for the
+ # volume representing IMAGE_BACKED_VOL.
+ self.api.microversion = '2.93'
+ image_uuid = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
+ post_data = {'rebuild': {'imageRef': image_uuid}}
+ self.api.post_server_action(server_id, post_data)
self._wait_for_state_change(created_server, 'ACTIVE')
# Check that hypervisor local disk reporting is still 0
diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py
index ee8e30df0a..d1ab84aa7b 100644
--- a/nova/tests/functional/test_servers.py
+++ b/nova/tests/functional/test_servers.py
@@ -20,6 +20,7 @@ import time
from unittest import mock
import zlib
+from cinderclient import exceptions as cinder_exception
from keystoneauth1 import adapter
from oslo_config import cfg
from oslo_log import log as logging
@@ -1514,6 +1515,90 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase):
'volume-backed server', str(resp))
+class ServerRebuildTestCaseV293(integrated_helpers._IntegratedTestBase):
+ api_major_version = 'v2.1'
+
+ def setUp(self):
+ super(ServerRebuildTestCaseV293, self).setUp()
+ self.cinder = nova_fixtures.CinderFixture(self)
+ self.useFixture(self.cinder)
+
+ def _bfv_server(self):
+ server_req_body = {
+ # There is no imageRef because this is boot from volume.
+ 'server': {
+ 'flavorRef': '1', # m1.tiny from DefaultFlavorsFixture,
+ 'name': 'test_volume_backed_rebuild_different_image',
+ 'networks': [],
+ 'block_device_mapping_v2': [{
+ 'boot_index': 0,
+ 'uuid':
+ nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
+ 'source_type': 'volume',
+ 'destination_type': 'volume'
+ }]
+ }
+ }
+ server = self.api.post_server(server_req_body)
+ return self._wait_for_state_change(server, 'ACTIVE')
+
+ def _test_rebuild(self, server):
+ self.api.microversion = '2.93'
+ # Now rebuild the server with a different image from the one used
+ # to create our fake volume.
+ rebuild_image_ref = self.glance.auto_disk_config_enabled_image['id']
+ rebuild_req_body = {'rebuild': {'imageRef': rebuild_image_ref}}
+
+ with mock.patch.object(self.compute.manager.virtapi,
+ 'wait_for_instance_event'):
+ self.api.api_post('/servers/%s/action' % server['id'],
+ rebuild_req_body,
+ check_response_status=[202])
+
+ def test_volume_backed_rebuild_root_v293(self):
+ server = self._bfv_server()
+ self._test_rebuild(server)
+
+ def test_volume_backed_rebuild_root_create_failed(self):
+ server = self._bfv_server()
+ error = cinder_exception.ClientException(code=500)
+ with mock.patch.object(volume.cinder.API, 'attachment_create',
+ side_effect=error):
+ # We expect this to fail because we are doing cast-as-call
+ self.assertRaises(client.OpenStackApiException,
+ self._test_rebuild, server)
+ server = self.api.get_server(server['id'])
+ self.assertIn('Failed to rebuild volume backed instance',
+ server['fault']['message'])
+ self.assertEqual('ERROR', server['status'])
+
+ def test_volume_backed_rebuild_root_instance_deleted(self):
+ server = self._bfv_server()
+ error = exception.InstanceNotFound(instance_id=server['id'])
+ with mock.patch.object(self.compute.manager, '_detach_root_volume',
+ side_effect=error):
+ # We expect this to fail because we are doing cast-as-call
+ self.assertRaises(client.OpenStackApiException,
+ self._test_rebuild, server)
+ server = self.api.get_server(server['id'])
+ self.assertIn('Failed to rebuild volume backed instance',
+ server['fault']['message'])
+ self.assertEqual('ERROR', server['status'])
+
+ def test_volume_backed_rebuild_root_delete_old_failed(self):
+ server = self._bfv_server()
+ error = cinder_exception.ClientException(code=500)
+ with mock.patch.object(volume.cinder.API, 'attachment_delete',
+ side_effect=error):
+ # We expect this to fail because we are doing cast-as-call
+ self.assertRaises(client.OpenStackApiException,
+ self._test_rebuild, server)
+ server = self.api.get_server(server['id'])
+ self.assertIn('Failed to rebuild volume backed instance',
+ server['fault']['message'])
+ self.assertEqual('ERROR', server['status'])
+
+
class ServersTestV280(integrated_helpers._IntegratedTestBase):
api_major_version = 'v2.1'
diff --git a/nova/tests/unit/cmd/test_policy.py b/nova/tests/unit/cmd/test_policy.py
index 60e8e32c75..df51665959 100644
--- a/nova/tests/unit/cmd/test_policy.py
+++ b/nova/tests/unit/cmd/test_policy.py
@@ -129,7 +129,7 @@ class TestPolicyCheck(test.NoDBTestCase):
def test_filter_rules_non_admin(self):
context = nova_context.RequestContext()
- rule_conditions = [base_policies.PROJECT_READER]
+ rule_conditions = [base_policies.PROJECT_READER_OR_ADMIN]
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str in rule_conditions]
self._check_filter_rules(context, expected_rules=expected_rules)
@@ -156,7 +156,7 @@ class TestPolicyCheck(test.NoDBTestCase):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
- rule_conditions = [base_policies.PROJECT_READER]
+ rule_conditions = [base_policies.PROJECT_READER_OR_ADMIN]
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str in rule_conditions]
self._check_filter_rules(db_context, instance, expected_rules)
diff --git a/nova/tests/unit/cmd/test_status.py b/nova/tests/unit/cmd/test_status.py
index 4f2510438d..f5fcc168ee 100644
--- a/nova/tests/unit/cmd/test_status.py
+++ b/nova/tests/unit/cmd/test_status.py
@@ -39,7 +39,6 @@ from nova import exception
# in the tests, we don't use them in the actual CLI.
from nova import objects
from nova.objects import service
-from nova import policy
from nova import test
from nova.tests import fixtures as nova_fixtures
@@ -393,60 +392,6 @@ class TestUpgradeCheckCinderAPI(test.NoDBTestCase):
self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
-class TestUpgradeCheckPolicy(test.NoDBTestCase):
-
- new_default_status = upgradecheck.Code.WARNING
-
- def setUp(self):
- super(TestUpgradeCheckPolicy, self).setUp()
- self.cmd = status.UpgradeCommands()
- self.rule_name = "context_is_admin"
-
- def tearDown(self):
- super(TestUpgradeCheckPolicy, self).tearDown()
- # Check if policy is reset back after the upgrade check
- self.assertIsNone(policy._ENFORCER)
-
- def test_policy_rule_with_new_defaults(self):
- new_default = "role:admin and system_scope:all"
- rule = {self.rule_name: new_default}
- self.policy.set_rules(rule, overwrite=False)
- self.assertEqual(self.new_default_status,
- self.cmd._check_policy().code)
-
- def test_policy_rule_with_old_defaults(self):
- new_default = "is_admin:True"
- rule = {self.rule_name: new_default}
- self.policy.set_rules(rule, overwrite=False)
-
- self.assertEqual(upgradecheck.Code.SUCCESS,
- self.cmd._check_policy().code)
-
- def test_policy_rule_with_both_defaults(self):
- new_default = "(role:admin and system_scope:all) or is_admin:True"
- rule = {self.rule_name: new_default}
- self.policy.set_rules(rule, overwrite=False)
-
- self.assertEqual(upgradecheck.Code.SUCCESS,
- self.cmd._check_policy().code)
-
- def test_policy_checks_with_fresh_init_and_no_policy_override(self):
- self.policy = self.useFixture(nova_fixtures.OverridePolicyFixture(
- rules_in_file={}))
- policy.reset()
- self.assertEqual(upgradecheck.Code.SUCCESS,
- self.cmd._check_policy().code)
-
-
-class TestUpgradeCheckPolicyEnableScope(TestUpgradeCheckPolicy):
-
- new_default_status = upgradecheck.Code.SUCCESS
-
- def setUp(self):
- super(TestUpgradeCheckPolicyEnableScope, self).setUp()
- self.flags(enforce_scope=True, group="oslo_policy")
-
-
class TestUpgradeCheckOldCompute(test.NoDBTestCase):
def setUp(self):
diff --git a/nova/tests/unit/compute/test_api.py b/nova/tests/unit/compute/test_api.py
index 73b36c2ef0..ca72474a4c 100644
--- a/nova/tests/unit/compute/test_api.py
+++ b/nova/tests/unit/compute/test_api.py
@@ -4004,6 +4004,155 @@ class _ComputeAPIUnitTestMixIn(object):
_checks_for_create_and_rebuild.assert_called_once_with(
self.context, None, image, flavor, {}, [], None)
+ @ddt.data(True, False)
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(objects.Instance, 'get_flavor')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+ @mock.patch.object(compute_api.API, '_get_image')
+ @mock.patch.object(compute_api.API, '_check_image_arch')
+ @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+ @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ def test_rebuild_volume_backed(self, reimage_boot_vol,
+ _record_action_start, _checks_for_create_and_rebuild,
+ _check_auto_disk_config,
+ _check_image_arch, mock_get_image,
+ mock_get_bdms, get_flavor,
+ instance_save, req_spec_get_by_inst_uuid, request_save):
+ """Test a scenario where the instance is volume backed and we rebuild
+ with the following cases:
+
+ 1) reimage_boot_volume=True
+ 2) reimage_boot_volume=False
+
+ """
+ instance = fake_instance.fake_instance_obj(
+ self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata={}, image_ref=uuids.image_ref,
+ expected_attrs=['system_metadata'], node='fake')
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=None, image_id=None,
+ source_type='volume', destination_type='volume',
+ volume_type=None, snapshot_id=None,
+ volume_id=uuids.volume_id, volume_size=None)])
+ mock_get_bdms.return_value = bdms
+ get_flavor.return_value = test_flavor.fake_flavor
+ flavor = instance.get_flavor()
+ image = {
+ "id": uuids.image_ref,
+ "min_ram": 10, "min_disk": 1,
+ "properties": {
+ 'architecture': fields_obj.Architecture.X86_64}}
+ mock_get_image.return_value = (None, image)
+ fake_spec = objects.RequestSpec(id=1, force_nodes=None)
+ req_spec_get_by_inst_uuid.return_value = fake_spec
+ fake_volume = {'id': uuids.volume_id, 'status': 'in-use'}
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ admin_pass = "new password"
+ with mock.patch.object(self.compute_api.volume_api, 'get',
+ return_value=fake_volume), \
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm), \
+ mock.patch.object(self.compute_api.compute_task_api,
+ 'rebuild_instance') as rebuild_instance:
+ if reimage_boot_vol:
+ self.compute_api.rebuild(self.context,
+ instance,
+ uuids.image_ref,
+ admin_pass,
+ reimage_boot_volume=True)
+ rebuild_instance.assert_called_once_with(self.context,
+ instance=instance, new_pass=admin_pass,
+ image_ref=uuids.image_ref,
+ orig_image_ref=None, orig_sys_metadata={},
+ injected_files=[], bdms=bdms,
+ preserve_ephemeral=False, host=None,
+ request_spec=fake_spec,
+ reimage_boot_volume=True)
+ _check_auto_disk_config.assert_called_once_with(
+ image=image, auto_disk_config=None)
+ _checks_for_create_and_rebuild.assert_called_once_with(
+ self.context, None, image, flavor, {}, [], root_bdm)
+ mock_get_bdms.assert_called_once_with(
+ self.context, instance.uuid)
+ else:
+ self.assertRaises(
+ exception.NovaException,
+ self.compute_api.rebuild,
+ self.context,
+ instance,
+ uuids.image_ref,
+ admin_pass,
+ reimage_boot_volume=False)
+
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(objects.Instance, 'get_flavor')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+ @mock.patch.object(compute_api.API, '_get_image')
+ @mock.patch.object(compute_api.API, '_check_image_arch')
+ @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+ @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ def test_rebuild_volume_backed_fails(self, _record_action_start,
+ _checks_for_create_and_rebuild, _check_auto_disk_config,
+ _check_image_arch, mock_get_image,
+ mock_get_bdms, get_flavor,
+ instance_save, req_spec_get_by_inst_uuid, request_save):
+ """Test a scenario where we don't pass parameters to rebuild
+ boot volume
+ """
+ instance = fake_instance.fake_instance_obj(
+ self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata={}, image_ref=uuids.image_ref,
+ expected_attrs=['system_metadata'], node='fake')
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=None, image_id=None,
+ source_type='volume', destination_type='volume',
+ volume_type=None, snapshot_id=None,
+ volume_id=uuids.volume_id, volume_size=None)])
+ mock_get_bdms.return_value = bdms
+ get_flavor.return_value = test_flavor.fake_flavor
+ image = {
+ "id": uuids.image_ref,
+ "min_ram": 10, "min_disk": 1,
+ "properties": {
+ 'architecture': fields_obj.Architecture.X86_64}}
+ mock_get_image.return_value = (None, image)
+ fake_spec = objects.RequestSpec(id=1, force_nodes=None)
+ req_spec_get_by_inst_uuid.return_value = fake_spec
+ fake_volume = {'id': uuids.volume_id, 'status': 'in-use'}
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ admin_pass = "new password"
+ with mock.patch.object(self.compute_api.volume_api, 'get',
+ return_value=fake_volume), \
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm):
+ self.assertRaises(exception.NovaException,
+ self.compute_api.rebuild,
+ self.context,
+ instance,
+ uuids.image_ref,
+ admin_pass,
+ reimage_boot_volume=False)
+
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.Instance, 'get_flavor')
@@ -4052,7 +4201,7 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4125,7 +4274,7 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=None,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False)
# assert the request spec was modified so the scheduler picks
# the existing instance host/node
req_spec_save.assert_called_once_with()
@@ -4193,7 +4342,7 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4252,7 +4401,7 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4316,7 +4465,7 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index dadfa8fe25..314c29f583 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -2730,7 +2730,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata,
bdms=[], recreate=False, on_shared_storage=False,
preserve_ephemeral=False, migration=None, scheduled_node=None,
- limits={}, request_spec=None, accel_uuids=[])
+ limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False)
self.compute.terminate_instance(self.context, instance, [])
def test_rebuild_driver(self):
@@ -2760,7 +2761,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata,
bdms=[], recreate=False, on_shared_storage=False,
preserve_ephemeral=False, migration=None, scheduled_node=None,
- limits={}, request_spec=None, accel_uuids=[])
+ limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context, instance, [])
@@ -2812,7 +2814,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata,
bdms=bdms, recreate=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits={},
- on_shared_storage=False, request_spec=None, accel_uuids=[])
+ on_shared_storage=False, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context, instance, [])
@@ -2831,7 +2834,7 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata, bdms=[],
recreate=False, on_shared_storage=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits=None,
- request_spec=None, accel_uuids=[])
+ request_spec=None, accel_uuids=[], reimage_boot_volume=False)
self.compute.terminate_instance(self.context, instance, [])
def test_rebuild_launched_at_time(self):
@@ -2852,7 +2855,7 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata={}, bdms=[],
recreate=False, on_shared_storage=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits={}, request_spec=None,
- accel_uuids=[])
+ accel_uuids=[], reimage_boot_volume=False)
instance.refresh()
self.assertEqual(cur_time,
instance['launched_at'].replace(tzinfo=None))
@@ -2885,7 +2888,8 @@ class ComputeTestCase(BaseTestCase,
injected_files=injected_files, new_pass="new_password",
orig_sys_metadata=sys_metadata, bdms=[], recreate=False,
on_shared_storage=False, preserve_ephemeral=False, migration=None,
- scheduled_node=None, limits={}, request_spec=None, accel_uuids=[])
+ scheduled_node=None, limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False)
self.compute.terminate_instance(self.context, instance, [])
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@@ -4612,7 +4616,8 @@ class ComputeTestCase(BaseTestCase,
'limits': {},
'request_spec': None,
'on_shared_storage': False,
- 'accel_uuids': ()}),
+ 'accel_uuids': (),
+ 'reimage_boot_volume': False}),
("set_admin_password", task_states.UPDATING_PASSWORD,
{'new_pass': None}),
("rescue_instance", task_states.RESCUING,
@@ -5130,7 +5135,8 @@ class ComputeTestCase(BaseTestCase,
injected_files=[], new_pass=password,
orig_sys_metadata=orig_sys_metadata, bdms=[], recreate=False,
on_shared_storage=False, preserve_ephemeral=False, migration=None,
- scheduled_node=None, limits={}, request_spec=None, accel_uuids=[])
+ scheduled_node=None, limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False)
inst_ref.refresh()
@@ -13534,7 +13540,7 @@ class EvacuateHostTestCase(BaseTestCase):
image_ref, injected_files, 'newpass', {}, bdms, recreate=True,
on_shared_storage=on_shared_storage, migration=migration,
preserve_ephemeral=False, scheduled_node=node, limits=limits,
- request_spec=None, accel_uuids=[])
+ request_spec=None, accel_uuids=[], reimage_boot_volume=False)
if vm_states_is_stopped:
mock_notify_rebuild.assert_has_calls([
mock.call(ctxt, self.inst, self.inst.host, phase='start',
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index b9cacfa82e..2aadcd106c 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -5305,7 +5305,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None, None, None, None, None,
- recreate, False, False, None, scheduled_node, {}, None, [])
+ recreate, False, False, None, scheduled_node, {}, None, [], False)
mock_set.assert_called_once_with(None, 'failed')
mock_notify_about_instance_usage.assert_called_once_with(
mock.ANY, instance, 'rebuild.error', fault=mock_rebuild.side_effect
@@ -5416,7 +5416,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
None, recreate=True, on_shared_storage=None,
preserve_ephemeral=False, migration=None,
scheduled_node='fake-node',
- limits={}, request_spec=request_spec, accel_uuids=[])
+ limits={}, request_spec=request_spec, accel_uuids=[],
+ reimage_boot_volume=False)
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
@@ -5455,7 +5456,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, instance, None, None, None, None, None, None,
recreate=True, on_shared_storage=None, preserve_ephemeral=False,
migration=None, scheduled_node='fake-node', limits={},
- request_spec=request_spec, accel_uuids=[])
+ request_spec=request_spec, accel_uuids=[],
+ reimage_boot_volume=False)
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
@@ -5481,7 +5483,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None,
None, None, None, None, False,
- False, False, migration, None, {}, None, [])
+ False, False, migration, None, {}, None, [], False)
self.assertFalse(mock_get.called)
self.assertEqual(node, instance.node)
self.assertEqual('done', migration.status)
@@ -5503,7 +5505,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None, None, None, None,
None, True, False, False, mock.sentinel.migration, None, {},
- None, [])
+ None, [], False)
mock_get.assert_called_once_with(mock.ANY, self.compute.host)
mock_rt.finish_evacuation.assert_called_once_with(
instance, 'new-node', mock.sentinel.migration)
@@ -5585,7 +5587,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
recreate, on_shared_storage,
preserve_ephemeral, {}, {},
self.allocations,
- mock.sentinel.mapping, [])
+ mock.sentinel.mapping, [],
+ False)
mock_notify_usage.assert_has_calls(
[mock.call(self.context, instance, "rebuild.start",
@@ -5603,8 +5606,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
provider_mappings=mock.sentinel.mapping)
mock_get_nw_info.assert_called_once_with(self.context, instance)
- def test_rebuild_default_impl(self):
- def _detach(context, bdms):
+ @ddt.data((False, False), (False, True), (True, False), (True, True))
+ @ddt.unpack
+ def test_rebuild_default_impl(self, is_vol_backed, reimage_boot_vol):
+ fake_image_meta = mock.MagicMock(id='fake_id')
+
+ def _detach(context, bdms, detach_root_bdm=True):
# NOTE(rpodolyaka): check that instance has been powered off by
# the time we detach block devices, exact calls arguments will be
# checked below
@@ -5630,13 +5637,20 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock.patch.object(self.compute, '_power_off_instance',
return_value=None),
mock.patch.object(self.compute, '_get_accel_info',
- return_value=[])
+ return_value=[]),
+ mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ return_value=is_vol_backed),
+ mock.patch.object(self.compute, '_rebuild_volume_backed_instance'),
+ mock.patch.object(compute_utils, 'get_root_bdm')
) as(
mock_destroy,
mock_spawn,
mock_save,
mock_power_off,
- mock_accel_info
+ mock_accel_info,
+ mock_is_volume_backed,
+ mock_rebuild_vol_backed_inst,
+ mock_get_root,
):
instance = fake_instance.fake_instance_obj(self.context)
instance.migration_context = None
@@ -5646,9 +5660,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
instance.device_metadata = None
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[task_states.REBUILDING])
+ fake_block_device_info = {
+ 'block_device_mapping': [
+ {'attachment_id': '341a8917-f74d-4473-8ee7-4ca05e5e0ab3',
+ 'volume_id': 'b7c93bb9-dfe4-41af-aa56-e6b28342fd8f',
+ 'connection_info': {'driver_volume_type': 'iscsi',
+ 'data': {'target_discovered': False,
+ 'target_portal': '127.0.0.1:3260',
+ 'target_iqn': 'iqn.2010-10.org.openstack:volume-'
+ 'b7c93bb9-dfe4-41af-aa56-e6b28342fd8f',
+ 'target_lun': 0}}}]}
self.compute._rebuild_default_impl(self.context,
instance,
- None,
+ fake_image_meta,
[],
admin_password='new_pass',
bdms=[],
@@ -5657,16 +5681,151 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
attach_block_devices=_attach,
network_info=None,
evacuate=False,
- block_device_info=None,
- preserve_ephemeral=False)
+ block_device_info=
+ fake_block_device_info,
+ preserve_ephemeral=False,
+ reimage_boot_volume=
+ reimage_boot_vol)
self.assertTrue(mock_save.called)
self.assertTrue(mock_spawn.called)
mock_destroy.assert_called_once_with(
self.context, instance,
- network_info=None, block_device_info=None)
+ network_info=None, block_device_info=fake_block_device_info)
mock_power_off.assert_called_once_with(
instance, clean_shutdown=True)
+ if is_vol_backed and reimage_boot_vol:
+ mock_rebuild_vol_backed_inst.assert_called_once_with(
+ self.context, instance, [], fake_image_meta.id)
+ else:
+ mock_rebuild_vol_backed_inst.assert_not_called()
+
+ @mock.patch('nova.volume.cinder.API.attachment_delete')
+ @mock.patch('nova.volume.cinder.API.attachment_create',
+ return_value={'id': uuids.new_attachment_id})
+ @mock.patch.object(nova.compute.manager.ComputeVirtAPI,
+ 'wait_for_instance_event')
+ def test__rebuild_volume_backed_instance(
+ self, wait_inst_event, attach_create, attach_delete):
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ bdms = [root_bdm]
+ events = [('volume-reimaged', root_bdm.volume_id)]
+ image_size_gb = 1
+ deadline = CONF.reimage_timeout_per_gb * image_size_gb
+
+ with test.nested(
+ mock.patch.object(objects.Instance, 'save',
+ return_value=None),
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm),
+ mock.patch.object(self.compute, 'volume_api'),
+ mock.patch.object(self.compute.image_api, 'get'),
+ ) as (
+ mock_save,
+ mock_get_root_bdm,
+ mock_vol_api,
+ mock_get_img
+ ):
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.task_state = task_states.REBUILDING
+ # 1024 ** 3 = 1073741824
+ mock_get_img.return_value = {'size': 1073741824}
+ self.compute._rebuild_volume_backed_instance(
+ self.context, instance, bdms, uuids.image_id)
+ mock_vol_api.attachment_create.assert_called_once_with(
+ self.context, uuids.volume_id, instance.uuid)
+ mock_vol_api.attachment_delete.assert_called_once_with(
+ self.context, uuids.old_attachment_id)
+ mock_vol_api.reimage_volume.assert_called_once_with(
+ self.context, uuids.volume_id, uuids.image_id,
+ reimage_reserved=True)
+ mock_get_img.assert_called_once_with(
+ self.context, uuids.image_id)
+ mock_get_root_bdm.assert_called_once_with(
+ self.context, instance, bdms)
+ wait_inst_event.assert_called_once_with(
+ instance, events, deadline=deadline,
+ error_callback=self.compute._reimage_failed_callback)
+
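Pieced together from the assertions above, the rebuild flow under test, as
a hedged sketch (ordering per the mocks; names per this diff, not nova's
verbatim manager code):

    # 1. create a fresh attachment, 2. drop the old one, 3. ask cinder
    # to reimage the volume, 4. wait for the 'volume-reimaged' external
    # event with a deadline scaled by image size.
    new_attachment = self.volume_api.attachment_create(
        ctxt, volume_id, instance.uuid)
    self.volume_api.attachment_delete(ctxt, old_attachment_id)
    self.volume_api.reimage_volume(
        ctxt, volume_id, image_id, reimage_reserved=True)
    deadline = CONF.reimage_timeout_per_gb * image_size_gb
    self.virtapi.wait_for_instance_event(
        instance, [('volume-reimaged', volume_id)], deadline=deadline,
        error_callback=self._reimage_failed_callback)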
+ @mock.patch('nova.volume.cinder.API.attachment_delete')
+ @mock.patch('nova.volume.cinder.API.attachment_create',
+ return_value={'id': uuids.new_attachment_id})
+ @mock.patch.object(nova.compute.manager.ComputeVirtAPI,
+ 'wait_for_instance_event')
+ def test__rebuild_volume_backed_instance_image_not_found(
+ self, wait_inst_event, attach_create, attach_delete):
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ bdms = [root_bdm]
+
+ with test.nested(
+ mock.patch.object(objects.Instance, 'save',
+ return_value=None),
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm),
+ mock.patch.object(self.compute, 'volume_api'),
+ mock.patch.object(self.compute.image_api, 'get'),
+ ) as(
+ mock_save,
+ mock_get_root_bdm,
+ mock_vol_api,
+ mock_get_img
+ ):
+ mock_get_img.side_effect = exception.ImageNotFound(
+ image_id=uuids.image_id)
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.task_state = task_states.REBUILDING
+ instance.save(expected_task_state=[task_states.REBUILDING])
+ mock_get_img.return_value = {'size': 1}
+ self.assertRaises(
+ exception.BuildAbortException,
+ self.compute._rebuild_volume_backed_instance,
+ self.context, instance, bdms, uuids.image_id)
+ mock_vol_api.attachment_create.assert_called_once_with(
+ self.context, uuids.volume_id, instance.uuid)
+ mock_vol_api.attachment_delete.assert_called_once_with(
+ self.context, uuids.old_attachment_id)
+ mock_get_img.assert_called_once_with(
+ self.context, uuids.image_id)
+
+ @mock.patch.object(objects.Instance, 'save', return_value=None)
+ @mock.patch.object(fake_driver.SmallFakeDriver, 'detach_volume')
+ @mock.patch.object(cinder.API, 'roll_detaching')
+ def test__detach_root_volume(self, mock_roll_detach, mock_detach,
+ mock_save):
+ exception_list = [
+ '',
+ exception.DiskNotFound(location="not\\here"),
+ exception.DeviceDetachFailed(device="fake_dev", reason="unknown"),
+ ]
+ mock_detach.side_effect = exception_list
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.task_state = task_states.REBUILDING
+ instance.save(expected_task_state=[task_states.REBUILDING])
+ self.compute._detach_root_volume(self.context, instance, root_bdm)
+ self.compute._detach_root_volume(self.context, instance, root_bdm)
+ self.assertRaises(exception.DeviceDetachFailed,
+ self.compute._detach_root_volume,
+ self.context, instance, root_bdm)
+ mock_roll_detach.assert_called_with(self.context, uuids.volume_id)
+ self.assertRaises(Exception, self.compute._detach_root_volume, # noqa
+ self.context, instance, root_bdm)
+ mock_roll_detach.assert_called_with(self.context, uuids.volume_id)
def test_do_rebuild_instance_check_trusted_certs(self):
"""Tests the scenario that we're rebuilding an instance with
@@ -5688,7 +5847,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
request_spec=objects.RequestSpec(),
allocations=self.allocations,
request_group_resource_providers_mapping=mock.sentinel.mapping,
- accel_uuids=[])
+ accel_uuids=[], reimage_boot_volume=False)
self.assertIn('Trusted image certificates provided on host', str(ex))
def test_reverts_task_state_instance_not_found(self):
diff --git a/nova/tests/unit/compute/test_rpcapi.py b/nova/tests/unit/compute/test_rpcapi.py
index 541cc1012e..55d0fc53e8 100644
--- a/nova/tests/unit/compute/test_rpcapi.py
+++ b/nova/tests/unit/compute/test_rpcapi.py
@@ -835,7 +835,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
bdms=[], instance=self.fake_instance_obj, host='new_host',
orig_sys_metadata=None, recreate=True, on_shared_storage=True,
preserve_ephemeral=True, migration=None, node=None,
- limits=None, request_spec=None, accel_uuids=[], version='6.0')
+ limits=None, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, version='6.1')
def test_rebuild_instance_old_rpcapi(self):
# With rpcapi < 5.12, accel_uuids must be dropped in the client call.
@@ -862,20 +863,58 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
'migration': None,
'limits': None
}
+ # Pass reimage_boot_volume to the client call...
compute_api.rebuild_instance(
ctxt, instance=self.fake_instance_obj,
accel_uuids=['938af7f9-f136-4e5a-bdbe-3b6feab54311'],
- node=None, host=None, **rebuild_args)
+ node=None, host=None, reimage_boot_volume=False,
+ **rebuild_args)
mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
mock.call('5.12')])
mock_client.prepare.assert_called_with(
server=self.fake_instance_obj.host, version='5.0')
+ # ...and assert that it does not show up on the wire before 6.1
mock_cctx.cast.assert_called_with( # No accel_uuids
ctxt, 'rebuild_instance',
instance=self.fake_instance_obj,
scheduled_node=None, **rebuild_args)
+ def test_rebuild_instance_vol_backed_old_rpcapi(self):
+ # With rpcapi < 6.1, passing reimage_boot_volume=True should
+ # raise an error.
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+ compute_api = compute_rpcapi.ComputeAPI()
+ compute_api.router.client = mock.Mock()
+ mock_client = mock.MagicMock()
+ compute_api.router.client.return_value = mock_client
+ # Force can_send_version to [False, True, True], so that RPC
+ # version 6.0 is used.
+ mock_client.can_send_version.side_effect = [False, True, True]
+ mock_cctx = mock.MagicMock()
+ mock_client.prepare.return_value = mock_cctx
+ rebuild_args = {
+ 'new_pass': 'admin_password',
+ 'injected_files': 'files_to_inject',
+ 'image_ref': uuids.image_ref,
+ 'orig_image_ref': uuids.orig_image_ref,
+ 'orig_sys_metadata': 'orig_sys_meta',
+ 'bdms': {},
+ 'recreate': False,
+ 'on_shared_storage': False,
+ 'preserve_ephemeral': False,
+ 'request_spec': None,
+ 'migration': None,
+ 'limits': None,
+ 'accel_uuids': [],
+ 'reimage_boot_volume': True,
+ }
+ self.assertRaises(
+ exception.NovaException, compute_api.rebuild_instance,
+ ctxt, instance=self.fake_instance_obj,
+ node=None, host=None, **rebuild_args)
+ mock_client.can_send_version.assert_has_calls([mock.call('6.1')])
+
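For clarity, a plausible sketch (not nova's verbatim code; the exception
text is an assumption) of the client-side gate the two tests above
exercise; the conductor tests below pin the analogous RPC 1.24 check:

    # 'reimage_boot_volume' only exists on the wire from compute RPC
    # 6.1; before that, True cannot be honored and must fail fast,
    # while False is simply dropped from the call.
    if not client.can_send_version('6.1'):
        if kwargs.pop('reimage_boot_volume', False):
            raise exception.NovaException(
                'Compute RPC version does not support '
                'reimage_boot_volume.')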
def test_reserve_block_device_name(self):
self.flags(long_rpc_timeout=1234)
self._test_compute_api('reserve_block_device_name', 'call',
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
index 4950bf7f4b..e942217a6c 100644
--- a/nova/tests/unit/conductor/test_conductor.py
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -388,7 +388,8 @@ class _BaseTaskTestCase(object):
'on_shared_storage': False,
'preserve_ephemeral': False,
'host': 'compute-host',
- 'request_spec': None}
+ 'request_spec': None,
+ 'reimage_boot_volume': False}
if update_args:
rebuild_args.update(update_args)
compute_rebuild_args = copy.deepcopy(rebuild_args)
@@ -4750,6 +4751,42 @@ class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
mock.sentinel.migration)
can_send_version.assert_called_once_with('1.23')
+ def test_rebuild_instance_volume_backed(self):
+ inst_obj = self._create_fake_instance_obj()
+ version = '1.24'
+ cctxt_mock = mock.MagicMock()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host})
+ rebuild_args['reimage_boot_volume'] = True
+
+ @mock.patch.object(self.conductor.client, 'prepare',
+ return_value=cctxt_mock)
+ @mock.patch.object(self.conductor.client, 'can_send_version',
+ return_value=True)
+ def _test(mock_can_send_ver, prepare_mock):
+ self.conductor.rebuild_instance(
+ self.context, inst_obj, **rebuild_args)
+ prepare_mock.assert_called_once_with(version=version)
+ kw = {'instance': inst_obj, **rebuild_args}
+ cctxt_mock.cast.assert_called_once_with(
+ self.context, 'rebuild_instance', **kw)
+ _test()
+
+ def test_rebuild_instance_volume_backed_old_service(self):
+ """Tests rebuild_instance_volume_backed when the service is too old"""
+ inst_obj = mock.MagicMock()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host})
+ rebuild_args['reimage_boot_volume'] = True
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version',
+ return_value=False) as can_send_version:
+ self.assertRaises(exc.NovaException,
+ self.conductor.rebuild_instance,
+ self.context, inst_obj,
+ **rebuild_args)
+ can_send_version.assert_called_once_with('1.24')
+
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""Compute task API Tests."""
diff --git a/nova/tests/unit/console/test_websocketproxy.py b/nova/tests/unit/console/test_websocketproxy.py
index 30f3502bc8..fc25bef2bc 100644
--- a/nova/tests/unit/console/test_websocketproxy.py
+++ b/nova/tests/unit/console/test_websocketproxy.py
@@ -587,12 +587,12 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
- def test_reject_open_redirect(self):
+ def test_reject_open_redirect(self, url='//example.com/%2F..'):
# This will test the behavior when an attempt is made to cause an open
# redirect. It should be rejected.
mock_req = mock.MagicMock()
mock_req.makefile().readline.side_effect = [
- b'GET //example.com/%2F.. HTTP/1.1\r\n',
+ f'GET {url} HTTP/1.1\r\n'.encode('utf-8'),
b''
]
@@ -617,41 +617,32 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
result = output.readlines()
# Verify no redirect happens and instead a 400 Bad Request is returned.
- self.assertIn('400 URI must not start with //', result[0].decode())
+ # NOTE: As of python 3.10.6 there is a fix for this vulnerability,
+ # which will cause a 301 Moved Permanently error to be returned
+ # instead that redirects to a sanitized version of the URL with extra
+ # leading '/' characters removed.
+ # See https://github.com/python/cpython/issues/87389 for details.
+ # We will consider either response to be valid for this test. This will
+ # also help if and when the above fix gets backported to older versions
+ # of python.
+ errmsg = result[0].decode()
+ expected_nova = '400 URI must not start with //'
+ expected_cpython = '301 Moved Permanently'
+
+ self.assertTrue(expected_nova in errmsg or expected_cpython in errmsg)
+
+ # If we detect the cpython fix, verify that the redirect location is
+ # now the same url but with extra leading '/' characters removed.
+ if expected_cpython in errmsg:
+ location = result[3].decode()
+ location = location.removeprefix('Location: ').rstrip('\r\n')
+ self.assertTrue(
+ location.startswith('/example.com/%2F..'),
+ msg='Redirect location is not the expected sanitized URL',
+ )
def test_reject_open_redirect_3_slashes(self):
- # This will test the behavior when an attempt is made to cause an open
- # redirect. It should be rejected.
- mock_req = mock.MagicMock()
- mock_req.makefile().readline.side_effect = [
- b'GET ///example.com/%2F.. HTTP/1.1\r\n',
- b''
- ]
-
- # Collect the response data to verify at the end. The
- # SimpleHTTPRequestHandler writes the response data by calling the
- # request socket sendall() method.
- self.data = b''
-
- def fake_sendall(data):
- self.data += data
-
- mock_req.sendall.side_effect = fake_sendall
-
- client_addr = ('8.8.8.8', 54321)
- mock_server = mock.MagicMock()
- # This specifies that the server will be able to handle requests other
- # than only websockets.
- mock_server.only_upgrade = False
-
- # Constructing a handler will process the mock_req request passed in.
- websocketproxy.NovaProxyRequestHandler(
- mock_req, client_addr, mock_server)
-
- # Verify no redirect happens and instead a 400 Bad Request is returned.
- self.data = self.data.decode()
- self.assertIn('Error code: 400', self.data)
- self.assertIn('Message: URI must not start with //', self.data)
+ self.test_reject_open_redirect(url='///example.com/%2F..')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_no_compute_rpcapi_with_invalid_token(self, mock_validate):
diff --git a/nova/tests/unit/notifications/objects/test_notification.py b/nova/tests/unit/notifications/objects/test_notification.py
index 1fddd26045..41352f2e48 100644
--- a/nova/tests/unit/notifications/objects/test_notification.py
+++ b/nova/tests/unit/notifications/objects/test_notification.py
@@ -386,7 +386,7 @@ notification_object_data = {
# ImageMetaProps, so when you see a fail here for that reason, you must
# *also* bump the version of ImageMetaPropsPayload. See its docstring for
# more information.
- 'ImageMetaPropsPayload': '1.10-44cf0030dc94a1a60ba7a0e222e854d6',
+ 'ImageMetaPropsPayload': '1.11-938809cd33367c52cbc814fb9b6783dc',
'InstanceActionNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceActionPayload': '1.8-4fa3da9cbf0761f1f700ae578f36dc2f',
'InstanceActionRebuildNotification':
diff --git a/nova/tests/unit/objects/test_image_meta.py b/nova/tests/unit/objects/test_image_meta.py
index e47f653ba2..27d91290ad 100644
--- a/nova/tests/unit/objects/test_image_meta.py
+++ b/nova/tests/unit/objects/test_image_meta.py
@@ -108,6 +108,7 @@ class TestImageMetaProps(test.NoDBTestCase):
'hw_video_model': 'vga',
'hw_video_ram': '512',
'hw_qemu_guest_agent': 'yes',
+ 'hw_locked_memory': 'true',
'trait:CUSTOM_TRUSTED': 'required',
# Fill sane values for the rest here
}
@@ -116,6 +117,7 @@ class TestImageMetaProps(test.NoDBTestCase):
self.assertEqual('vga', virtprops.hw_video_model)
self.assertEqual(512, virtprops.hw_video_ram)
self.assertTrue(virtprops.hw_qemu_guest_agent)
+ self.assertTrue(virtprops.hw_locked_memory)
self.assertIsNotNone(virtprops.traits_required)
self.assertIn('CUSTOM_TRUSTED', virtprops.traits_required)
@@ -285,6 +287,28 @@ class TestImageMetaProps(test.NoDBTestCase):
self.assertEqual([set([0, 1, 2, 3])],
virtprops.hw_numa_cpus)
+ def test_locked_memory_prop(self):
+ props = {'hw_locked_memory': 'true'}
+ virtprops = objects.ImageMetaProps.from_dict(props)
+ self.assertTrue(virtprops.hw_locked_memory)
+
+ def test_obj_make_compatible_hw_locked_memory(self):
+ """Check 'hw_locked_memory' compatibility."""
+ # assert that 'hw_locked_memory' is supported
+ # on a suitably new version
+ obj = objects.ImageMetaProps(
+ hw_locked_memory='true',
+ )
+ primitive = obj.obj_to_primitive('1.33')
+ self.assertIn('hw_locked_memory',
+ primitive['nova_object.data'])
+ self.assertTrue(primitive['nova_object.data']['hw_locked_memory'])
+
+ # and is absent on older versions
+ primitive = obj.obj_to_primitive('1.32')
+ self.assertNotIn('hw_locked_memory',
+ primitive['nova_object.data'])
+
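A minimal sketch, assuming the usual oslo.versionedobjects backlevel
pattern, of the ImageMetaProps.obj_make_compatible handling this test
exercises (the 1.33 boundary matches the version bump in this diff):

    from oslo_utils import versionutils

    def obj_make_compatible(self, primitive, target_version):
        super(ImageMetaProps, self).obj_make_compatible(
            primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(
            target_version)
        if target_version < (1, 33):
            # 'hw_locked_memory' was only added in 1.33.
            primitive.pop('hw_locked_memory', None)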
def test_get_unnumbered_trait_fields(self):
"""Tests that only valid un-numbered required traits are parsed from
the properties.
diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py
index 6672967f90..9e49b549f3 100644
--- a/nova/tests/unit/objects/test_objects.py
+++ b/nova/tests/unit/objects/test_objects.py
@@ -1072,14 +1072,14 @@ object_data = {
'HyperVLiveMigrateData': '1.4-e265780e6acfa631476c8170e8d6fce0',
'IDEDeviceBus': '1.0-29d4c9f27ac44197f01b6ac1b7e16502',
'ImageMeta': '1.8-642d1b2eb3e880a367f37d72dd76162d',
- 'ImageMetaProps': '1.32-4967d35948af08b710b8b861f3fff0f9',
+ 'ImageMetaProps': '1.33-6b7a29f769e6b8eee3f05832d78c85a2',
'Instance': '2.7-d187aec68cad2e4d8b8a03a68e4739ce',
'InstanceAction': '1.2-9a5abc87fdd3af46f45731960651efb5',
'InstanceActionEvent': '1.4-5b1f361bd81989f8bb2c20bb7e8a4cb4',
'InstanceActionEventList': '1.1-13d92fb953030cdbfee56481756e02be',
'InstanceActionList': '1.1-a2b2fb6006b47c27076d3a1d48baa759',
'InstanceDeviceMetadata': '1.0-74d78dd36aa32d26d2769a1b57caf186',
- 'InstanceExternalEvent': '1.4-06c2dfcf2d2813c24cd37ee728524f1a',
+ 'InstanceExternalEvent': '1.5-1ec57351a9851c1eb43ccd90662d6dd0',
'InstanceFault': '1.2-7ef01f16f1084ad1304a513d6d410a38',
'InstanceFaultList': '1.2-6bb72de2872fe49ded5eb937a93f2451',
'InstanceGroup': '1.11-852ac511d30913ee88f3c3a869a8f30a',
diff --git a/nova/tests/unit/policies/base.py b/nova/tests/unit/policies/base.py
index 5ebccd9121..68a051b26c 100644
--- a/nova/tests/unit/policies/base.py
+++ b/nova/tests/unit/policies/base.py
@@ -134,6 +134,44 @@ class BasePolicyTest(test.TestCase):
self.system_admin_context, self.system_foo_context,
self.system_member_context, self.system_reader_context,
])
+ # A few common sets of contexts to be used in tests
+ #
+ # With scope disabled and no legacy rule, any admin and
+ # project member has access. No other role in that project
+ # will have access.
+ self.project_member_or_admin_with_no_scope_no_legacy = set([
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ ])
+ # With scope enabled and the legacy rule, only project scoped
+ # admins and any role in that project will have access.
+ self.project_m_r_or_admin_with_scope_and_legacy = set([
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context
+ ])
+ # With scope enabled and no legacy rule, only project scoped
+ # admins and project members have access. No other role in that
+ # project, and no system scoped token, will have access.
+ self.project_member_or_admin_with_scope_no_legacy = set([
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context
+ ])
+ # With scope disabled and no legacy rule, any admin, project
+ # member, or project reader has access. No other role in that
+ # project will have access.
+ self.project_reader_or_admin_with_no_scope_no_legacy = set([
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context
+ ])
+ # With scope enabled and no legacy rule, only project scoped
+ # admins, members, and readers have access. No other role in that
+ # project, and no system scoped token, will have access.
+ self.project_reader_or_admin_with_scope_no_legacy = set([
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context
+ ])
if self.without_deprecated_rules:
# To simulate the new world, remove deprecations by overriding
@@ -149,6 +187,10 @@ class BasePolicyTest(test.TestCase):
"role:member and project_id:%(project_id)s",
"project_reader_api":
"role:reader and project_id:%(project_id)s",
+ "project_member_or_admin":
+ "rule:project_member_api or rule:context_is_admin",
+ "project_reader_or_admin":
+ "rule:project_reader_api or rule:context_is_admin",
})
self.policy.set_rules(self.rules_without_deprecation,
overwrite=False)
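
The two overridden rules registered above simply 'or' the project check with the admin check. A self-contained sketch of how oslo.policy evaluates such a composed check string (rule names mirror the test overrides above, not nova's shipped defaults):

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.ConfigOpts())
    enforcer.register_defaults([
        policy.RuleDefault('context_is_admin', 'role:admin'),
        policy.RuleDefault('project_member_api',
                           'role:member and project_id:%(project_id)s'),
        policy.RuleDefault('project_member_or_admin',
                           'rule:project_member_api or rule:context_is_admin'),
    ])
    target = {'project_id': 'p1'}
    # A member of the target project passes, an admin passes regardless
    # of project, and a member of another project is denied.
    assert enforcer.enforce('project_member_or_admin', target,
                            {'roles': ['member'], 'project_id': 'p1'})
    assert enforcer.enforce('project_member_or_admin', target,
                            {'roles': ['admin'], 'project_id': 'p2'})
    assert not enforcer.enforce('project_member_or_admin', target,
                                {'roles': ['member'], 'project_id': 'p2'})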
diff --git a/nova/tests/unit/policies/test_admin_actions.py b/nova/tests/unit/policies/test_admin_actions.py
index 60458a1a80..21157fd832 100644
--- a/nova/tests/unit/policies/test_admin_actions.py
+++ b/nova/tests/unit/policies/test_admin_actions.py
@@ -78,12 +78,6 @@ class AdminActionsNoLegacyNoScopePolicyTest(AdminActionsPolicyTest):
without_deprecated_rules = True
- def setUp(self):
- super(AdminActionsNoLegacyNoScopePolicyTest, self).setUp()
- # With no legacy rule and scope disable, only project admin
- # is able to perform server admin actions.
- self.project_action_authorized_contexts = [self.project_admin_context]
-
class AdminActionsScopeTypePolicyTest(AdminActionsPolicyTest):
"""Test Admin Actions APIs policies with system scope enabled.
@@ -111,10 +105,3 @@ class AdminActionsScopeTypeNoLegacyPolicyTest(AdminActionsScopeTypePolicyTest):
only project admin is able to perform admin action on their server.
"""
without_deprecated_rules = True
-
- def setUp(self):
- super(AdminActionsScopeTypeNoLegacyPolicyTest, self).setUp()
- # This is how our RBAC will looks like. With no legacy rule
- # and scope enable, only project admin is able to perform
- # server admin actions.
- self.project_action_authorized_contexts = [self.project_admin_context]
diff --git a/nova/tests/unit/policies/test_admin_password.py b/nova/tests/unit/policies/test_admin_password.py
index e5975086f4..01cce2950e 100644
--- a/nova/tests/unit/policies/test_admin_password.py
+++ b/nova/tests/unit/policies/test_admin_password.py
@@ -101,8 +101,8 @@ class AdminPasswordNoLegacyNoScopePolicyTest(AdminPasswordPolicyTest):
super(AdminPasswordNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to change the server password.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class AdminPasswordScopeTypePolicyTest(AdminPasswordPolicyTest):
@@ -119,10 +119,8 @@ class AdminPasswordScopeTypePolicyTest(AdminPasswordPolicyTest):
super(AdminPasswordScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Enabling scope will not allow system admin to change the password.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class AdminPasswordScopeTypeNoLegacyTest(AdminPasswordScopeTypePolicyTest):
@@ -139,5 +137,5 @@ class AdminPasswordScopeTypeNoLegacyTest(AdminPasswordScopeTypePolicyTest):
# With scope enabled and no legacy rule, only project admin/member
# will be able to change the server password.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_attach_interfaces.py b/nova/tests/unit/policies/test_attach_interfaces.py
index 781ce29e8a..33c531c9c7 100644
--- a/nova/tests/unit/policies/test_attach_interfaces.py
+++ b/nova/tests/unit/policies/test_attach_interfaces.py
@@ -117,22 +117,21 @@ class AttachInterfacesNoLegacyNoScopePolicyTest(AttachInterfacesPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
ai_policies.POLICY_ROOT % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'delete':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(AttachInterfacesNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, legacy admin loses power.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class AttachInterfacesScopeTypePolicyTest(AttachInterfacesPolicyTest):
@@ -149,12 +148,10 @@ class AttachInterfacesScopeTypePolicyTest(AttachInterfacesPolicyTest):
super(AttachInterfacesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system users are no longer allowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_reader_authorized_contexts = (
- self.project_member_authorized_contexts)
+ self.project_m_r_or_admin_with_scope_and_legacy)
class AttachInterfacesDeprecatedPolicyTest(base.BasePolicyTest):
@@ -217,20 +214,19 @@ class AttachInterfacesScopeTypeNoLegacyPolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
ai_policies.POLICY_ROOT % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'delete':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(AttachInterfacesScopeTypeNoLegacyPolicyTest, self).setUp()
# With no legacy and scope enabled, only project admin, member,
# and reader are allowed to operate on server interfaces.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
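
For readability of the override tables above: POLICY_ROOT is a per-API format string keyed by action name, so each entry maps one concrete policy to a new default. An illustrative value (assumed to follow nova's 'os_compute_api:<api>:%s' convention; the real constant lives in nova/policies/attach_interfaces.py):

    POLICY_ROOT = 'os_compute_api:os-attach-interfaces:%s'
    assert POLICY_ROOT % 'list' == 'os_compute_api:os-attach-interfaces:list'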
diff --git a/nova/tests/unit/policies/test_console_output.py b/nova/tests/unit/policies/test_console_output.py
index 0c3ed9ed2d..c1bccf1d55 100644
--- a/nova/tests/unit/policies/test_console_output.py
+++ b/nova/tests/unit/policies/test_console_output.py
@@ -73,8 +73,8 @@ class ConsoleOutputNoLegacyNoScopePolicyTest(ConsoleOutputPolicyTest):
super(ConsoleOutputNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member is able to
# get the server console.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class ConsoleOutputScopeTypePolicyTest(ConsoleOutputPolicyTest):
@@ -92,10 +92,8 @@ class ConsoleOutputScopeTypePolicyTest(ConsoleOutputPolicyTest):
super(ConsoleOutputScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Enabling scope will not allow system admin.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ConsoleOutputScopeTypeNoLegacyPolicyTest(
@@ -110,5 +108,5 @@ class ConsoleOutputScopeTypeNoLegacyPolicyTest(
# With scope enabled and no legacy rule, only project admin/member can
# get the server console.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_create_backup.py b/nova/tests/unit/policies/test_create_backup.py
index 83762e2214..b54ed366df 100644
--- a/nova/tests/unit/policies/test_create_backup.py
+++ b/nova/tests/unit/policies/test_create_backup.py
@@ -81,8 +81,8 @@ class CreateBackupNoLegacyNoScopePolicyTest(CreateBackupPolicyTest):
super(CreateBackupNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to create the server backup.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class CreateBackupScopeTypePolicyTest(CreateBackupPolicyTest):
@@ -100,10 +100,8 @@ class CreateBackupScopeTypePolicyTest(CreateBackupPolicyTest):
super(CreateBackupScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Enabling scope will not allow system users to create the server backup.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class CreateBackupScopeTypeNoLegacyPolicyTest(CreateBackupScopeTypePolicyTest):
@@ -116,5 +114,5 @@ class CreateBackupScopeTypeNoLegacyPolicyTest(CreateBackupScopeTypePolicyTest):
super(CreateBackupScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# will be able to create the server backup.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_deferred_delete.py b/nova/tests/unit/policies/test_deferred_delete.py
index faa26e7b15..08bb0213f4 100644
--- a/nova/tests/unit/policies/test_deferred_delete.py
+++ b/nova/tests/unit/policies/test_deferred_delete.py
@@ -105,16 +105,16 @@ class DeferredDeleteNoLegacyNoScopePolicyTest(DeferredDeletePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
dd_policies.BASE_POLICY_NAME % 'restore':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
dd_policies.BASE_POLICY_NAME % 'force':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(DeferredDeleteNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member is able to force
# delete or restore server.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class DeferredDeleteScopeTypePolicyTest(DeferredDeletePolicyTest):
@@ -132,10 +132,8 @@ class DeferredDeleteScopeTypePolicyTest(DeferredDeletePolicyTest):
super(DeferredDeleteScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Enabling scope will not allow system admin.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class DeferredDeleteScopeTypeNoLegacyPolicyTest(
@@ -146,14 +144,14 @@ class DeferredDeleteScopeTypeNoLegacyPolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
dd_policies.BASE_POLICY_NAME % 'restore':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
dd_policies.BASE_POLICY_NAME % 'force':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(DeferredDeleteScopeTypeNoLegacyPolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled and no legacy rule, only project admin/member is
# able to force delete or restore server.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_evacuate.py b/nova/tests/unit/policies/test_evacuate.py
index 491b17779c..ddc8241003 100644
--- a/nova/tests/unit/policies/test_evacuate.py
+++ b/nova/tests/unit/policies/test_evacuate.py
@@ -114,12 +114,6 @@ class EvacuateNoLegacyNoScopePolicyTest(EvacuatePolicyTest):
without_deprecated_rules = True
- def setUp(self):
- super(EvacuateNoLegacyNoScopePolicyTest, self).setUp()
- # With no legacy rule and scope disable, only project admin
- # will be able to evacuate server.
- self.project_action_authorized_contexts = [self.project_admin_context]
-
class EvacuateScopeTypePolicyTest(EvacuatePolicyTest):
"""Test Evacuate APIs policies with system scope enabled.
@@ -146,10 +140,3 @@ class EvacuateScopeTypeNoLegacyPolicyTest(EvacuateScopeTypePolicyTest):
and no more deprecated rules which means scope + new defaults.
"""
without_deprecated_rules = True
-
- def setUp(self):
- super(EvacuateScopeTypeNoLegacyPolicyTest, self).setUp()
- # This is how our RBAC will looks like. With no legacy rule
- # and scope enable, only project admin is able to evacuate
- # server.
- self.project_action_authorized_contexts = [self.project_admin_context]
diff --git a/nova/tests/unit/policies/test_floating_ips.py b/nova/tests/unit/policies/test_floating_ips.py
index 12beca9d56..26c721e9e9 100644
--- a/nova/tests/unit/policies/test_floating_ips.py
+++ b/nova/tests/unit/policies/test_floating_ips.py
@@ -152,24 +152,24 @@ class FloatingIPNoLegacyNoScopePolicyTest(FloatingIPPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
fip_policies.BASE_POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(FloatingIPNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to add/remove FIP to server.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
# With no legacy, other project roles like foo will not be able
# to operate on FIP.
self.member_authorized_contexts = [
@@ -203,10 +203,8 @@ class FloatingIPScopeTypePolicyTest(FloatingIPPolicyTest):
super(FloatingIPScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Enabling scope will not allow system users.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.member_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context,
self.project_member_context, self.project_reader_context,
@@ -228,24 +226,24 @@ class FloatingIPScopeTypeNoLegacyPolicyTest(FloatingIPScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
fip_policies.BASE_POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(FloatingIPScopeTypeNoLegacyPolicyTest, self).setUp()
# Check that system admin or owner is able to
# add/delete FIP to server.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
# With no legacy and scope enabled, system users and other
# project roles like foo will not be able to operate on FIP.
self.member_authorized_contexts = [
diff --git a/nova/tests/unit/policies/test_instance_actions.py b/nova/tests/unit/policies/test_instance_actions.py
index 2225261d5e..1ca9a66c14 100644
--- a/nova/tests/unit/policies/test_instance_actions.py
+++ b/nova/tests/unit/policies/test_instance_actions.py
@@ -140,20 +140,17 @@ class InstanceActionsNoLegacyNoScopePolicyTest(InstanceActionsPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
ia_policies.BASE_POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ia_policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ia_policies.BASE_POLICY_NAME % 'events':
- base_policy.PROJECT_ADMIN,
+ base_policy.ADMIN,
}
def setUp(self):
super(InstanceActionsNoLegacyNoScopePolicyTest, self).setUp()
- # With no legacy rule, legacy admin loose power.
- self.project_admin_authorized_contexts = [self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class InstanceActionsDeprecatedPolicyTest(base.BasePolicyTest):
@@ -231,10 +228,8 @@ class InstanceActionsScopeTypePolicyTest(InstanceActionsPolicyTest):
# With scope enabled, system users are no longer allowed.
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
@mock.patch('nova.objects.InstanceActionEventList.get_by_action')
@mock.patch('nova.objects.InstanceAction.get_by_request_id')
@@ -280,27 +275,25 @@ class InstanceActionsScopeTypePolicyTest(InstanceActionsPolicyTest):
self.assertNotIn('details', event)
-class InstanceActionsScopeTypeNoLegacyPolicyTest(InstanceActionsPolicyTest):
+class InstanceActionsScopeTypeNoLegacyPolicyTest(
+ InstanceActionsScopeTypePolicyTest):
"""Test os-instance-actions APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
ia_policies.BASE_POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ia_policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ia_policies.BASE_POLICY_NAME % 'events':
- base_policy.PROJECT_ADMIN,
+ base_policy.ADMIN,
}
def setUp(self):
super(InstanceActionsScopeTypeNoLegacyPolicyTest, self).setUp()
- self.flags(enforce_scope=True, group="oslo_policy")
# With no legacy and scope enabled, only project admin, member,
# and reader will be able to get server action and only admin
# with event details.
- self.project_admin_authorized_contexts = [self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_limits.py b/nova/tests/unit/policies/test_limits.py
index 1141f148bb..aba647caec 100644
--- a/nova/tests/unit/policies/test_limits.py
+++ b/nova/tests/unit/policies/test_limits.py
@@ -95,7 +95,7 @@ class LimitsNoLegacyNoScopeTest(LimitsPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME:
- base_policy.PROJECT_ADMIN}
+ base_policy.ADMIN}
def setUp(self):
super(LimitsNoLegacyNoScopeTest, self).setUp()
@@ -141,7 +141,7 @@ class LimitsScopeTypeNoLegacyPolicyTest(LimitsScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME:
- base_policy.PROJECT_ADMIN}
+ base_policy.ADMIN}
def setUp(self):
super(LimitsScopeTypeNoLegacyPolicyTest, self).setUp()
diff --git a/nova/tests/unit/policies/test_lock_server.py b/nova/tests/unit/policies/test_lock_server.py
index 292821c7d2..31de5cff0c 100644
--- a/nova/tests/unit/policies/test_lock_server.py
+++ b/nova/tests/unit/policies/test_lock_server.py
@@ -139,11 +139,9 @@ class LockServerNoLegacyNoScopePolicyTest(LockServerPolicyTest):
def setUp(self):
super(LockServerNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
- # able to lock/unlock the server and only project admin can
- # override the unlock.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_admin_authorized_contexts = [self.project_admin_context]
+ # able to lock/unlock the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class LockServerScopeTypePolicyTest(LockServerPolicyTest):
@@ -160,10 +158,8 @@ class LockServerScopeTypePolicyTest(LockServerPolicyTest):
super(LockServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Enabling scope will not allow system admin to lock/unlock the server.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
@@ -178,9 +174,8 @@ class LockServerScopeTypeNoLegacyPolicyTest(LockServerScopeTypePolicyTest):
super(LockServerScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# will be able to lock/unlock the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_admin_authorized_contexts = [self.project_admin_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
class LockServerOverridePolicyTest(LockServerScopeTypeNoLegacyPolicyTest):
diff --git a/nova/tests/unit/policies/test_migrate_server.py b/nova/tests/unit/policies/test_migrate_server.py
index 43314956e6..0f750770d9 100644
--- a/nova/tests/unit/policies/test_migrate_server.py
+++ b/nova/tests/unit/policies/test_migrate_server.py
@@ -83,11 +83,6 @@ class MigrateServerNoLegacyNoScopeTest(MigrateServerPolicyTest):
without_deprecated_rules = True
- def setUp(self):
- super(MigrateServerNoLegacyNoScopeTest, self).setUp()
- self.project_admin_authorized_contexts = [
- self.project_admin_context]
-
class MigrateServerScopeTypePolicyTest(MigrateServerPolicyTest):
"""Test Migrate Server APIs policies with system scope enabled.
@@ -115,12 +110,6 @@ class MigrateServerScopeTypeNoLegacyPolicyTest(
"""
without_deprecated_rules = True
- def setUp(self):
- super(MigrateServerScopeTypeNoLegacyPolicyTest, self).setUp()
- # with no legacy rule and scope enable., only project admin is able to
- # migrate the server.
- self.project_admin_authorized_contexts = [self.project_admin_context]
-
class MigrateServerOverridePolicyTest(
MigrateServerScopeTypeNoLegacyPolicyTest):
diff --git a/nova/tests/unit/policies/test_multinic.py b/nova/tests/unit/policies/test_multinic.py
index cd35994f1b..852ff25965 100644
--- a/nova/tests/unit/policies/test_multinic.py
+++ b/nova/tests/unit/policies/test_multinic.py
@@ -83,16 +83,16 @@ class MultinicNoLegacyNoScopePolicyTest(MultinicPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.BASE_POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(MultinicNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to add/remove the fixed ip.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class MultinicScopeTypePolicyTest(MultinicPolicyTest):
@@ -111,10 +111,8 @@ class MultinicScopeTypePolicyTest(MultinicPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
# Enabling scope will not allow system admin to add/remove
# the fixed ip.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class MultinicScopeTypeNoLegacyPolicyTest(MultinicScopeTypePolicyTest):
@@ -124,13 +122,13 @@ class MultinicScopeTypeNoLegacyPolicyTest(MultinicScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.BASE_POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(MultinicScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# will be able to add/remove the fixed ip.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_networks.py b/nova/tests/unit/policies/test_networks.py
index 25011859e3..9c3e0b735a 100644
--- a/nova/tests/unit/policies/test_networks.py
+++ b/nova/tests/unit/policies/test_networks.py
@@ -73,9 +73,9 @@ class NetworksNoLegacyNoScopePolicyTest(NetworksPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_ROOT % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_ROOT % 'show':
- base_policy.PROJECT_READER}
+ base_policy.PROJECT_READER_OR_ADMIN}
def setUp(self):
super(NetworksNoLegacyNoScopePolicyTest, self).setUp()
@@ -120,9 +120,9 @@ class NetworksScopeTypeNoLegacyPolicyTest(NetworksScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_ROOT % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_ROOT % 'show':
- base_policy.PROJECT_READER}
+ base_policy.PROJECT_READER_OR_ADMIN}
def setUp(self):
super(NetworksScopeTypeNoLegacyPolicyTest, self).setUp()
diff --git a/nova/tests/unit/policies/test_pause_server.py b/nova/tests/unit/policies/test_pause_server.py
index aa27a7c701..86a3e616dd 100644
--- a/nova/tests/unit/policies/test_pause_server.py
+++ b/nova/tests/unit/policies/test_pause_server.py
@@ -109,8 +109,8 @@ class PauseServerNoLegacyNoScopePolicyTest(PauseServerPolicyTest):
super(PauseServerNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to pause/unpause the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class PauseServerScopeTypePolicyTest(PauseServerPolicyTest):
@@ -127,10 +127,8 @@ class PauseServerScopeTypePolicyTest(PauseServerPolicyTest):
super(PauseServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Enabling scope will not allow system admin to pause/unpause the server.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class PauseServerScopeTypeNoLegacyPolicyTest(PauseServerScopeTypePolicyTest):
@@ -143,5 +141,5 @@ class PauseServerScopeTypeNoLegacyPolicyTest(PauseServerScopeTypePolicyTest):
super(PauseServerScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# will be able to pause/unpause the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_remote_consoles.py b/nova/tests/unit/policies/test_remote_consoles.py
index a01efd8e42..a441d1c550 100644
--- a/nova/tests/unit/policies/test_remote_consoles.py
+++ b/nova/tests/unit/policies/test_remote_consoles.py
@@ -79,8 +79,8 @@ class RemoteConsolesNoLegacyNoScopePolicyTest(RemoteConsolesPolicyTest):
super(RemoteConsolesNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to get server remote consoles.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class RemoteConsolesScopeTypePolicyTest(RemoteConsolesPolicyTest):
@@ -98,10 +98,8 @@ class RemoteConsolesScopeTypePolicyTest(RemoteConsolesPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
# Enabling scope will not allow system admin to get server
# remote console.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class RemoteConsolesScopeTypeNoLegacyPolicyTest(
@@ -116,5 +114,5 @@ class RemoteConsolesScopeTypeNoLegacyPolicyTest(
super(RemoteConsolesScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# will be able to get server remote console.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_rescue.py b/nova/tests/unit/policies/test_rescue.py
index a8e41c8631..120809877c 100644
--- a/nova/tests/unit/policies/test_rescue.py
+++ b/nova/tests/unit/policies/test_rescue.py
@@ -108,16 +108,16 @@ class RescueServerNoLegacyNoScopePolicyTest(RescueServerPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
rs_policies.UNRESCUE_POLICY_NAME:
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
rs_policies.BASE_POLICY_NAME:
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(RescueServerNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to rescue/unrescue the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class RescueServerScopeTypePolicyTest(RescueServerPolicyTest):
@@ -135,10 +135,8 @@ class RescueServerScopeTypePolicyTest(RescueServerPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
# Enabling scope will not allow system admin to rescue/unrescue the
# server.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class RescueServerScopeTypeNoLegacyPolicyTest(RescueServerScopeTypePolicyTest):
@@ -149,13 +147,13 @@ class RescueServerScopeTypeNoLegacyPolicyTest(RescueServerScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
rs_policies.UNRESCUE_POLICY_NAME:
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
rs_policies.BASE_POLICY_NAME:
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(RescueServerScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# will be able to rescue/unrescue the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_security_groups.py b/nova/tests/unit/policies/test_security_groups.py
index b7afac26cb..a9d2f484ba 100644
--- a/nova/tests/unit/policies/test_security_groups.py
+++ b/nova/tests/unit/policies/test_security_groups.py
@@ -104,22 +104,20 @@ class ServerSecurityGroupsNoLegacyNoScopePolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(ServerSecurityGroupsNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to add/remove SG to server and reader to get SG.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context,
- ]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class SecurityGroupsPolicyTest(base.BasePolicyTest):
@@ -243,19 +241,19 @@ class SecurityGroupsNoLegacyNoScopePolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'get':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'update':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'rule:create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'rule:delete':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(SecurityGroupsNoLegacyNoScopePolicyTest, self).setUp()
@@ -321,15 +319,10 @@ class ServerSecurityGroupsScopeTypePolicyTest(ServerSecurityGroupsPolicyTest):
super(ServerSecurityGroupsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Enabling scope will not allow system users.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context
- ]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerSecurityGroupsScopeTypeNoLegacyPolicyTest(
@@ -340,23 +333,21 @@ class ServerSecurityGroupsScopeTypeNoLegacyPolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(ServerSecurityGroupsScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# will be able to add/remove an SG on their server, and reader
# will be able to get the server's SGs.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context
- ]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
class SecurityGroupsNoLegacyPolicyTest(SecurityGroupsScopeTypePolicyTest):
@@ -366,19 +357,19 @@ class SecurityGroupsNoLegacyPolicyTest(SecurityGroupsScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'get':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'update':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'rule:create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'rule:delete':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(SecurityGroupsNoLegacyPolicyTest, self).setUp()
diff --git a/nova/tests/unit/policies/test_server_diagnostics.py b/nova/tests/unit/policies/test_server_diagnostics.py
index 932f5e2033..4a4b192baa 100644
--- a/nova/tests/unit/policies/test_server_diagnostics.py
+++ b/nova/tests/unit/policies/test_server_diagnostics.py
@@ -66,11 +66,6 @@ class ServerDiagnosticsNoLegacyNoScopeTest(ServerDiagnosticsPolicyTest):
without_deprecated_rules = True
- def setUp(self):
- super(ServerDiagnosticsNoLegacyNoScopeTest, self).setUp()
- self.project_admin_authorized_contexts = [
- self.project_admin_context]
-
class ServerDiagnosticsScopeTypePolicyTest(ServerDiagnosticsPolicyTest):
"""Test Server Diagnostics APIs policies with system scope enabled.
@@ -98,12 +93,6 @@ class ServerDiagnosticsScopeTypeNoLegacyPolicyTest(
"""
without_deprecated_rules = True
- def setUp(self):
- super(ServerDiagnosticsScopeTypeNoLegacyPolicyTest, self).setUp()
- # with no legacy rule and scope enable., only project admin is able to
- # get server diagnostics.
- self.project_admin_authorized_contexts = [self.project_admin_context]
-
class ServerDiagnosticsOverridePolicyTest(
ServerDiagnosticsScopeTypeNoLegacyPolicyTest):
diff --git a/nova/tests/unit/policies/test_server_groups.py b/nova/tests/unit/policies/test_server_groups.py
index f93855b175..b0df7ccb89 100644
--- a/nova/tests/unit/policies/test_server_groups.py
+++ b/nova/tests/unit/policies/test_server_groups.py
@@ -163,12 +163,10 @@ class ServerGroupNoLegacyNoScopePolicyTest(ServerGroupPolicyTest):
super(ServerGroupNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy, only project admin or member will be able to
# delete the SG, and reader will also be able to get the SG.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
-
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
# Even with no legacy rule, legacy admin is allowed to create an SG
# using the requesting context's project_id. Same for listing SGs.
@@ -205,16 +203,10 @@ class ServerGroupScopeTypePolicyTest(ServerGroupPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system users are disallowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context,
- ]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context,
- ]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_create_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context,
@@ -244,17 +236,16 @@ class ServerGroupScopeTypeNoLegacyPolicyTest(ServerGroupScopeTypePolicyTest):
def setUp(self):
super(ServerGroupScopeTypeNoLegacyPolicyTest, self).setUp()
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
self.project_create_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context,
self.project_member_context,
self.other_project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
diff --git a/nova/tests/unit/policies/test_server_ips.py b/nova/tests/unit/policies/test_server_ips.py
index f0ce600705..b837d2d0e2 100644
--- a/nova/tests/unit/policies/test_server_ips.py
+++ b/nova/tests/unit/policies/test_server_ips.py
@@ -84,10 +84,8 @@ class ServerIpsNoLegacyNoScopePolicyTest(ServerIpsPolicyTest):
super(ServerIpsNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy, only project admin, member, and reader will be able
# to get their server IP addresses.
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context,
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerIpsScopeTypePolicyTest(ServerIpsPolicyTest):
@@ -105,11 +103,8 @@ class ServerIpsScopeTypePolicyTest(ServerIpsPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system users will not be able
# to get the server IP addresses.
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerIpsScopeTypeNoLegacyPolicyTest(ServerIpsScopeTypePolicyTest):
@@ -120,9 +115,7 @@ class ServerIpsScopeTypeNoLegacyPolicyTest(ServerIpsScopeTypePolicyTest):
def setUp(self):
super(ServerIpsScopeTypeNoLegacyPolicyTest, self).setUp()
- # With no legacy and scope enable, only project admin, member,
+ # With no legacy and scope enabled, only admin, member,
# and reader will be able to get their server IP addresses.
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_server_metadata.py b/nova/tests/unit/policies/test_server_metadata.py
index 8b95d05894..cf4fb19e7b 100644
--- a/nova/tests/unit/policies/test_server_metadata.py
+++ b/nova/tests/unit/policies/test_server_metadata.py
@@ -119,11 +119,10 @@ class ServerMetadataNoLegacyNoScopePolicyTest(ServerMetadataPolicyTest):
def setUp(self):
super(ServerMetadataNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, legacy admin loses power.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerMetadataScopeTypePolicyTest(ServerMetadataPolicyTest):
@@ -140,12 +139,10 @@ class ServerMetadataScopeTypePolicyTest(ServerMetadataPolicyTest):
super(ServerMetadataScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system users are no longer allowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_reader_authorized_contexts = (
- self.project_member_authorized_contexts)
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerMetadataScopeTypeNoLegacyPolicyTest(
@@ -160,8 +157,7 @@ class ServerMetadataScopeTypeNoLegacyPolicyTest(
super(ServerMetadataScopeTypeNoLegacyPolicyTest, self).setUp()
# With no legacy and scope enabled, only project admin, member,
# and reader are allowed to operate on server metadata.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_server_migrations.py b/nova/tests/unit/policies/test_server_migrations.py
index bf69166b53..b17d4ded1d 100644
--- a/nova/tests/unit/policies/test_server_migrations.py
+++ b/nova/tests/unit/policies/test_server_migrations.py
@@ -93,11 +93,6 @@ class ServerMigrationsNoLegacyNoScopeTest(ServerMigrationsPolicyTest):
without_deprecated_rules = True
- def setUp(self):
- super(ServerMigrationsNoLegacyNoScopeTest, self).setUp()
- self.project_admin_authorized_contexts = [
- self.project_admin_context]
-
class ServerMigrationsScopeTypePolicyTest(ServerMigrationsPolicyTest):
"""Test Server Migrations APIs policies with system scope enabled.
@@ -124,12 +119,6 @@ class ServerMigrationsScopeTypeNoLegacyPolicyTest(
"""
without_deprecated_rules = True
- def setUp(self):
- super(ServerMigrationsScopeTypeNoLegacyPolicyTest, self).setUp()
- # Check that admin is able to perform operations
- # for server migrations.
- self.project_admin_authorized_contexts = [self.project_admin_context]
-
class ServerMigrationsOverridePolicyTest(
ServerMigrationsScopeTypeNoLegacyPolicyTest):
diff --git a/nova/tests/unit/policies/test_server_password.py b/nova/tests/unit/policies/test_server_password.py
index 48f2046693..b163c6c562 100644
--- a/nova/tests/unit/policies/test_server_password.py
+++ b/nova/tests/unit/policies/test_server_password.py
@@ -80,18 +80,17 @@ class ServerPasswordNoLegacyNoScopePolicyTest(ServerPasswordPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.BASE_POLICY_NAME % 'clear':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(ServerPasswordNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, legacy admin loses power.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerPasswordScopeTypePolicyTest(ServerPasswordPolicyTest):
@@ -108,12 +107,10 @@ class ServerPasswordScopeTypePolicyTest(ServerPasswordPolicyTest):
super(ServerPasswordScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system users are no longer allowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_reader_authorized_contexts = (
- self.project_member_authorized_contexts)
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerPasswordScopeTypeNoLegacyPolicyTest(
@@ -124,16 +121,15 @@ class ServerPasswordScopeTypeNoLegacyPolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.BASE_POLICY_NAME % 'clear':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(ServerPasswordScopeTypeNoLegacyPolicyTest, self).setUp()
# With no legacy and scope enabled, only project admin, member,
# and reader are allowed to operate on the server password.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_server_tags.py b/nova/tests/unit/policies/test_server_tags.py
index 1d905e2b3d..412177408c 100644
--- a/nova/tests/unit/policies/test_server_tags.py
+++ b/nova/tests/unit/policies/test_server_tags.py
@@ -132,11 +132,10 @@ class ServerTagsNoLegacyNoScopePolicyTest(ServerTagsPolicyTest):
def setUp(self):
super(ServerTagsNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, legacy admin loses power.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerTagsScopeTypePolicyTest(ServerTagsPolicyTest):
@@ -153,12 +152,10 @@ class ServerTagsScopeTypePolicyTest(ServerTagsPolicyTest):
super(ServerTagsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system users are no longer allowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_reader_authorized_contexts = (
- self.project_member_authorized_contexts)
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerTagsScopeTypeNoLegacyPolicyTest(ServerTagsScopeTypePolicyTest):
@@ -172,8 +169,7 @@ class ServerTagsScopeTypeNoLegacyPolicyTest(ServerTagsScopeTypePolicyTest):
super(ServerTagsScopeTypeNoLegacyPolicyTest, self).setUp()
# With no legacy and scope enabled, only project admin, member,
# and reader are allowed to operate on server tags.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_server_topology.py b/nova/tests/unit/policies/test_server_topology.py
index 8624c3e7e7..e2f81dfaad 100644
--- a/nova/tests/unit/policies/test_server_topology.py
+++ b/nova/tests/unit/policies/test_server_topology.py
@@ -98,11 +98,8 @@ class ServerTopologyNoLegacyNoScopePolicyTest(ServerTopologyPolicyTest):
def setUp(self):
super(ServerTopologyNoLegacyNoScopePolicyTest, self).setUp()
- # With no legacy rule, legacy admin loose power.
- self.project_admin_authorized_contexts = [self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerTopologyScopeTypePolicyTest(ServerTopologyPolicyTest):
@@ -121,10 +118,8 @@ class ServerTopologyScopeTypePolicyTest(ServerTopologyPolicyTest):
# With scope enabled, system users are no longer allowed.
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerTopologyScopeTypeNoLegacyPolicyTest(
@@ -138,9 +133,6 @@ class ServerTopologyScopeTypeNoLegacyPolicyTest(
def setUp(self):
super(ServerTopologyScopeTypeNoLegacyPolicyTest, self).setUp()
# With no legacy rule and scope enabled, only project admin, member,
- # and reader will be able to get server topology and only admin
- # with host info.
- self.project_admin_authorized_contexts = [self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ # and reader will be able to get server topology.
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_servers.py b/nova/tests/unit/policies/test_servers.py
index af436223ae..eee1e4ba51 100644
--- a/nova/tests/unit/policies/test_servers.py
+++ b/nova/tests/unit/policies/test_servers.py
@@ -1324,7 +1324,7 @@ class ServersNoLegacyNoScopeTest(ServersPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.SERVERS % 'show:flavor-extra-specs':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
}
def setUp(self):
@@ -1332,23 +1332,14 @@ class ServersNoLegacyNoScopeTest(ServersPolicyTest):
# Disabling legacy rule support means that we no longer allow
# random roles on our project to take action on our
- # resources. We also do not allow admin on other projects
- # (i.e. legacy_admin), nor system (because it's admin on no
- # project).
- self.reduce_set('project_action_authorized', set([
- self.project_admin_context, self.project_member_context,
- ]))
-
- self.reduce_set('project_admin_authorized', set([
- self.project_admin_context
- ]))
+ # resources. Legacy admin will have access.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
# The only additional role that can read our resources is our
# own project_reader.
self.project_reader_authorized_contexts = (
- self.project_action_authorized_contexts |
- set([self.project_reader_context])
- )
+ self.project_reader_or_admin_with_no_scope_no_legacy)
# Disabling legacy support means random roles lose power to
# see everything in their project.
@@ -1438,7 +1429,7 @@ class ServersNoLegacyPolicyTest(ServersScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.SERVERS % 'show:flavor-extra-specs':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
}
def setUp(self):
@@ -1448,15 +1439,8 @@ class ServersNoLegacyPolicyTest(ServersScopeTypePolicyTest):
# powerful on our project. Also, we drop the "any role on the
# project means you can do stuff" behavior, so project_reader
# and project_foo lose power.
- self.reduce_set('project_action_authorized', set([
- self.project_admin_context,
- self.project_member_context,
- ]))
-
- # With no legacy rule and scope checks enable, only project
- # admin can do admin things on project resource.
- self.reduce_set('project_admin_authorized',
- set([self.project_admin_context]))
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
# Only project_reader has additional read access to our
# project resources.
diff --git a/nova/tests/unit/policies/test_shelve.py b/nova/tests/unit/policies/test_shelve.py
index 87bff30178..052f844c3d 100644
--- a/nova/tests/unit/policies/test_shelve.py
+++ b/nova/tests/unit/policies/test_shelve.py
@@ -122,9 +122,8 @@ class ShelveServerNoLegacyNoScopePolicyTest(ShelveServerPolicyTest):
# With no legacy rule, only project admin or member will be
# able to shelve/unshelve the server, and only project admin can
# shelve offload the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_admin_authorized_contexts = [self.project_admin_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class ShelveServerScopeTypePolicyTest(ShelveServerPolicyTest):
@@ -142,10 +141,8 @@ class ShelveServerScopeTypePolicyTest(ShelveServerPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
# Enabling scope will not allow the system admin to shelve/unshelve
# the server.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
@@ -160,6 +157,5 @@ class ShelveServerScopeTypeNoLegacyPolicyTest(ShelveServerScopeTypePolicyTest):
super(ShelveServerScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# will be able to shelve/unshelve the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_admin_authorized_contexts = [self.project_admin_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_simple_tenant_usage.py b/nova/tests/unit/policies/test_simple_tenant_usage.py
index 1dbd0715d1..d6aa7af901 100644
--- a/nova/tests/unit/policies/test_simple_tenant_usage.py
+++ b/nova/tests/unit/policies/test_simple_tenant_usage.py
@@ -70,10 +70,8 @@ class SimpleTenantUsageNoLegacyNoScopePolicyTest(SimpleTenantUsagePolicyTest):
super(SimpleTenantUsageNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, other project roles like foo will not be able
# to get tenant usage.
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context,
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class SimpleTenantUsageScopeTypePolicyTest(SimpleTenantUsagePolicyTest):
@@ -92,11 +90,8 @@ class SimpleTenantUsageScopeTypePolicyTest(SimpleTenantUsagePolicyTest):
# With scope enabled, system users are no longer allowed.
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context,
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class SimpleTenantUsageScopeTypeNoLegacyPolicyTest(
@@ -109,7 +104,5 @@ class SimpleTenantUsageScopeTypeNoLegacyPolicyTest(
def setUp(self):
super(SimpleTenantUsageScopeTypeNoLegacyPolicyTest, self).setUp()
- self.project_reader_authorized_contexts = [
- self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_suspend_server.py b/nova/tests/unit/policies/test_suspend_server.py
index 729f13b4b3..7d3cde2799 100644
--- a/nova/tests/unit/policies/test_suspend_server.py
+++ b/nova/tests/unit/policies/test_suspend_server.py
@@ -107,8 +107,8 @@ class SuspendServerNoLegacyNoScopePolicyTest(SuspendServerPolicyTest):
super(SuspendServerNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to suspend/resume the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class SuspendServerScopeTypePolicyTest(SuspendServerPolicyTest):
@@ -125,10 +125,8 @@ class SuspendServerScopeTypePolicyTest(SuspendServerPolicyTest):
super(SuspendServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Enabling scope will not allow the system admin to suspend/resume the server.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class SuspendServerScopeTypeNoLegacyTest(SuspendServerScopeTypePolicyTest):
@@ -143,5 +141,5 @@ class SuspendServerScopeTypeNoLegacyTest(SuspendServerScopeTypePolicyTest):
super(SuspendServerScopeTypeNoLegacyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# will be able to suspend/resume the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_tenant_networks.py b/nova/tests/unit/policies/test_tenant_networks.py
index dedcc3cfa9..a5bc614902 100644
--- a/nova/tests/unit/policies/test_tenant_networks.py
+++ b/nova/tests/unit/policies/test_tenant_networks.py
@@ -72,9 +72,9 @@ class TenantNetworksNoLegacyNoScopePolicyTest(TenantNetworksPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER}
+ base_policy.PROJECT_READER_OR_ADMIN}
def setUp(self):
super(TenantNetworksNoLegacyNoScopePolicyTest, self).setUp()
@@ -120,9 +120,9 @@ class TenantNetworksScopeTypeNoLegacyPolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER}
+ base_policy.PROJECT_READER_OR_ADMIN}
def setUp(self):
super(TenantNetworksScopeTypeNoLegacyPolicyTest, self).setUp()
diff --git a/nova/tests/unit/policies/test_volumes.py b/nova/tests/unit/policies/test_volumes.py
index 53985e7ab1..896881c03f 100644
--- a/nova/tests/unit/policies/test_volumes.py
+++ b/nova/tests/unit/policies/test_volumes.py
@@ -215,14 +215,12 @@ class VolumeAttachNoLegacyNoScopePolicyTest(VolumeAttachPolicyTest):
def setUp(self):
super(VolumeAttachNoLegacyNoScopePolicyTest, self).setUp()
- # With no legacy rule, only project admin, member, or reader will be
+ # With no legacy rule, only admin, member, or reader will be
# able to perform volume attachment operations on their own project.
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
-
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class VolumeAttachScopeTypePolicyTest(VolumeAttachPolicyTest):
@@ -242,15 +240,10 @@ class VolumeAttachScopeTypePolicyTest(VolumeAttachPolicyTest):
# Enabling scope will not allow the system admin to perform
# volume attachments.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
-
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
@@ -268,11 +261,10 @@ class VolumeAttachScopeTypeNoLegacyPolicyTest(VolumeAttachScopeTypePolicyTest):
# With scope enabled and no legacy rule, system users are not
# allowed; project admin/member/reader will be able to perform
# volume attachment operations on their own project.
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
class VolumesPolicyTest(base.BasePolicyTest):
@@ -403,25 +395,25 @@ class VolumesNoLegacyNoScopePolicyTest(VolumesPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
v_policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'detail':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:detail':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
}
def setUp(self):
@@ -482,25 +474,25 @@ class VolumesScopeTypeNoLegacyPolicyTest(VolumesScopeTypePolicyTest):
rules_without_deprecation = {
v_policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'detail':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:detail':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
}
def setUp(self):
diff --git a/nova/tests/unit/test_policy.py b/nova/tests/unit/test_policy.py
index e4ae09f91c..871e836d87 100644
--- a/nova/tests/unit/test_policy.py
+++ b/nova/tests/unit/test_policy.py
@@ -554,7 +554,8 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
special_rules = ('admin_api', 'admin_or_owner', 'context_is_admin',
'os_compute_api:os-quota-class-sets:show',
'project_admin_api', 'project_member_api',
- 'project_reader_api', 'project_reader_or_admin')
+ 'project_reader_api', 'project_member_or_admin',
+ 'project_reader_or_admin')
result = set(rules.keys()) - set(self.admin_only_rules +
self.admin_or_owner_rules +
self.allow_all_rules +
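The extra 'project_member_or_admin' entry in special_rules exists because the policy base now pairs each member/reader rule with an admin escape hatch. A sketch of the corresponding oslo.policy check strings, inferred from their usage in this diff rather than copied from nova/policies/base.py:

# Assumed shape of the new base rules; see nova/policies/base.py for the
# authoritative definitions.
PROJECT_MEMBER_OR_ADMIN = 'rule:project_member_api or rule:context_is_admin'
PROJECT_READER_OR_ADMIN = 'rule:project_reader_api or rule:context_is_admin'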
diff --git a/nova/tests/unit/virt/libvirt/test_blockinfo.py b/nova/tests/unit/virt/libvirt/test_blockinfo.py
index 9505dbe31c..5a0dbb40ce 100644
--- a/nova/tests/unit/virt/libvirt/test_blockinfo.py
+++ b/nova/tests/unit/virt/libvirt/test_blockinfo.py
@@ -1172,7 +1172,10 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'device_type': 'lame_type',
'delete_on_termination': True},
{'disk_bus': 'sata', 'guest_format': None,
- 'device_name': '/dev/sda', 'size': 3}]
+ 'device_name': '/dev/sda', 'size': 3},
+ {'encrypted': True, 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': '{"json": "options"}'}]
expected = [{'dev': 'vds', 'type': 'disk', 'bus': 'usb'},
{'dev': 'vdb', 'type': 'disk',
'bus': 'virtio', 'format': 'ext4'},
@@ -1181,7 +1184,11 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'bus': 'scsi', 'boot_index': '1'},
{'dev': 'vdo', 'type': 'disk',
'bus': 'scsi', 'boot_index': '2'},
- {'dev': 'sda', 'type': 'disk', 'bus': 'sata'}]
+ {'dev': 'sda', 'type': 'disk', 'bus': 'sata'},
+ {'dev': 'vda', 'type': 'disk', 'bus': 'virtio',
+ 'encrypted': True, 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': {'json': 'options'}}]
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
for bdm, expected in zip(bdms, expected):
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index b994957879..b90d6a2ef6 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -3131,6 +3131,41 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(membacking.locked)
self.assertFalse(membacking.sharedpages)
+ def test_get_guest_memory_backing_config_locked_flavor(self):
+ extra_specs = {
+ "hw:locked_memory": "True",
+ "hw:mem_page_size": 1000,
+ }
+ flavor = objects.Flavor(
+ name='m1.small', memory_mb=6, vcpus=28, root_gb=496,
+ ephemeral_gb=8128, swap=33550336, extra_specs=extra_specs)
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ membacking = drvr._get_guest_memory_backing_config(
+ None, None, flavor, image_meta)
+ self.assertTrue(membacking.locked)
+
+ def test_get_guest_memory_backing_config_locked_image_meta(self):
+ extra_specs = {}
+ flavor = objects.Flavor(
+ name='m1.small',
+ memory_mb=6,
+ vcpus=28,
+ root_gb=496,
+ ephemeral_gb=8128,
+ swap=33550336,
+ extra_specs=extra_specs)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {
+ "hw_locked_memory": "True",
+ "hw_mem_page_size": 1000,
+ }})
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ membacking = drvr._get_guest_memory_backing_config(
+ None, None, flavor, image_meta)
+ self.assertTrue(membacking.locked)
+
def test_get_guest_memory_backing_config_realtime_invalid_share(self):
"""Test behavior when there is no pool of shared CPUS on which to place
the emulator threads, isolating them from the instance CPU processes.
@@ -13696,7 +13731,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_fetch.assert_called_once_with(self.context, instance,
fallback_from_host=None)
mock_create.assert_called_once_with(
- disk_info['type'], mock.ANY, disk_info['virt_disk_size'])
+ '/fake/instance/dir/foo',
+ disk_info['type'],
+ disk_info['virt_disk_size'],
+ )
mock_exists.assert_called_once_with('/fake/instance/dir/foo')
def test_create_images_and_backing_qcow2(self):
@@ -13728,7 +13766,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.context, instance,
"/fake/instance/dir", disk_info)
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch('nova.privsep.path.utime')
def test_create_images_and_backing_images_not_exist_fallback(
self, mock_utime, mock_create_cow_image):
@@ -13808,7 +13846,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_utime.assert_called()
mock_create_cow_image.assert_called_once_with(
- backfile_path, '/fake/instance/dir/disk_path', virt_disk_size)
+ '/fake/instance/dir/disk_path',
+ 'qcow2',
+ virt_disk_size,
+ backing_file=backfile_path,
+ )
@mock.patch('nova.virt.libvirt.imagebackend.Image.exists',
new=mock.Mock(return_value=True))
@@ -13901,7 +13943,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertFalse(mock_fetch_image.called)
@mock.patch('nova.privsep.path.utime')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
def test_create_images_and_backing_ephemeral_gets_created(
self, mock_create_cow_image, mock_utime):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -13954,14 +13996,16 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# TODO(efried): Should these be disk_info[path]??
mock_create_cow_image.assert_has_calls([
mock.call(
- root_backing,
CONF.instances_path + '/disk',
- disk_info_byname['disk']['virt_disk_size']
+ 'qcow2',
+ disk_info_byname['disk']['virt_disk_size'],
+ backing_file=root_backing,
),
mock.call(
- ephemeral_backing,
CONF.instances_path + '/disk.local',
- disk_info_byname['disk.local']['virt_disk_size']
+ 'qcow2',
+ disk_info_byname['disk.local']['virt_disk_size'],
+ backing_file=ephemeral_backing,
),
])
@@ -15643,7 +15687,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('nova.privsep.path.utime')
@mock.patch('nova.virt.libvirt.utils.fetch_image')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
def test_create_ephemeral_specified_fs_not_valid(
self, mock_create_cow_image, mock_fetch_image, mock_utime):
CONF.set_override('default_ephemeral_format', 'ext4')
@@ -15659,10 +15703,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance = objects.Instance(**instance_ref)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
image_meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance,
- image_meta)
- disk_info['mapping'].pop('disk.local')
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, image_meta,
+ block_device_info=block_device_info)
with test.nested(
mock.patch('oslo_concurrency.processutils.execute'),
@@ -20155,7 +20198,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock.patch.object(drvr._conn, 'defineXML', create=True),
mock.patch('nova.virt.libvirt.utils.get_disk_size'),
mock.patch('nova.virt.libvirt.utils.get_disk_backing_file'),
- mock.patch('nova.virt.libvirt.utils.create_cow_image'),
+ mock.patch('nova.virt.libvirt.utils.create_image'),
mock.patch('nova.virt.libvirt.utils.extract_snapshot'),
mock.patch.object(drvr, '_set_quiesced'),
mock.patch.object(drvr, '_can_quiesce')
@@ -20198,7 +20241,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_size.assert_called_once_with(srcfile, format="qcow2")
mock_backing.assert_called_once_with(srcfile, basename=False,
format="qcow2")
- mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
+ mock_create_cow.assert_called_once_with(
+ dltfile, 'qcow2', 1004009, backing_file=bckfile)
mock_chown.assert_called_once_with(dltfile, uid=os.getuid())
mock_snapshot.assert_called_once_with(dltfile, "qcow2",
dstfile, "qcow2")
@@ -20542,8 +20586,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr._get_disk_config_image_type())
self.assertEqual(2, drvr.image_backend.by_name.call_count)
- call1 = mock.call(instance, 'disk.config', 'rbd')
- call2 = mock.call(instance, 'disk.config', 'flat')
+ call1 = mock.call(instance, 'disk.config', 'rbd',
+ disk_info_mapping=disk_mapping['disk.config'])
+ call2 = mock.call(instance, 'disk.config', 'flat',
+ disk_info_mapping=disk_mapping['disk.config'])
drvr.image_backend.by_name.assert_has_calls([call1, call2])
self.assertEqual(mock.sentinel.diskconfig, diskconfig)
@@ -23160,6 +23206,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
}
instance = self._create_instance(params=inst_params)
+ image_meta = objects.ImageMeta.from_dict({})
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, image_meta)
disk_images = {'image_id': instance.image_ref}
instance_dir = libvirt_utils.get_instance_path(instance)
disk_path = os.path.join(instance_dir, 'disk')
@@ -23179,7 +23228,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
]
drvr._create_and_inject_local_root(
- self.context, instance, False, '', disk_images, None, None)
+ self.context, instance, disk_info['mapping'], False, '',
+ disk_images, None, None)
mock_fetch_calls = [
mock.call(test.MatchType(nova.virt.libvirt.imagebackend.Qcow2),
@@ -23262,9 +23312,13 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
# config_drive is True by default, configdrive.required_by()
# returns True
instance_ref = self._create_instance()
+ image_meta = objects.ImageMeta.from_dict({})
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance_ref, image_meta)
disk_images = {'image_id': None}
- drvr._create_and_inject_local_root(self.context, instance_ref, False,
+ drvr._create_and_inject_local_root(self.context, instance_ref,
+ disk_info['mapping'], False,
'', disk_images, get_injection_info(),
None)
self.assertFalse(mock_inject.called)
@@ -23284,6 +23338,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_image.get.return_value = {'locations': [], 'disk_format': 'raw'}
instance = self._create_instance()
+ image_meta = objects.ImageMeta.from_dict({})
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, image_meta)
disk_images = {'image_id': 'foo'}
self.flags(images_type='rbd', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -23294,6 +23351,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_fetch.reset_mock()
drvr._create_and_inject_local_root(self.context,
instance,
+ disk_info['mapping'],
False,
'',
disk_images,
@@ -28300,7 +28358,7 @@ class _BaseSnapshotTests(test.NoDBTestCase):
@mock.patch.object(host.Host, '_get_domain')
@mock.patch('nova.virt.libvirt.utils.get_disk_size',
new=mock.Mock(return_value=0))
- @mock.patch('nova.virt.libvirt.utils.create_cow_image',
+ @mock.patch('nova.virt.libvirt.utils.create_image',
new=mock.Mock())
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file',
new=mock.Mock(return_value=None))
diff --git a/nova/tests/unit/virt/libvirt/test_imagebackend.py b/nova/tests/unit/virt/libvirt/test_imagebackend.py
index fdac091985..0dc1009c92 100644
--- a/nova/tests/unit/virt/libvirt/test_imagebackend.py
+++ b/nova/tests/unit/virt/libvirt/test_imagebackend.py
@@ -163,7 +163,13 @@ class _ImageTestCase(object):
self.assertEqual(fs.source_file, image.path)
def test_libvirt_info(self):
- image = self.image_class(self.INSTANCE, self.NAME)
+ disk_info = {
+ 'bus': 'virtio',
+ 'dev': '/dev/vda',
+ 'type': 'cdrom',
+ }
+ image = self.image_class(
+ self.INSTANCE, self.NAME, disk_info_mapping=disk_info)
extra_specs = {
'quota:disk_read_bytes_sec': 10 * units.Mi,
'quota:disk_read_iops_sec': 1 * units.Ki,
@@ -172,15 +178,9 @@ class _ImageTestCase(object):
'quota:disk_total_bytes_sec': 30 * units.Mi,
'quota:disk_total_iops_sec': 3 * units.Ki,
}
- disk_info = {
- 'bus': 'virtio',
- 'dev': '/dev/vda',
- 'type': 'cdrom',
- }
disk = image.libvirt_info(
- disk_info, cache_mode="none", extra_specs=extra_specs,
- boot_order="1")
+ cache_mode="none", extra_specs=extra_specs, boot_order="1")
self.assertIsInstance(disk, vconfig.LibvirtConfigGuestDisk)
self.assertEqual("/dev/vda", disk.target_dev)
@@ -205,16 +205,18 @@ class _ImageTestCase(object):
get_disk_size.assert_called_once_with(image.path)
def _test_libvirt_info_scsi_with_unit(self, disk_unit):
- # The address should be set if bus is scsi and unit is set.
- # Otherwise, it should not be set at all.
- image = self.image_class(self.INSTANCE, self.NAME)
disk_info = {
'bus': 'scsi',
'dev': '/dev/sda',
'type': 'disk',
}
+ # The address should be set if bus is scsi and unit is set.
+ # Otherwise, it should not be set at all.
+ image = self.image_class(
+ self.INSTANCE, self.NAME, disk_info_mapping=disk_info)
+
disk = image.libvirt_info(
- disk_info, cache_mode='none', extra_specs={}, disk_unit=disk_unit)
+ cache_mode='none', extra_specs={}, disk_unit=disk_unit)
if disk_unit:
self.assertEqual(0, disk.device_addr.controller)
self.assertEqual(disk_unit, disk.device_addr.unit)
@@ -523,7 +525,7 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
mock_exists.assert_has_calls(exist_calls)
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch.object(os.path, 'exists', side_effect=[])
@mock.patch.object(imagebackend.Image, 'verify_base_size')
@mock.patch('nova.privsep.path.utime')
@@ -544,14 +546,14 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
mock_verify.assert_called_once_with(self.TEMPLATE_PATH, self.SIZE)
mock_create.assert_called_once_with(
- self.TEMPLATE_PATH, self.PATH, self.SIZE)
+ self.PATH, 'qcow2', self.SIZE, backing_file=self.TEMPLATE_PATH)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
mock_exist.assert_has_calls(exist_calls)
self.assertTrue(mock_sync.called)
mock_utime.assert_called()
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
@mock.patch.object(imagebackend.Qcow2, 'get_disk_size')
@@ -576,7 +578,7 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
self.assertFalse(mock_extend.called)
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
@@ -615,7 +617,7 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
mock_utime.assert_called()
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
diff --git a/nova/tests/unit/virt/libvirt/test_utils.py b/nova/tests/unit/virt/libvirt/test_utils.py
index 8c24f3fb92..0b80bde49f 100644
--- a/nova/tests/unit/virt/libvirt/test_utils.py
+++ b/nova/tests/unit/virt/libvirt/test_utils.py
@@ -104,32 +104,60 @@ class LibvirtUtilsTestCase(test.NoDBTestCase):
self.assertFalse(libvirt_utils.is_valid_hostname("foo/?com=/bin/sh"))
@mock.patch('oslo_concurrency.processutils.execute')
- def test_create_image(self, mock_execute):
- libvirt_utils.create_image('raw', '/some/path', '10G')
- libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234')
- expected_args = [(('qemu-img', 'create', '-f', 'raw',
- '/some/path', '10G'),),
- (('qemu-img', 'create', '-f', 'qcow2',
- '/some/stuff', '1234567891234'),)]
- self.assertEqual(expected_args, mock_execute.call_args_list)
-
- @mock.patch('os.path.exists', return_value=True)
- @mock.patch('oslo_concurrency.processutils.execute')
@mock.patch('nova.virt.images.qemu_img_info')
- def test_create_cow_image(self, mock_info, mock_execute, mock_exists):
- mock_execute.return_value = ('stdout', None)
+ def _test_create_image(
+ self, path, disk_format, disk_size, mock_info, mock_execute,
+ backing_file=None
+ ):
mock_info.return_value = mock.Mock(
file_format=mock.sentinel.backing_fmt,
- cluster_size=mock.sentinel.cluster_size)
- libvirt_utils.create_cow_image(mock.sentinel.backing_path,
- mock.sentinel.new_path)
- mock_info.assert_called_once_with(mock.sentinel.backing_path)
- mock_execute.assert_has_calls([mock.call(
- 'qemu-img', 'create', '-f', 'qcow2', '-o',
- 'backing_file=%s,backing_fmt=%s,cluster_size=%s' % (
- mock.sentinel.backing_path, mock.sentinel.backing_fmt,
- mock.sentinel.cluster_size),
- mock.sentinel.new_path)])
+ cluster_size=mock.sentinel.cluster_size,
+ )
+
+ libvirt_utils.create_image(
+ path, disk_format, disk_size, backing_file=backing_file)
+
+ cow_opts = []
+
+ if backing_file is None:
+ mock_info.assert_not_called()
+ else:
+ mock_info.assert_called_once_with(backing_file)
+ cow_opts = [
+ '-o',
+ f'backing_file={mock.sentinel.backing_file},'
+ f'backing_fmt={mock.sentinel.backing_fmt},'
+ f'cluster_size={mock.sentinel.cluster_size}',
+ ]
+
+ expected_args = (
+ 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f',
+ disk_format, *cow_opts, path,
+ )
+ if disk_size is not None:
+ expected_args += (disk_size,)
+
+ self.assertEqual([(expected_args,)], mock_execute.call_args_list)
+
+ def test_create_image_raw(self):
+ self._test_create_image('/some/path', 'raw', '10G')
+
+ def test_create_image_qcow2(self):
+ self._test_create_image(
+ '/some/stuff', 'qcow2', '1234567891234',
+ )
+
+ def test_create_image_backing_file(self):
+ self._test_create_image(
+ '/some/stuff', 'qcow2', '1234567891234',
+ backing_file=mock.sentinel.backing_file,
+ )
+
+ def test_create_image_size_none(self):
+ self._test_create_image(
+ '/some/stuff', 'qcow2', None,
+ backing_file=mock.sentinel.backing_file,
+ )
@ddt.unpack
@ddt.data({'fs_type': 'some_fs_type',
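The parametrised helper above pins down the exact command line the unified create_image() is expected to build. A standalone, runnable sketch of that construction, with the argument handling assumed from the assertions in the test:

# Standalone illustration of the qemu-img invocation asserted above; the
# real implementation lives in nova/virt/libvirt/utils.py.
import typing as ty


def build_qemu_img_create(
    path: str,
    disk_format: str,
    disk_size: ty.Optional[ty.Union[str, int]],
    backing_file: ty.Optional[str] = None,
    backing_fmt: str = 'qcow2',
    cluster_size: int = 65536,
) -> tuple:
    cow_opts = []
    if backing_file is not None:
        # In the real helper, backing_fmt and cluster_size are probed from
        # the backing file with qemu_img_info().
        cow_opts = [
            '-o',
            f'backing_file={backing_file},backing_fmt={backing_fmt},'
            f'cluster_size={cluster_size}',
        ]
    args = ('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f',
            disk_format, *cow_opts, path)
    if disk_size is not None:
        # Size is optional: with a backing file, qemu-img inherits the
        # backing file's virtual size when none is given.
        args += (str(disk_size),)
    return args


assert build_qemu_img_create('/some/path', 'raw', '10G') == (
    'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f', 'raw',
    '/some/path', '10G')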
diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py
index 0afedb9d74..26ec198f08 100644
--- a/nova/tests/unit/virt/test_hardware.py
+++ b/nova/tests/unit/virt/test_hardware.py
@@ -2814,6 +2814,54 @@ class NumberOfSerialPortsTest(test.NoDBTestCase):
flavor, image_meta)
+class VirtLockMemoryTestCase(test.NoDBTestCase):
+ def _test_get_locked_memory_constraint(self, spec=None, props=None):
+ flavor = objects.Flavor(vcpus=16, memory_mb=2048,
+ extra_specs=spec or {})
+ image_meta = objects.ImageMeta.from_dict({"properties": props or {}})
+ return hw.get_locked_memory_constraint(flavor, image_meta)
+
+ def test_get_locked_memory_constraint_image(self):
+ self.assertTrue(
+ self._test_get_locked_memory_constraint(
+ spec={"hw:mem_page_size": "small"},
+ props={"hw_locked_memory": "True"}))
+
+ def test_get_locked_memory_conflict(self):
+ ex = self.assertRaises(
+ exception.FlavorImageLockedMemoryConflict,
+ self._test_get_locked_memory_constraint,
+ spec={
+ "hw:locked_memory": "False",
+ "hw:mem_page_size": "small"
+ },
+ props={"hw_locked_memory": "True"}
+ )
+ ex_msg = ("locked_memory value in image (True) and flavor (False) "
+ "conflict. A consistent value is expected if both "
+ "specified.")
+ self.assertEqual(ex_msg, str(ex))
+
+ def test_get_locked_memory_constraint_forbidden(self):
+ self.assertRaises(
+ exception.LockMemoryForbidden,
+ self._test_get_locked_memory_constraint,
+ {"hw:locked_memory": "True"})
+
+ self.assertRaises(
+ exception.LockMemoryForbidden,
+ self._test_get_locked_memory_constraint,
+ {},
+ {"hw_locked_memory": "True"})
+
+ def test_get_locked_memory_constraint_image_false(self):
+ # A False locked_memory value does not raise LockMemoryForbidden
+ self.assertFalse(
+ self._test_get_locked_memory_constraint(
+ spec=None,
+ props={"hw_locked_memory": "False"}))
+
+
class VirtMemoryPagesTestCase(test.NoDBTestCase):
def test_cell_instance_pagesize(self):
cell = objects.InstanceNUMACell(
diff --git a/nova/tests/unit/virt/test_virt.py b/nova/tests/unit/virt/test_virt.py
index 935af880bc..2d108c6f2d 100644
--- a/nova/tests/unit/virt/test_virt.py
+++ b/nova/tests/unit/virt/test_virt.py
@@ -102,6 +102,33 @@ class TestVirtDriver(test.NoDBTestCase):
self.assertFalse(traits[os_traits.COMPUTE_IMAGE_TYPE_RAW])
self.assertFalse(traits[os_traits.COMPUTE_IMAGE_TYPE_VHD])
+ def test_block_device_info_get_encrypted_disks(self):
+ block_device_info = {
+ 'swap': {'device_name': '/dev/sdb', 'swap_size': 1},
+ 'image': [
+ {'device_name': '/dev/vda', 'encrypted': True},
+ ],
+ 'ephemerals': [
+ {'device_name': '/dev/vdb', 'encrypted': True},
+ {'device_name': '/dev/vdc', 'encrypted': False},
+ ],
+ }
+ disks = driver.block_device_info_get_encrypted_disks(block_device_info)
+ expected = [
+ {'device_name': '/dev/vda', 'encrypted': True},
+ {'device_name': '/dev/vdb', 'encrypted': True},
+ ]
+ self.assertEqual(expected, disks)
+ # Try removing 'image'
+ block_device_info.pop('image')
+ disks = driver.block_device_info_get_encrypted_disks(block_device_info)
+ expected = [{'device_name': '/dev/vdb', 'encrypted': True}]
+ self.assertEqual(expected, disks)
+ # Remove 'ephemerals'
+ block_device_info.pop('ephemerals')
+ disks = driver.block_device_info_get_encrypted_disks(block_device_info)
+ self.assertEqual([], disks)
+
class FakeMount(object):
def __init__(self, image, mount_dir, partition=None, device=None):
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 301d93b148..532ed1fa50 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -20,7 +20,9 @@ Driver base-classes:
types that support that contract
"""
+import itertools
import sys
+import typing as ty
import os_resource_classes as orc
import os_traits
@@ -102,6 +104,19 @@ def block_device_info_get_mapping(block_device_info):
return block_device_mapping
+def block_device_info_get_encrypted_disks(
+ block_device_info: ty.Mapping[str, ty.Any],
+) -> ty.List['nova.virt.block_device.DriverBlockDevice']:
+ block_device_info = block_device_info or {}
+ return [
+ driver_bdm for driver_bdm in itertools.chain(
+ block_device_info.get('image', []),
+ block_device_info.get('ephemerals', []),
+ )
+ if driver_bdm.get('encrypted')
+ ]
+
+
# NOTE(aspiers): When adding new capabilities, ensure they are
# mirrored in ComputeDriver.capabilities, and that the corresponding
# values should always be standard traits in os_traits. If something
@@ -318,7 +333,8 @@ class ComputeDriver(object):
admin_password, allocations, bdms, detach_block_devices,
attach_block_devices, network_info=None,
evacuate=False, block_device_info=None,
- preserve_ephemeral=False, accel_uuids=None):
+ preserve_ephemeral=False, accel_uuids=None,
+ reimage_boot_volume=False):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
@@ -356,6 +372,7 @@ class ComputeDriver(object):
:param preserve_ephemeral: True if the default ephemeral storage
partition must be preserved on rebuild
:param accel_uuids: Accelerator UUIDs.
+ :param reimage_boot_volume: Re-image the volume-backed instance.
"""
raise NotImplementedError()
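A usage sketch of the new helper, mirroring the unit test earlier in this diff; plain dicts stand in for DriverBlockDevice objects and nova is assumed importable:

from nova.virt import driver

block_device_info = {
    'swap': {'device_name': '/dev/sdb', 'swap_size': 1},
    'image': [{'device_name': '/dev/vda', 'encrypted': True}],
    'ephemerals': [
        {'device_name': '/dev/vdb', 'encrypted': True},
        {'device_name': '/dev/vdc', 'encrypted': False},
    ],
}

disks = driver.block_device_info_get_encrypted_disks(block_device_info)
# swap is never considered, and unencrypted ephemerals are filtered out.
assert [d['device_name'] for d in disks] == ['/dev/vda', '/dev/vdb']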
diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py
index ce3d232710..271a719aa2 100644
--- a/nova/virt/hardware.py
+++ b/nova/virt/hardware.py
@@ -1337,6 +1337,48 @@ def _get_constraint_mappings_from_flavor(flavor, key, func):
return hw_numa_map or None
+def get_locked_memory_constraint(
+ flavor: 'objects.Flavor',
+ image_meta: 'objects.ImageMeta',
+) -> ty.Optional[bool]:
+ """Validate and return the requested locked memory.
+
+ :param flavor: ``nova.objects.Flavor`` instance
+ :param image_meta: ``nova.objects.ImageMeta`` instance
+ :raises: exception.LockMemoryForbidden if mem_page_size is not set
+ while a locked_memory value is provided in the image or flavor.
+ :returns: The locked memory flag requested.
+ """
+ mem_page_size_flavor, mem_page_size_image = _get_flavor_image_meta(
+ 'mem_page_size', flavor, image_meta)
+
+ locked_memory_flavor, locked_memory_image = _get_flavor_image_meta(
+ 'locked_memory', flavor, image_meta)
+
+ if locked_memory_flavor is not None:
+ # locked_memory_image is already a boolean
+ locked_memory_flavor = strutils.bool_from_string(locked_memory_flavor)
+
+ if locked_memory_image is not None and (
+ locked_memory_flavor != locked_memory_image
+ ):
+ # We don't allow conflicting values in the flavor and the image
+ raise exception.FlavorImageLockedMemoryConflict(
+ image=locked_memory_image, flavor=locked_memory_flavor)
+
+ locked_memory = locked_memory_flavor
+
+ else:
+ locked_memory = locked_memory_image
+
+ if locked_memory and not (
+ mem_page_size_flavor or mem_page_size_image
+ ):
+ raise exception.LockMemoryForbidden()
+
+ return locked_memory
+
+
def _get_numa_cpu_constraint(
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
@@ -2107,6 +2149,8 @@ def numa_get_constraints(flavor, image_meta):
pagesize = _get_numa_pagesize_constraint(flavor, image_meta)
vpmems = get_vpmems(flavor)
+ get_locked_memory_constraint(flavor, image_meta)
+
# If 'hw:cpu_dedicated_mask' is not found in flavor extra specs, the
# 'dedicated_cpus' variable is None, while we hope it being an empty set.
dedicated_cpus = dedicated_cpus or set()
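A usage sketch of get_locked_memory_constraint() covering the allowed and forbidden cases; the Flavor/ImageMeta construction mirrors the unit tests above and assumes nova is importable:

from nova import exception, objects
from nova.virt import hardware

image_meta = objects.ImageMeta.from_dict(
    {'properties': {'hw_locked_memory': 'True'}})

# Allowed: locked memory requested together with an explicit page size.
flavor = objects.Flavor(
    vcpus=2, memory_mb=2048, extra_specs={'hw:mem_page_size': 'small'})
assert hardware.get_locked_memory_constraint(flavor, image_meta)

# Forbidden: locked memory without any mem_page_size raises.
flavor = objects.Flavor(vcpus=2, memory_mb=2048, extra_specs={})
try:
    hardware.get_locked_memory_constraint(flavor, image_meta)
except exception.LockMemoryForbidden:
    pass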
diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py
index 04b1c68bb1..7496db5a7c 100644
--- a/nova/virt/ironic/driver.py
+++ b/nova/virt/ironic/driver.py
@@ -1630,7 +1630,8 @@ class IronicDriver(virt_driver.ComputeDriver):
admin_password, allocations, bdms, detach_block_devices,
attach_block_devices, network_info=None,
evacuate=False, block_device_info=None,
- preserve_ephemeral=False, accel_uuids=None):
+ preserve_ephemeral=False, accel_uuids=None,
+ reimage_boot_volume=False):
"""Rebuild/redeploy an instance.
This version of rebuild() allows for supporting the option to
@@ -1671,7 +1672,13 @@ class IronicDriver(virt_driver.ComputeDriver):
:param preserve_ephemeral: Boolean value; if True the ephemeral
must be preserved on rebuild.
:param accel_uuids: Accelerator UUIDs. Ignored by this driver.
+ :param reimage_boot_volume: Re-image the volume-backed instance.
"""
+ if reimage_boot_volume:
+ raise exception.NovaException(
+ _("Ironic doesn't support rebuilding volume backed "
+ "instances."))
+
LOG.debug('Rebuild called for instance', instance=instance)
instance.task_state = task_states.REBUILD_SPAWNING
diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py
index 9a4aa671be..4efc6fbaeb 100644
--- a/nova/virt/libvirt/blockinfo.py
+++ b/nova/virt/libvirt/blockinfo.py
@@ -73,6 +73,7 @@ import itertools
import operator
from oslo_config import cfg
+from oslo_serialization import jsonutils
from nova import block_device
@@ -400,6 +401,16 @@ def get_info_from_bdm(instance, virt_type, image_meta, bdm,
# NOTE(ndipanov): libvirt starts ordering from 1, not 0
bdm_info['boot_index'] = str(boot_index + 1)
+ # If the device is encrypted pass through the secret, format and options
+ if bdm.get('encrypted'):
+ bdm_info['encrypted'] = bdm.get('encrypted')
+ bdm_info['encryption_secret_uuid'] = bdm.get('encryption_secret_uuid')
+ bdm_info['encryption_format'] = bdm.get('encryption_format')
+ encryption_options = bdm.get('encryption_options')
+ if encryption_options:
+ bdm_info['encryption_options'] = jsonutils.loads(
+ encryption_options)
+
return bdm_info
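For an encrypted device the new block carries the encryption attributes into the disk mapping, deserializing the options string on the way. A small illustration (stdlib json stands in for oslo_serialization.jsonutils, which behaves the same for this input; the secret UUID is a made-up value):

import json

bdm = {
    'encrypted': True,
    'encryption_secret_uuid': '00000000-0000-0000-0000-000000000001',
    'encryption_format': 'luks',
    'encryption_options': '{"json": "options"}',
}

bdm_info = {}
if bdm.get('encrypted'):
    bdm_info['encrypted'] = bdm['encrypted']
    bdm_info['encryption_secret_uuid'] = bdm.get('encryption_secret_uuid')
    bdm_info['encryption_format'] = bdm.get('encryption_format')
    if bdm.get('encryption_options'):
        # Stored as a JSON string on the BDM, wanted as a dict downstream.
        bdm_info['encryption_options'] = json.loads(
            bdm['encryption_options'])

assert bdm_info['encryption_options'] == {'json': 'options'}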
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 134775c01b..925c98aa88 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -3279,8 +3279,8 @@ class LibvirtDriver(driver.ComputeDriver):
format=source_format,
basename=False)
disk_delta = out_path + '.delta'
- libvirt_utils.create_cow_image(src_back_path, disk_delta,
- src_disk_size)
+ libvirt_utils.create_image(
+ disk_delta, 'qcow2', src_disk_size, backing_file=src_back_path)
try:
self._can_quiesce(instance, image_meta)
@@ -4506,7 +4506,7 @@ class LibvirtDriver(driver.ComputeDriver):
'%dG' % ephemeral_size,
specified_fs)
return
- libvirt_utils.create_image('raw', target, '%dG' % ephemeral_size)
+ libvirt_utils.create_image(target, 'raw', f'{ephemeral_size}G')
# Run as root only for block devices.
disk_api.mkfs(os_type, fs_label, target, run_as_root=is_block_dev,
@@ -4515,7 +4515,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _create_swap(target, swap_mb, context=None):
"""Create a swap file of specified size."""
- libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
+ libvirt_utils.create_image(target, 'raw', f'{swap_mb}M')
nova.privsep.fs.unprivileged_mkfs('swap', target)
@staticmethod
@@ -4638,12 +4638,16 @@ class LibvirtDriver(driver.ComputeDriver):
ignore_bdi_for_swap=False):
booted_from_volume = self._is_booted_from_volume(block_device_info)
- def image(fname, image_type=CONF.libvirt.images_type):
- return self.image_backend.by_name(instance,
- fname + suffix, image_type)
+ def image(
+ fname, image_type=CONF.libvirt.images_type, disk_info_mapping=None
+ ):
+ return self.image_backend.by_name(
+ instance, fname + suffix, image_type,
+ disk_info_mapping=disk_info_mapping)
- def raw(fname):
- return image(fname, image_type='raw')
+ def raw(fname, disk_info_mapping=None):
+ return image(
+ fname, image_type='raw', disk_info_mapping=disk_info_mapping)
created_instance_dir = True
@@ -4662,8 +4666,6 @@ class LibvirtDriver(driver.ComputeDriver):
flavor = instance.get_flavor()
swap_mb = 0
if 'disk.swap' in disk_mapping:
- mapping = disk_mapping['disk.swap']
-
if ignore_bdi_for_swap:
# This is a workaround to support legacy swap resizing,
# which does not touch swap size specified in bdm,
@@ -4677,12 +4679,17 @@ class LibvirtDriver(driver.ComputeDriver):
# leaving the work with bdm only.
swap_mb = flavor['swap']
else:
+ disk_info_mapping = disk_mapping['disk.swap']
+ disk_device = disk_info_mapping['dev']
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
- elif (flavor['swap'] > 0 and
- not block_device.volume_in_mapping(
- mapping['dev'], block_device_info)):
+ elif (
+ flavor['swap'] > 0 and
+ not block_device.volume_in_mapping(
+ disk_device, block_device_info,
+ )
+ ):
swap_mb = flavor['swap']
if swap_mb > 0:
@@ -4715,8 +4722,8 @@ class LibvirtDriver(driver.ComputeDriver):
image_id=disk_images['ramdisk_id'])
created_disks = self._create_and_inject_local_root(
- context, instance, booted_from_volume, suffix, disk_images,
- injection_info, fallback_from_host)
+ context, instance, disk_mapping, booted_from_volume, suffix,
+ disk_images, injection_info, fallback_from_host)
# Lookup the filesystem type if required
os_type_with_default = nova.privsep.fs.get_fs_type_for_os_type(
@@ -4729,7 +4736,9 @@ class LibvirtDriver(driver.ComputeDriver):
vm_mode = fields.VMMode.get_from_instance(instance)
ephemeral_gb = instance.flavor.ephemeral_gb
if 'disk.local' in disk_mapping:
- disk_image = image('disk.local')
+ disk_info_mapping = disk_mapping['disk.local']
+ disk_image = image(
+ 'disk.local', disk_info_mapping=disk_info_mapping)
# Short circuit the exists() tests if we already created a disk
created_disks = created_disks or not disk_image.exists()
@@ -4748,7 +4757,9 @@ class LibvirtDriver(driver.ComputeDriver):
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
- disk_image = image(blockinfo.get_eph_disk(idx))
+ disk_name = blockinfo.get_eph_disk(idx)
+ disk_info_mapping = disk_mapping[disk_name]
+ disk_image = image(disk_name, disk_info_mapping=disk_info_mapping)
# Short circuit the exists() tests if we already created a disk
created_disks = created_disks or not disk_image.exists()
@@ -4787,7 +4798,7 @@ class LibvirtDriver(driver.ComputeDriver):
return (created_instance_dir, created_disks)
- def _create_and_inject_local_root(self, context, instance,
+ def _create_and_inject_local_root(self, context, instance, disk_mapping,
booted_from_volume, suffix, disk_images,
injection_info, fallback_from_host):
created_disks = False
@@ -4804,7 +4815,10 @@ class LibvirtDriver(driver.ComputeDriver):
if size == 0 or suffix == '.rescue':
size = None
- backend = self.image_backend.by_name(instance, 'disk' + suffix)
+ disk_name = 'disk' + suffix
+ disk_info_mapping = disk_mapping[disk_name]
+ backend = self.image_backend.by_name(
+ instance, disk_name, disk_info_mapping=disk_info_mapping)
created_disks = not backend.exists()
if instance.task_state == task_states.RESIZE_FINISH:
@@ -5407,7 +5421,9 @@ class LibvirtDriver(driver.ComputeDriver):
if image_type is None:
image_type = CONF.libvirt.images_type
disk_unit = None
- disk = self.image_backend.by_name(instance, name, image_type)
+ disk_info_mapping = disk_mapping[name]
+ disk = self.image_backend.by_name(
+ instance, name, image_type, disk_info_mapping=disk_info_mapping)
if (name == 'disk.config' and image_type == 'rbd' and
not disk.exists()):
# This is likely an older config drive that has not been migrated
@@ -5416,18 +5432,21 @@ class LibvirtDriver(driver.ComputeDriver):
# remove this fall back once we know all config drives are in rbd.
# NOTE(vladikr): make sure that the flat image exist, otherwise
# the image will be created after the domain definition.
- flat_disk = self.image_backend.by_name(instance, name, 'flat')
+ flat_disk = self.image_backend.by_name(
+ instance, name, 'flat', disk_info_mapping=disk_info_mapping)
if flat_disk.exists():
disk = flat_disk
LOG.debug('Config drive not found in RBD, falling back to the '
'instance directory', instance=instance)
- disk_info = disk_mapping[name]
- if 'unit' in disk_mapping and disk_info['bus'] == 'scsi':
+ # The 'unit' key is global to the disk_mapping (rather than for an
+ # individual disk) because it is used solely to track the incrementing
+ # unit number.
+ if 'unit' in disk_mapping and disk_info_mapping['bus'] == 'scsi':
disk_unit = disk_mapping['unit']
- disk_mapping['unit'] += 1 # Increments for the next disk added
+ disk_mapping['unit'] += 1 # Increments for the next disk
conf = disk.libvirt_info(
- disk_info, self.disk_cachemode, flavor['extra_specs'],
- disk_unit=disk_unit, boot_order=boot_order)
+ self.disk_cachemode, flavor['extra_specs'], disk_unit=disk_unit,
+ boot_order=boot_order)
return conf
def _get_guest_fs_config(
@@ -6387,6 +6406,11 @@ class LibvirtDriver(driver.ComputeDriver):
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
membacking.locked = True
+ if hardware.get_locked_memory_constraint(flavor, image_meta):
+ if not membacking:
+ membacking = vconfig.LibvirtConfigGuestMemoryBacking()
+ membacking.locked = True
+
return membacking
def _get_memory_backing_hugepages_support(self, inst_topology, numatune):
@@ -10775,8 +10799,8 @@ class LibvirtDriver(driver.ComputeDriver):
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
- libvirt_utils.create_image(info['type'], instance_disk,
- info['virt_disk_size'])
+ libvirt_utils.create_image(
+ instance_disk, info['type'], info['virt_disk_size'])
elif info['backing_file']:
# Creating backing file follows same way as spawning instances.
cache_name = os.path.basename(info['backing_file'])
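All call sites in this file move to the reordered signature. The two shapes used throughout, shown with illustrative paths (a sketch of the API as exercised by this diff, not additional driver code):

from nova.virt.libvirt import utils as libvirt_utils

# Plain image: an explicit size is required.
libvirt_utils.create_image(
    '/var/lib/nova/instances/inst/disk.swap', 'raw', '512M')

# qcow2 overlay: the backing file becomes a keyword argument, and the size
# may be None to inherit the backing file's virtual size.
libvirt_utils.create_image(
    '/var/lib/nova/instances/inst/disk', 'qcow2', None,
    backing_file='/var/lib/nova/instances/_base/base-image')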
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 155267af23..534cc60759 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -82,13 +82,22 @@ class Image(metaclass=abc.ABCMeta):
SUPPORTS_CLONE = False
- def __init__(self, path, source_type, driver_format, is_block_dev=False):
+ def __init__(
+ self,
+ path,
+ source_type,
+ driver_format,
+ is_block_dev=False,
+ disk_info_mapping=None
+ ):
"""Image initialization.
:param path: libvirt's representation of the path of this disk.
:param source_type: block or file
:param driver_format: raw or qcow2
:param is_block_dev:
+ :param disk_info_mapping: disk_info['mapping'][device] metadata
+ specific to this image generated by nova.virt.libvirt.blockinfo.
"""
if (CONF.ephemeral_storage_encryption.enabled and
not self._supports_encryption()):
@@ -105,6 +114,8 @@ class Image(metaclass=abc.ABCMeta):
self.is_block_dev = is_block_dev
self.preallocate = False
+ self.disk_info_mapping = disk_info_mapping
+
# NOTE(dripton): We store lines of json (path, disk_format) in this
# file, for some image types, to prevent attacks based on changing the
# disk_format.
@@ -145,22 +156,23 @@ class Image(metaclass=abc.ABCMeta):
pass
def libvirt_info(
- self, disk_info, cache_mode, extra_specs, boot_order=None,
- disk_unit=None,
+ self, cache_mode, extra_specs, boot_order=None, disk_unit=None,
):
"""Get `LibvirtConfigGuestDisk` filled for this image.
- :disk_info: Metadata generated by libvirt.blockinfo.get_disk_mapping
:cache_mode: Caching mode for this image
:extra_specs: Instance type extra specs dict.
:boot_order: Disk device boot order
"""
- disk_bus = disk_info['bus']
+ if self.disk_info_mapping is None:
+ raise AttributeError(
+ 'Image must have disk_info_mapping to call libvirt_info()')
+ disk_bus = self.disk_info_mapping['bus']
info = vconfig.LibvirtConfigGuestDisk()
info.source_type = self.source_type
- info.source_device = disk_info['type']
+ info.source_device = self.disk_info_mapping['type']
info.target_bus = disk_bus
- info.target_dev = disk_info['dev']
+ info.target_dev = self.disk_info_mapping['dev']
info.driver_cache = cache_mode
info.driver_discard = self.discard_mode
info.driver_io = self.driver_io
@@ -522,11 +534,16 @@ class Flat(Image):
when creating a disk from a qcow2 if force_raw_images is not set in config.
"""
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
self.disk_name = disk_name
path = (path or os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
- super(Flat, self).__init__(path, "file", "raw", is_block_dev=False)
+ super().__init__(
+ path, "file", "raw", is_block_dev=False,
+ disk_info_mapping=disk_info_mapping
+ )
self.preallocate = (
strutils.to_slug(CONF.preallocate_images) == 'space')
@@ -614,10 +631,15 @@ class Flat(Image):
class Qcow2(Image):
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
path = (path or os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
- super(Qcow2, self).__init__(path, "file", "qcow2", is_block_dev=False)
+ super().__init__(
+ path, "file", "qcow2", is_block_dev=False,
+ disk_info_mapping=disk_info_mapping
+ )
self.preallocate = (
strutils.to_slug(CONF.preallocate_images) == 'space')
@@ -631,7 +653,8 @@ class Qcow2(Image):
@utils.synchronized(filename, external=True, lock_path=self.lock_path)
def create_qcow2_image(base, target, size):
- libvirt_utils.create_cow_image(base, target, size)
+ libvirt_utils.create_image(
+ target, 'qcow2', size, backing_file=base)
# Download the unmodified base image unless we already have a copy.
if not os.path.exists(base):
@@ -695,7 +718,10 @@ class Lvm(Image):
def escape(filename):
return filename.replace('_', '__')
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None,
+ disk_info_mapping=None
+ ):
self.ephemeral_key_uuid = instance.get('ephemeral_key_uuid')
if self.ephemeral_key_uuid is not None:
@@ -724,7 +750,10 @@ class Lvm(Image):
self.lv_path = os.path.join('/dev', self.vg, self.lv)
path = '/dev/mapper/' + dmcrypt.volume_name(self.lv)
- super(Lvm, self).__init__(path, "block", "raw", is_block_dev=True)
+ super(Lvm, self).__init__(
+ path, "block", "raw", is_block_dev=True,
+ disk_info_mapping=disk_info_mapping
+ )
# TODO(sbauza): Remove the config option usage and default the
# LVM logical volume creation to preallocate the full size only.
@@ -832,7 +861,9 @@ class Rbd(Image):
SUPPORTS_CLONE = True
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
if not CONF.libvirt.images_rbd_pool:
raise RuntimeError(_('You should specify'
' images_rbd_pool'
@@ -854,31 +885,32 @@ class Rbd(Image):
if self.driver.ceph_conf:
path += ':conf=' + self.driver.ceph_conf
- super(Rbd, self).__init__(path, "block", "rbd", is_block_dev=False)
+ super().__init__(
+ path, "block", "rbd", is_block_dev=False,
+ disk_info_mapping=disk_info_mapping
+ )
self.discard_mode = CONF.libvirt.hw_disk_discard
def libvirt_info(
- self, disk_info, cache_mode, extra_specs, boot_order=None,
- disk_unit=None,
+ self, cache_mode, extra_specs, boot_order=None, disk_unit=None
):
"""Get `LibvirtConfigGuestDisk` filled for this image.
- :disk_info: Metadata generated by libvirt.blockinfo.get_disk_mapping
:cache_mode: Caching mode for this image
:extra_specs: Instance type extra specs dict.
:boot_order: Disk device boot order
"""
info = vconfig.LibvirtConfigGuestDisk()
- disk_bus = disk_info['bus']
+ disk_bus = self.disk_info_mapping['bus']
hosts, ports = self.driver.get_mon_addrs()
- info.source_device = disk_info['type']
+ info.source_device = self.disk_info_mapping['type']
info.driver_format = 'raw'
info.driver_cache = cache_mode
info.driver_discard = self.discard_mode
info.target_bus = disk_bus
- info.target_dev = disk_info['dev']
+ info.target_dev = self.disk_info_mapping['dev']
info.source_type = 'network'
info.source_protocol = 'rbd'
info.source_name = '%s/%s' % (self.driver.pool, self.rbd_name)
@@ -1195,10 +1227,15 @@ class Rbd(Image):
class Ploop(Image):
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
path = (path or os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
- super(Ploop, self).__init__(path, "file", "ploop", is_block_dev=False)
+ super().__init__(
+ path, "file", "ploop", is_block_dev=False,
+ disk_info_mapping=disk_info_mapping
+ )
self.resolve_driver_format()
@@ -1301,13 +1338,14 @@ class Backend(object):
raise RuntimeError(_('Unknown image_type=%s') % image_type)
return image
- def by_name(self, instance, name, image_type=None):
+ def by_name(self, instance, name, image_type=None, disk_info_mapping=None):
"""Return an Image object for a disk with the given name.
:param instance: the instance which owns this disk
:param name: The name of the disk
:param image_type: (Optional) Image type.
Default is CONF.libvirt.images_type.
+ :param disk_info_mapping: (Optional) Disk info mapping dict
:return: An Image object for the disk with given name and instance.
:rtype: Image
"""
@@ -1316,7 +1354,9 @@ class Backend(object):
# default inline in the method, and not in the kwarg declaration.
image_type = image_type or CONF.libvirt.images_type
backend = self.backend(image_type)
- return backend(instance=instance, disk_name=name)
+ return backend(
+ instance=instance, disk_name=name,
+ disk_info_mapping=disk_info_mapping)
def by_libvirt_path(self, instance, path, image_type=None):
"""Return an Image object for a disk with the given libvirt path.
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 834f242c79..c673818603 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -110,54 +110,48 @@ VTPM_DIR = '/var/lib/libvirt/swtpm/'
def create_image(
- disk_format: str, path: str, size: ty.Union[str, int],
+ path: str,
+ disk_format: str,
+ disk_size: ty.Optional[ty.Union[str, int]],
+ backing_file: ty.Optional[str] = None,
) -> None:
- """Create a disk image
-
- :param disk_format: Disk image format (as known by qemu-img)
+ """Disk image creation with qemu-img
:param path: Desired location of the disk image
- :param size: Desired size of disk image. May be given as an int or
- a string. If given as an int, it will be interpreted
- as bytes. If it's a string, it should consist of a number
- with an optional suffix ('K' for Kibibytes,
- M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
- If no suffix is given, it will be interpreted as bytes.
- """
- processutils.execute('qemu-img', 'create', '-f', disk_format, path, size)
-
-
-def create_cow_image(
- backing_file: ty.Optional[str], path: str, size: ty.Optional[int] = None,
-) -> None:
- """Create COW image
-
- Creates a COW image with the given backing file
-
- :param backing_file: Existing image on which to base the COW image
- :param path: Desired location of the COW image
+ :param disk_format: Disk image format (as known by qemu-img)
+ :param disk_size: Desired size of disk image. May be given as an int or
+ a string. If given as an int, it will be interpreted as bytes. If it's
+ a string, it should consist of a number with an optional suffix ('K'
+ for Kibibytes, 'M' for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
+ If no suffix is given, it will be interpreted as bytes.
+ Can be None in the case of a COW image.
+ :param backing_file: (Optional) Backing file to use.
"""
- base_cmd = ['qemu-img', 'create', '-f', 'qcow2']
+ base_cmd = [
+ 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f', disk_format
+ ]
cow_opts = []
+
if backing_file:
base_details = images.qemu_img_info(backing_file)
- cow_opts += ['backing_file=%s' % backing_file]
- cow_opts += ['backing_fmt=%s' % base_details.file_format]
- else:
- base_details = None
- # Explicitly inherit the value of 'cluster_size' property of a qcow2
- # overlay image from its backing file. This can be useful in cases
- # when people create a base image with a non-default 'cluster_size'
- # value or cases when images were created with very old QEMU
- # versions which had a different default 'cluster_size'.
- if base_details and base_details.cluster_size is not None:
- cow_opts += ['cluster_size=%s' % base_details.cluster_size]
- if size is not None:
- cow_opts += ['size=%s' % size]
- if cow_opts:
+ cow_opts += [
+ f'backing_file={backing_file}',
+ f'backing_fmt={base_details.file_format}'
+ ]
+ # Explicitly inherit the value of 'cluster_size' property of a qcow2
+ # overlay image from its backing file. This can be useful in cases when
+ # people create a base image with a non-default 'cluster_size' value or
+ # cases when images were created with very old QEMU versions which had
+ # a different default 'cluster_size'.
+ if base_details.cluster_size is not None:
+ cow_opts += [f'cluster_size={base_details.cluster_size}']
+
# Format as a comma separated list
csv_opts = ",".join(cow_opts)
cow_opts = ['-o', csv_opts]
+
cmd = base_cmd + cow_opts + [path]
+ if disk_size is not None:
+ cmd += [str(disk_size)]
processutils.execute(*cmd)
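A minimal usage sketch of the consolidated helper, covering both the plain
and backing-file cases (paths hypothetical):

    from nova.virt.libvirt import utils as libvirt_utils

    # plain image: disk_size may be an int (bytes) or a suffixed string
    libvirt_utils.create_image('/tmp/disk.raw', 'raw', '1G')

    # COW overlay: disk_size may be None, in which case qemu-img sizes the
    # overlay from the backing file's virtual size
    libvirt_utils.create_image(
        '/tmp/disk.qcow2', 'qcow2', None, backing_file='/tmp/base.qcow2')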
diff --git a/releasenotes/notes/add-volume-rebuild-b973562ea8f49347.yaml b/releasenotes/notes/add-volume-rebuild-b973562ea8f49347.yaml
new file mode 100644
index 0000000000..47c6b38265
--- /dev/null
+++ b/releasenotes/notes/add-volume-rebuild-b973562ea8f49347.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Added support for rebuilding a volume-backed instance with a different
+ image. This is achieved by reimaging the boot volume, i.e. writing the
+ new image to the boot volume on the Cinder side.
+ Previously, volume-backed instances could only be rebuilt with the same
+ image; this feature allows them to be rebuilt with an image different
+ from the one currently in the boot volume.
+ This is supported starting from API microversion 2.93.
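As an illustration of the feature above, a volume-backed rebuild with a new
image could be requested from the CLI as below (server and image identifiers
hypothetical):

    $ openstack --os-compute-api-version 2.93 server rebuild \
          --image <new-image-uuid> my-volume-backed-server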
diff --git a/releasenotes/notes/new_locked_memory_option-b68a031779366828.yaml b/releasenotes/notes/new_locked_memory_option-b68a031779366828.yaml
new file mode 100644
index 0000000000..72d6e763aa
--- /dev/null
+++ b/releasenotes/notes/new_locked_memory_option-b68a031779366828.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ Added the new ``hw:locked_memory`` extra spec and ``hw_locked_memory``
+ image property to lock memory on a libvirt guest. Locking memory marks
+ the guest memory allocations as unmovable and unswappable.
+ Both the ``hw:locked_memory`` extra spec and the ``hw_locked_memory``
+ image property accept boolean values in string format, such as 'Yes' or
+ 'false'.
+ A ``LockMemoryForbidden`` exception is raised if you request locked
+ memory without also setting either the ``hw:mem_page_size`` flavor extra
+ spec or the ``hw_mem_page_size`` image property; this requirement ensures
+ the scheduler can account for the locked memory correctly and prevent
+ out-of-memory events.
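For illustration, locked memory could be requested through a flavor as below;
the flavor name is hypothetical, and ``hw:mem_page_size`` must accompany it
to avoid ``LockMemoryForbidden``:

    $ openstack flavor set m1.locked \
          --property hw:locked_memory=true \
          --property hw:mem_page_size=large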
diff --git a/releasenotes/notes/project-reader-rbac-8a1d11b3b2e776fd.yaml b/releasenotes/notes/project-reader-rbac-8a1d11b3b2e776fd.yaml
new file mode 100644
index 0000000000..171b07d025
--- /dev/null
+++ b/releasenotes/notes/project-reader-rbac-8a1d11b3b2e776fd.yaml
@@ -0,0 +1,36 @@
+---
+features:
+ - |
+ The Nova policies have been modified to drop the system scope: every
+ API policy is now scoped to a project. This means that system-scoped
+ users will get a 403 permission denied error.
+
+ Also, the project reader role is now ready to use. Users with the
+ reader role can only perform read-only operations within their project,
+ which makes the role well suited to audit purposes.
+
+ Currently, nova supports the following roles:
+
+ * ``admin`` (Legacy admin)
+ * ``project member``
+ * ``project reader``
+
+ For details on what changed from the existing policy, please refer to
+ the `RBAC new guidelines`_; only phase 1 of the guidelines has been
+ implemented so far.
+ Scope checks and the new defaults are currently disabled by default. You
+ can enable them by setting the following config options in the
+ ``nova.conf`` file::
+
+ [oslo_policy]
+ enforce_new_defaults=True
+ enforce_scope=True
+
+ We recommend enabling both scope checks and the new defaults together;
+ otherwise you may experience late failures with unclear error messages.
+
+ Please refer to `Policy New Defaults`_ for details about the new policy
+ defaults and the migration plan.
+
+ .. _`RBAC new guidelines`: https://governance.openstack.org/tc/goals/selected/consistent-and-secure-rbac.html#phase-1
+ .. _`Policy New Defaults`: https://docs.openstack.org/nova/latest/configuration/policy-concepts.html
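As a hedged illustration of the new project reader role (user and project
names hypothetical), read-only audit access can be granted with:

    $ openstack role add --user auditor --project demo reader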